[CI] Use venv for pip installation in benchmarks #3
# Executes benchmarks implemented in this repository using scripts
# for results visualization from intel/llvm (unified-runtime dir).
name: Benchmarks
on:
  pull_request:
  workflow_call:
    inputs:
      pr_no:
        # even though this is a number, this is a workaround for issues with
        # reusable workflow calls that result in "Unexpected value '0'" error.
        type: string
        default: '0'
      bench_script_params:
        required: false
        type: string
        default: ''
      upload_report:
        required: false
        type: boolean
        default: false
permissions:
  contents: read
  pull-requests: write
env:
  UMF_DIR: "${{github.workspace}}/umf-repo"
  BUILD_DIR: "${{github.workspace}}/umf-repo/build"
jobs:
  benchmarks:
    name: Benchmarks
    # run only on upstream; forks will not have the HW
    if: github.repository == 'oneapi-src/unified-memory-framework'
    runs-on: L0_PERF
    steps:
      # Workspace on self-hosted runners is not cleaned automatically.
      # We have to delete files created outside of GitHub Actions ourselves.
      - name: Cleanup self-hosted workspace
        if: always()
        run: |
          ls -la ./
          rm -rf ./* || true
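      # Post a link to this workflow run on the PR so the results are easy to
      # find; skipped when no PR number was passed in (pr_no == 0).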
      - name: Add comment to PR
        uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
        if: ${{ always() && inputs.pr_no != 0 }}
        with:
          script: |
            const pr_no = '${{ inputs.pr_no }}';
            const url = '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}';
            const params = '${{ inputs.bench_script_params }}';
            const body = `Compute Benchmarks run (with params: ${params}):\n${url}`;
            github.rest.issues.createComment({
              issue_number: pr_no,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: body
            })
      - name: Checkout UMF
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          path: ${{env.UMF_DIR}}
          fetch-depth: 0
      # We need to fetch the special ref to get the PR's merge commit. Note that this ref may be absent if the PR is already merged.
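      # (refs/pull/<N>/merge is the merge commit of the PR head into its base
      # branch that GitHub keeps up to date, so benchmarks run against the
      # post-merge state rather than the raw PR head.)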
      - name: Fetch PR's merge commit
        if: ${{ inputs.pr_no != 0 }}
        working-directory: ${{env.UMF_DIR}}
        env:
          PR_NO: ${{ inputs.pr_no }}
        run: |
          git fetch -- https://github.com/${{github.repository}} +refs/pull/${PR_NO}/*:refs/remotes/origin/pr/${PR_NO}/*
          git checkout origin/pr/${PR_NO}/merge
          git rev-parse origin/pr/${PR_NO}/merge
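      # Configure a Release build with the single- and multi-threaded benchmarks
      # enabled and tests/examples/developer tooling disabled; the Level Zero and
      # CUDA providers plus the jemalloc pool are built so they can be benchmarked.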
      - name: Configure UMF
        run: >
          cmake
          -S ${{env.UMF_DIR}}
          -B ${{env.BUILD_DIR}}
          -DCMAKE_BUILD_TYPE=Release
          -DUMF_BUILD_SHARED_LIBRARY=ON
          -DUMF_BUILD_BENCHMARKS=ON
          -DUMF_BUILD_BENCHMARKS_MT=ON
          -DUMF_BUILD_TESTS=OFF
          -DUMF_BUILD_EXAMPLES=OFF
          -DUMF_DEVELOPER_MODE=OFF
          -DUMF_FORMAT_CODE_STYLE=OFF
          -DUMF_BUILD_LEVEL_ZERO_PROVIDER=ON
          -DUMF_BUILD_CUDA_PROVIDER=ON
          -DUMF_BUILD_LIBUMF_POOL_JEMALLOC=ON
      - name: Build UMF
        run: cmake --build ${{env.BUILD_DIR}} -j $(nproc)
      # Get scripts for benchmark data visualization.
      # Use a specific revision, as the scripts or the files' location may change.
      - name: Checkout SYCL
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        with:
          repository: intel/llvm
          # add preloaded UMF benchmarks
          # https://github.com/intel/llvm/pull/17278
          ref: b2f9dab5266d227cc9eb19af1b54c5bdc50221d1
          path: sycl-repo
          fetch-depth: 1
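      # Install the scripts' Python dependencies into a dedicated virtual
      # environment instead of the runner's system interpreter (on a shared
      # self-hosted runner a system-wide pip install may be rejected or leak
      # packages between jobs). The venv is created in ${{github.workspace}},
      # the default working directory, so later steps activate it from there.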
      - name: Install benchmarking scripts deps
        run: |
          python -m venv .venv
          source .venv/bin/activate
          pip install -r ${{github.workspace}}/sycl-repo/unified-runtime/third_party/benchmark_requirements.txt
      - name: Set core range and GPU mask
        run: |
          # Compute the core range for the second NUMA node; first node is for SYCL/UR jobs.
          # Skip the first 4 cores - the kernel is likely to schedule more work on these.
          CORES=$(lscpu | awk '
            /NUMA node1 CPU|On-line CPU/ {line=$0}
            END {
              split(line, a, " ")
              split(a[4], b, ",")
              sub(/^0/, "4", b[1])
              print b[1]
            }')
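          # Illustrative example (hypothetical lscpu output): "NUMA node1 CPU(s): 56-111"
          # yields CORES="56-111"; if only "On-line CPU(s) list: 0-111" matches,
          # the leading 0 is replaced and CORES becomes "4-111".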
echo "Selected core: $CORES"
echo "CORES=$CORES" >> $GITHUB_ENV
          ZE_AFFINITY_MASK=1
          echo "ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK" >> $GITHUB_ENV
      - name: Run UMF benchmarks
        id: benchmarks
        working-directory: ${{env.BUILD_DIR}}
        run: >
          source ${{github.workspace}}/.venv/bin/activate &&
          taskset -c ${{ env.CORES }} ${{ github.workspace }}/sycl-repo/unified-runtime/scripts/benchmarks/main.py
          ~/bench_workdir_umf
          --umf ${{env.BUILD_DIR}}
          --compare baseline
          ${{ inputs.upload_report && '--output-html' || '' }}
          ${{ inputs.pr_no != 0 && '--output-markdown' || '' }}
          ${{ inputs.bench_script_params }}
      # Print the results in the job log as well, in case adding the PR comment fails.
      - name: Print benchmark results
        if: ${{ always() && inputs.pr_no != 0 }}
        run: cat ${{env.BUILD_DIR}}/benchmark_results.md
      - name: Add comment to PR
        uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1
        if: ${{ always() && inputs.pr_no != 0 }}
        with:
          script: |
            let markdown = ""
            try {
              const fs = require('fs');
              markdown = fs.readFileSync('${{env.BUILD_DIR}}/benchmark_results.md', 'utf8');
            } catch(err) {
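              // benchmark_results.md may be missing if the benchmarks step
              // failed early; post the comment without the results table anyway.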
            }
            const pr_no = '${{ inputs.pr_no }}';
            const url = '${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}';
            const test_status = '${{ steps.benchmarks.outcome }}';
            const job_status = '${{ job.status }}';
            const params = '${{ inputs.bench_script_params }}';
            const body = `Compute Benchmarks run (${params}):\n${url}\nJob status: ${job_status}. Test status: ${test_status}.\n ${markdown}`;
            github.rest.issues.createComment({
              issue_number: pr_no,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: body
            })
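      # The HTML report is stashed in the Actions cache keyed by run id;
      # presumably a separate workflow restores this entry to publish the report.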
      - name: Upload HTML report
        if: ${{ always() && inputs.upload_report }}
        uses: actions/cache/save@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
        with:
          path: umf-repo/build/benchmark_results.html
          key: benchmark-results-${{ github.run_id }}
      - name: Get information about platform
        if: ${{ always() }}
        working-directory: ${{env.UMF_DIR}}
        run: .github/scripts/get_system_info.sh