55 changes: 55 additions & 0 deletions .github/actions/sticky-pr-comment/action.yml
@@ -0,0 +1,55 @@
name: "Sticky PR Comment"
description: "Posts or updates a pull request comment identified by a tag."
inputs:
file:
description: "Path to the file whose contents are to be posted"
required: true
tag:
description: "Unique tag to identify the comment (e.g. <!-- BENCHMARK_REPORT_COMMENT -->)"
required: true
runs:
using: "composite"
steps:
- name: Read file and set as output
id: file
shell: bash
run: |
echo "body<<EOF" >> $GITHUB_OUTPUT
cat "${{ inputs.file }}" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT

- name: Post or update PR comment
uses: actions/github-script@v7
with:
github-token: ${{ github.token }}
script: |
const tag = `${{ inputs.tag }}`;
const body = `${tag}\n${{ steps.file.outputs.body }}`;
const pr = context.payload.pull_request?.number;
if (!pr) {
core.setFailed("No pull request found in context.");
return;
}
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: pr,
});
const tagged = comments.find(comment =>
comment.body.includes(tag)
);
if (tagged) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: tagged.id,
body: body,
});
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: pr,
body: body,
});
}
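
For context, a minimal usage sketch (not part of this diff) of how a workflow step might call this composite action. The file name is a placeholder, the tag value is taken from the input description above, and the calling workflow would also need permission to write PR comments (e.g. pull-requests: write) for the github-script step to succeed.

    # Hypothetical caller step; report.md is a placeholder produced earlier in the job.
    - name: Post benchmark report as a sticky comment
      uses: ./.github/actions/sticky-pr-comment
      with:
        file: report.md
        tag: "<!-- BENCHMARK_REPORT_COMMENT -->"
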
60 changes: 60 additions & 0 deletions .github/workflows/benchmark-compare-serial.yml
@@ -0,0 +1,60 @@
name: "Benchmark Report"

description: "Runs benchmarks and posts or updates a sticky PR comment with the results."

on:
  pull_request:
    types: [opened, synchronize, reopened]

permissions:
  contents: read

jobs:
  benchmark:
    name: Benchmark
    runs-on: ubuntu-latest
    env:
      nodeVersion: 20
      mainRef: "main"
    steps:
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: ${{ env.nodeVersion }}

      # Run benchmark on main branch
      - name: Checkout main branch
        uses: actions/checkout@v4
        with:
          ref: ${{ env.mainRef }}
          path: main-source

      - name: Install dependencies (main)
        run: npm install --ignore-scripts
        working-directory: main-source

      - name: Run benchmark on main
        run: node benchmarks/runBenchmarks.js > ../results.main.csv
        working-directory: main-source

      # Run benchmark on PR branch
      - name: Checkout PR branch
        uses: actions/checkout@v4
        with:
          path: pr-source

      - name: Install dependencies (PR)
        run: npm install --ignore-scripts
        working-directory: pr-source

      - name: Run benchmark on PR branch
        run: node benchmarks/runBenchmarks.js > ../results.branch.csv
        working-directory: pr-source

      # Generate the report
      - name: Generate report
        shell: bash
        run: |
          cat results.main.csv results.branch.csv | node main-source/benchmarks/report.js >> $GITHUB_STEP_SUMMARY


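The workflow is described as posting a sticky PR comment, yet the steps shown only append the report to the step summary. A hedged sketch of how the report could also be captured to a file and handed to the sticky-pr-comment action added in this PR; the report.md name, the pipefail guard, the pull-requests: write permission, and the pr-source action path (based on the checkout paths above) are assumptions, not part of this diff.

    # Hypothetical wiring, not part of the workflow as shown.
    # The job would additionally need:
    #   permissions:
    #     pull-requests: write
    - name: Generate report
      shell: bash
      run: |
        set -o pipefail
        cat results.main.csv results.branch.csv \
          | node main-source/benchmarks/report.js \
          | tee report.md >> $GITHUB_STEP_SUMMARY

    - name: Post sticky PR comment
      if: always()   # post the comment even when the benchmark gate fails the step
      uses: ./pr-source/.github/actions/sticky-pr-comment
      with:
        file: report.md
        tag: "<!-- BENCHMARK_REPORT_COMMENT -->"
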
39 changes: 31 additions & 8 deletions benchmarks/report.js
@@ -2,6 +2,10 @@
 
 const readline = require('readline')
 
+// if any average is more than threshold% off we exit with 1
+const threshold = 10
+let failed = false
+
 const defaultUnit = 'msg/s' // default unit for the results
 const units = {
   'pingpong.js': 'ms'
@@ -96,20 +100,37 @@ function calculateAverages (results) {
   return avg
 }
 
+function calculatePercentage (ref, value) {
+  if (ref === undefined) {
+    return { ref: value, diff: 0 }
+  }
+  const perc = ((value / ref) * 100)
+  if (perc > 100) {
+    const diff = perc - 100
+    return { ref, diff }
+  }
+  const diff = (100 - perc) * -1
+  return { ref, diff }
+}
+
 function reportAverages (avg) {
   console.log('\n # Overall Benchmark Results')
+  console.log(`\n +x% is better, -x% is worse, current threshold to fail at -${threshold}%\n\n`)
   console.log('| Label | Benchmark | Config | Average | Units | Percentage')
   console.log('|-------|-----------|--------|---------|-------|-----------')
   for (const key in avg) {
-    let perc
-    let ref
+    let oldRef
     for (const label in avg[key]) {
       const { value, unit, benchmark, config } = avg[key][label]
-      if (perc === undefined) {
-        perc = 100
-        ref = value
-      } else {
-        perc = ((value / ref) * 100).toFixed(2)
+      const { ref, diff } = calculatePercentage(oldRef, value)
+      oldRef = ref
+      // for unit = ms lower is better
+      const correctedDiff = unit === 'ms' ? diff * -1 : diff
+      const sign = correctedDiff > 0 ? '+' : ''
+      const perc = correctedDiff === 0 ? 100 : `${sign}${correctedDiff.toFixed(2)}`
+      if (diff > threshold) {
+        console.error(`\n\nError: ${key} is more than ${sign}${threshold}% off the reference (${ref} ${unit})`)
+        failed = true
       }
       console.log(`| ${label} | ${benchmark} | ${config} | ${value.toFixed(0)} | ${unit} | ${perc}%`)
     }
@@ -125,4 +146,6 @@ async function report () {
   }
 }
 
-report()
+report().then(() => {
+  process.exit(failed ? 1 : 0)
+})
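
To make the new percentage math concrete, a small self-contained example with invented values; the helper is a condensed restatement of calculatePercentage above, for illustration only.

    // Condensed restatement of calculatePercentage from report.js, for illustration only.
    function calculatePercentage (ref, value) {
      if (ref === undefined) return { ref: value, diff: 0 }
      const perc = (value / ref) * 100
      return perc > 100 ? { ref, diff: perc - 100 } : { ref, diff: (100 - perc) * -1 }
    }

    console.log(calculatePercentage(undefined, 1000)) // { ref: 1000, diff: 0 }   first label becomes the reference
    console.log(calculatePercentage(1000, 1100))      // { ref: 1000, diff: 10 }  10% above the reference
    console.log(calculatePercentage(1000, 850))       // { ref: 1000, diff: -15 } 15% below the reference

    // reportAverages then flips the sign for 'ms' results (lower is better), so a
    // +10 diff on an 'ms' benchmark prints as -10.00%, and failed is set whenever
    // the raw diff exceeds the threshold of 10.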