# OpenVM Benchmark v2 #41
# NOTE(review): the following banner came from the GitHub blob-view UI and is
# not part of the workflow itself — kept here as a comment for provenance:
# "This file contains hidden or bidirectional Unicode text that may be
# interpreted or compiled differently than what appears below. To review,
# open the file in an editor that reveals hidden Unicode characters."
# Benchmarks a given OpenVM commit against a Reth block via the Axiom staging
# proving service: prepares block input data (from S3 or generated locally),
# creates a benchmark job, waits for it to become ready, submits a proof,
# then collects metrics and uploads them as an artifact.
#
# All workflow inputs and secrets are passed into `run:` scripts through
# step-level `env:` instead of `${{ }}` interpolation inside the script body,
# so a crafted dispatch input cannot inject shell commands.
name: "OpenVM Benchmark v2"

on:
  workflow_dispatch:
    inputs:
      openvm_version:
        description: "OpenVM version (commit sha) to benchmark"
        required: true
        type: string
      block_number:
        description: "Block number to generate input for"
        required: true
        type: string
        default: "21000000"
      rerun_keygen:
        description: "Rerun keygen"
        required: false
        type: boolean
      recompile_reth:
        description: "Recompile reth program"
        required: false
        type: boolean
      # NOTE(review): reth_version is accepted but not referenced by any step
      # below — confirm whether it should be forwarded in the benchmark_jobs
      # payload alongside openvm_commit.
      reth_version:
        description: "The git commit or branch of Reth program to compile from"
        required: false
        type: string
        default: ""
      cleanup:
        description: "Cleanup after benchmark"
        required: false
        type: boolean
        # boolean inputs take a boolean default, not the string "true"
        default: true

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Setup Rust
        uses: actions-rust-lang/setup-rust-toolchain@v1
        with:
          cache: false

      # Reuse a cached input file from S3 when one exists for this block;
      # otherwise build the OpenVM tooling and generate input.json locally.
      - name: Prepare input data
        env:
          BLOCK_NUMBER: ${{ inputs.block_number }}
          RPC_URL: ${{ secrets.RPC_URL_1 }}
        run: |
          s3_url="https://axiom-public-data-sandbox-us-east-1.s3.us-east-1.amazonaws.com/reth/input/${BLOCK_NUMBER}.json"
          echo "Checking if input file exists for block $BLOCK_NUMBER..."
          if curl --output /dev/null --silent --head --fail "$s3_url"; then
            echo "Input file found in S3, downloading..."
            curl -o input.json "$s3_url"
          else
            echo "Input file not found in S3, generating locally..."
            # Install OpenVM CLI
            echo "Installing OpenVM CLI..."
            cargo +1.85 install --git https://github.com/openvm-org/openvm.git --locked --force cargo-openvm
            # Build client binary
            cd bin/client-eth
            cargo openvm build
            mkdir -p ../host/elf
            SRC="target/riscv32im-risc0-zkvm-elf/release/openvm-client-eth"
            DEST="../host/elf/openvm-client-eth"
            # Copy only when the ELF is missing or its contents changed,
            # keeping the destination timestamp stable otherwise.
            if [ ! -f "$DEST" ] || ! cmp -s "$SRC" "$DEST"; then
              cp "$SRC" "$DEST"
            fi
            cd ../..
            # Create necessary directories
            mkdir -p rpc-cache
            mkdir -p params
            # Build the benchmark binary
            export JEMALLOC_SYS_WITH_MALLOC_CONF="retain:true,background_thread:true,metadata_thp:always,dirty_decay_ms:-1,muzzy_decay_ms:-1,abort_conf:true"
            cargo build --bin openvm-reth-benchmark-bin --profile=release --no-default-features --features="bench-metrics,nightly-features,jemalloc,evm-verify"
            # Generate input data
            echo "Generating input for block $BLOCK_NUMBER..."
            ./target/release/openvm-reth-benchmark-bin \
              --kzg-params-dir params \
              --mode make-input \
              --block-number "$BLOCK_NUMBER" \
              --rpc-url "$RPC_URL" \
              --input-path input.json \
              --cache-dir rpc-cache
            echo "Input file generated successfully"
          fi
          # Verify input file exists
          if [ ! -f "input.json" ]; then
            echo "Error: input.json file not found!"
            exit 1
          fi
          echo "Input file ready: $(ls -lh input.json)"

      # Create a benchmark job on the proving service; exposes its id to the
      # later steps via step output `benchmark_id`.
      - name: Prepare benchmark
        id: prepare
        env:
          OPENVM_COMMIT: ${{ inputs.openvm_version }}
          RERUN_KEYGEN: ${{ inputs.rerun_keygen }}
          RECOMPILE_RETH: ${{ inputs.recompile_reth }}
          API_KEY: ${{ secrets.PROVING_SERVICE_API_KEY }}
        run: |
          # Build JSON payload using jq for proper JSON construction
          json_payload=$(jq -n \
            --arg openvm_commit "$OPENVM_COMMIT" \
            '{openvm_commit: $openvm_commit}')
          # Boolean workflow inputs render as the strings "true"/"false"
          if [ "$RERUN_KEYGEN" = "true" ]; then
            json_payload=$(echo "$json_payload" | jq '. + {rekeygen: true}')
          fi
          if [ "$RECOMPILE_RETH" = "true" ]; then
            json_payload=$(echo "$json_payload" | jq '. + {recompile: true}')
          fi
          echo "Final JSON payload: $json_payload"
          response=$(curl -X POST \
            -H "Axiom-API-Key: $API_KEY" \
            -H "Content-Type: application/json" \
            -d "$json_payload" \
            https://api.staging.app.axiom.xyz/v1/internal/benchmark_jobs)
          echo "Response: $response"
          benchmark_id=$(echo "$response" | jq -r '.id')
          echo "benchmark_id=$benchmark_id" >> "$GITHUB_OUTPUT"

      # Poll the job until it reports "ready" (up to 80 x 30s = 40 minutes);
      # a "failed" status or a timeout fails the workflow immediately.
      - name: Wait for benchmark preparation
        env:
          BENCHMARK_ID: ${{ steps.prepare.outputs.benchmark_id }}
          API_KEY: ${{ secrets.PROVING_SERVICE_API_KEY }}
        run: |
          echo "Waiting for benchmark $BENCHMARK_ID to be ready..."
          max_iterations=80 # 40min
          iteration=0
          while [ $iteration -lt $max_iterations ]; do
            response=$(curl -H "Axiom-API-Key: $API_KEY" \
              "https://api.staging.app.axiom.xyz/v1/internal/benchmark_jobs/$BENCHMARK_ID")
            echo "Response: $response"
            status=$(echo "$response" | jq -r '.status')
            echo "Status: $status (iteration $((iteration + 1))/$max_iterations)"
            if [ "$status" = "ready" ]; then
              echo "Benchmark is ready!"
              break
            fi
            if [ "$status" = "failed" ]; then
              echo "Benchmark failed!"
              exit 1
            fi
            iteration=$((iteration + 1))
            if [ $iteration -lt $max_iterations ]; then
              echo "Waiting 30 seconds before next check..."
              sleep 30
            fi
          done
          if [ $iteration -eq $max_iterations ]; then
            # Message previously claimed "10 minutes (20 iterations)"; the
            # loop actually allows 80 iterations of 30s.
            echo "Timeout: Benchmark preparation did not complete within 40 minutes (80 iterations)"
            exit 1
          fi

      # Fetch the program uuid for the prepared job and submit the proof
      # request with input.json as the request body.
      - name: prove
        id: prove
        env:
          BENCHMARK_ID: ${{ steps.prepare.outputs.benchmark_id }}
          API_KEY: ${{ secrets.PROVING_SERVICE_API_KEY }}
        run: |
          echo "Getting program_uuid for benchmark $BENCHMARK_ID..."
          sleep 180 # wait 3 min to make sure the prove service is ready
          response=$(curl -H "Axiom-API-Key: $API_KEY" \
            "https://api.staging.app.axiom.xyz/v1/internal/benchmark_jobs/$BENCHMARK_ID")
          echo "Response: $response"
          program_uuid=$(echo "$response" | jq -r '.program_uuid')
          echo "Program UUID: $program_uuid"
          echo "Submitting proof with JSON data..."
          response=$(curl -X POST \
            -H "Axiom-API-Key: $API_KEY" \
            -H "Content-Type: application/json" \
            -d @input.json \
            "https://api.staging.app.axiom.xyz/v1/proofs?program_id=$program_uuid")
          echo "Response: $response"
          proof_id=$(echo "$response" | jq -r '.id')
          echo "proof_id=$proof_id" >> "$GITHUB_OUTPUT"

      # Poll the proof until it settles (up to 20 x 30s = 10 minutes).
      # NOTE(review): a proof that settles as "Failed" does not set
      # WORKFLOW_FAILED here, so the workflow still ends green — presumably
      # intentional so metrics/cleanup still run; confirm this is desired.
      - name: Wait for proof
        env:
          PROOF_ID: ${{ steps.prove.outputs.proof_id }}
          API_KEY: ${{ secrets.PROVING_SERVICE_API_KEY }}
        run: |
          echo "Waiting for proof $PROOF_ID to complete..."
          max_iterations=20 # 10min
          iteration=0
          while [ $iteration -lt $max_iterations ]; do
            response=$(curl -H "Axiom-API-Key: $API_KEY" \
              "https://api.staging.app.axiom.xyz/v1/proofs/$PROOF_ID")
            echo "Response: $response"
            status=$(echo "$response" | jq -r '.state')
            echo "Status: $status (iteration $((iteration + 1))/$max_iterations)"
            if [ "$status" = "Succeeded" ] || [ "$status" = "Failed" ]; then
              echo "Proof completed with status: $status"
              break
            fi
            iteration=$((iteration + 1))
            if [ $iteration -lt $max_iterations ]; then
              echo "Waiting 30 seconds before next check..."
              sleep 30
            fi
          done
          if [ $iteration -eq $max_iterations ]; then
            echo "Timeout: Proof did not complete within 10 minutes (20 iterations)"
            echo "WORKFLOW_FAILED=true" >> "$GITHUB_ENV"
          fi

      # Delete the benchmark job from the staging service.
      # NOTE(review): with the default status check, this step is skipped when
      # an earlier step fails; add always() to the condition if cleanup after
      # failure is desired — confirm intent first.
      - name: Cleanup
        if: ${{ inputs.cleanup == true }}
        env:
          BENCHMARK_ID: ${{ steps.prepare.outputs.benchmark_id }}
          API_KEY: ${{ secrets.PROVING_SERVICE_API_KEY }}
        run: |
          echo "Deleting benchmark $BENCHMARK_ID..."
          response=$(curl -X DELETE \
            -H "Axiom-API-Key: $API_KEY" \
            "https://api.staging.app.axiom.xyz/v1/internal/benchmark_jobs/$BENCHMARK_ID")
          echo "Response: $response"

      # Retry-download the rendered metrics report (up to 10 x 30s = 5 min);
      # a 404 means "not generated yet", any other non-200 aborts.
      - name: Download and display metrics
        env:
          PROOF_ID: ${{ steps.prove.outputs.proof_id }}
          API_KEY: ${{ secrets.PROVING_SERVICE_API_KEY }}
        run: |
          if [ "$WORKFLOW_FAILED" = "true" ]; then
            echo "skipping metrics download"
          else
            echo "Downloading metrics for proof $PROOF_ID..."
            max_iterations=10 # 5 minutes total
            iteration=0
            while [ $iteration -lt $max_iterations ]; do
              echo "Attempting to download metrics (attempt $((iteration + 1))/$max_iterations)..."
              response_code=$(curl -w "%{http_code}" -s -H "Axiom-API-Key: $API_KEY" \
                "https://api.staging.app.axiom.xyz/v1/internal/benchmark_metrics/$PROOF_ID" \
                -o metrics.md)
              echo "HTTP response code: $response_code"
              if [ "$response_code" = "200" ]; then
                echo "Metrics downloaded successfully!"
                break
              elif [ "$response_code" = "404" ]; then
                echo "Metrics not ready yet (404), waiting 30 seconds before retry..."
                rm -f metrics.md # Clean up partial file
                iteration=$((iteration + 1))
                if [ $iteration -lt $max_iterations ]; then
                  sleep 30
                fi
              else
                echo "Unexpected response code: $response_code"
                rm -f metrics.md # Clean up partial file
                echo "METRICS_FAILED=true" >> "$GITHUB_ENV"
                break
              fi
            done
            if [ $iteration -eq $max_iterations ]; then
              echo "Timeout: Metrics were not available after 5 minutes"
              echo "METRICS_FAILED=true" >> "$GITHUB_ENV"
            elif [ "$response_code" = "200" ]; then
              echo "Metrics downloaded to metrics.md"
              echo "=== BENCHMARK METRICS ==="
              cat metrics.md
              echo "========================="
            fi
          fi

      - name: Upload metrics as artifact
        if: env.WORKFLOW_FAILED != 'true' && env.METRICS_FAILED != 'true'
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-metrics-${{ inputs.openvm_version }}
          path: metrics.md
          retention-days: 30

      # Fail the job at the end if a timeout was recorded earlier, so all
      # best-effort steps above (cleanup, metrics) have had a chance to run.
      - name: Check workflow status
        run: |
          if [ "$WORKFLOW_FAILED" = "true" ]; then
            echo "Workflow failed due to timeout or proof failure"
            exit 1
          else
            echo "Workflow completed successfully"
          fi