---
name: Run snarkOS Benchmarks

on:
  push:
    branches:
      - 'staging'
  workflow_dispatch:

jobs:
  # Run benchmarks and store the output to a file
  benchmark:
    name: Benchmark
    runs-on:
      labels: ubuntu-latest-m
    permissions:
      contents: write
    steps:
      - name: Checkout
        uses: actions/checkout@v5

      - name: Install Rust
        uses: dtolnay/rust-toolchain@1.88 # Update this when the MSRV changes.

      - name: Install required Debian packages
        run: |
          sudo apt-get update
          sudo apt-get install -y lld python3-pip

      - name: Set up Python virtual environment
        run: |
          pip install --upgrade pip
          python -m venv pyenv
          source pyenv/bin/activate
          pip install aiohttp

      - name: Set up Rust build caching
        uses: Swatinem/rust-cache@v2

      - name: Set up Google Cloud Integration
        env:
          GCLOUD_SERVICE_KEY: ${{ secrets.GCLOUD_SERVICE_KEY }}
        run: |
          curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg
          echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list
          sudo apt-get update
          sudo apt-get install google-cloud-cli -y
          # Quote the expansion so the service-account JSON is written verbatim;
          # an unquoted $GCLOUD_SERVICE_KEY would be word-split and glob-expanded.
          echo "$GCLOUD_SERVICE_KEY" > ${HOME}/gcloud-key.json
          gcloud auth activate-service-account --key-file=${HOME}/gcloud-key.json

      - name: Fetch Ledger Data
        run: |
          gcloud config set project protocol-development-sandbox
          # Created with `.ci/generate_ledger.sh 40 250 1`
          # NOTE: This needs to be updated when the consensus version changes.
          gcloud storage cp gs://ci_testdata/sync-ledger-val40-250-9ec2291c57.zip ledger.zip
          unzip ledger.zip

      - name: Install snarkOS (test_network)
        run: |
          # use `test_network` flag without snarkVM `dev_println`
          cargo install --path=. --locked --features=test_consensus_heights,test_targets

      # Download previous benchmark result from cache (if exists)
      - name: Download previous benchmark data
        uses: actions/cache@v4
        with:
          path: ./cache
          key: ${{ runner.os }}-benchmark

      - name: Create results file
        run: |
          # Open the JSON array; the closing "]" is appended by the
          # "Finish results file" step after all benchmarks have run.
          printf "[\n" | tee -a results.json

          echo "Generated results file:"
          cat results.json

      - name: Run REST API benchmark
        timeout-minutes: 20
        run: |
          source pyenv/bin/activate
          ./.ci/bench_rest_api.sh

      - name: Run P2P sync benchmark
        timeout-minutes: 60
        run: ./.ci/bench_p2p_sync.sh

      - name: Run BFT sync benchmark
        timeout-minutes: 60
        run: ./.ci/bench_bft_sync.sh

      - name: Install snarkOS (production)
        run: cargo install --path=. --locked

      - name: Run CDN sync benchmark
        timeout-minutes: 60
        run: ./.ci/bench_cdn_sync.sh

      - name: Finish results file
        run: |
          printf "]\n" | tee -a results.json
          # Use printf so the "\n" escapes are interpreted
          # (bash `echo` without -e would print them literally).
          printf "\n\nFile Contents:\n"
          cat results.json

      - name: Generate benchmark results
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: snarkOS Benchmarks
          alert-threshold: '150%'
          comment-on-alert: true
          # TODO: Set to `true` once the benchmarks have less noise.
          fail-on-alert: false
          github-token: ${{ secrets.GITHUB_TOKEN }}
          # Use generic JSON format
          # Note: there is no way to set smallerIsBetter for individual benchmarks right now,
          # but for all, except the sync variance, bigger is indeed better.
          tool: 'customBiggerIsBetter'
          output-file-path: results.json
          alert-comment-cc-users: '@kaimast'
          # Only push the results on staging (see below)
          auto-push: false
          # Enable Job Summary for PRs
          summary-always: true

      - name: Push benchmark result
        # Avoid pushing results on pull requests or experimental branches, to reduce noise in the data.
        # The results for all other runs are still accessible through the workflow summary.
        if: github.ref_name == 'staging'
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: git push origin gh-pages