From 454cfd65360a1ef1ed05fef48d8789b1e6e692db Mon Sep 17 00:00:00 2001
From: Divvy <54956345+DivvyCr@users.noreply.github.com>
Date: Sat, 2 May 2020 18:39:36 +0100
Subject: [PATCH] Support benchmarking reports and lower the benching time.
 (#242)

* Support benchmarking reports and lower the benching time.

* Use appropriate GH token.

* split up files to increase performance

* Final touches?

* Only push on master; comment all the time

* fix invalid file

* only run oce_rlcs; intensive is skipped

* make it run intensive as a separate only test

* switch to using gh edit token for now

* Split benchmarking action into 2 - one comments on all pushes, and the
  second uploads benchmarking data from pushes to master.

* try to use the intensive version

Co-authored-by: DivvyCr
Co-authored-by: dtracers
---
 .github/workflows/benchmarking-master.yml  | 65 ++++++++++++++++++++++
 .github/workflows/benchmarking.yml         | 27 +++++++--
 carball/tests/benchmarking/benchmarking.py | 12 ++--
 3 files changed, 92 insertions(+), 12 deletions(-)
 create mode 100644 .github/workflows/benchmarking-master.yml

diff --git a/.github/workflows/benchmarking-master.yml b/.github/workflows/benchmarking-master.yml
new file mode 100644
index 00000000..8ef50f0d
--- /dev/null
+++ b/.github/workflows/benchmarking-master.yml
@@ -0,0 +1,65 @@
+# Uploads benchmarking data on push to master.
+
+name: Benchmarking (Master)
+
+on:
+  push:
+    branches: [ master ]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        benchtest: [short_sample, short_dropshot, full_rumble, oce_rlcs, intensive_oce_rlcs]
+
+    steps:
+    - uses: actions/checkout@v2
+    - uses: actions/setup-python@v1
+      with:
+        python-version: '3.7'
+        architecture: 'x64'
+
+    - name: Install/Update pip and wheel.
+      run:
+        python -m pip install --upgrade pip &&
+        pip install wheel
+
+    - name: Set up and install protobuf.
+      run: chmod 777 ./_travis/install-protoc.sh && ./_travis/install-protoc.sh 2.6.1
+
+    - name: Install dependencies.
+      run: pip install -r requirements.txt && pip install -r requirements-test.txt
+
+    - name: Initialise.
+      run: python init.py
+
+    - name: Prepare benchmark.
+      run: |
+        pip install -r requirements.txt && pip install -r requirements-test.txt
+        cd carball/tests/benchmarking
+        echo === BENCHMARK DIRECTORY ===
+        ls -lh
+        echo ===========================
+
+    - name: Run benchmark.
+      run: |
+        cd carball/tests/benchmarking
+        pytest benchmarking.py --benchmark-json carball_benchmarking.json -k 'test_${{matrix.benchtest}}'
+
+    # REQUIRES gh-pages BRANCH IN THE MAIN REPO (for the graphs).
+    - name: Process and store benchmark ${{matrix.benchtest}}.
+      if: contains(github.ref, 'master')
+      uses: rhysd/github-action-benchmark@v1
+      with:
+        name: Carball Benchmarks ${{matrix.benchtest}}
+        tool: 'pytest'
+        output-file-path: carball/tests/benchmarking/carball_benchmarking.json
+        benchmark-data-dir-path: dev/bench/${{matrix.benchtest}}/
+        # Use personal access token instead of GITHUB_TOKEN due to
+        # https://github.community/t5/GitHub-Actions/Github-action-not-triggering-gh-pages-upon-push/td-p/26869/highlight/false
+        github-token: ${{ secrets.GH_WIKI_EDIT_TOKEN }}
+        auto-push: true
+        alert-threshold: '125%' # If performance is 1.25x worse, there will be an alert
+        comment-always: false
+        comment-on-alert: false # benchmarking.yml takes care of commenting.
diff --git a/.github/workflows/benchmarking.yml b/.github/workflows/benchmarking.yml
index 4d2d9f4a..25fb47ee 100644
--- a/.github/workflows/benchmarking.yml
+++ b/.github/workflows/benchmarking.yml
@@ -1,12 +1,15 @@
+# Comment performance benchmarking on every push.
+
 name: Benchmarking
 
-on:
-  push:
-    branches: [ master, action-benchmark ]
+on: push
 
 jobs:
   build:
     runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        benchtest: [short_sample, short_dropshot, full_rumble, oce_rlcs, intensive_oce_rlcs]
 
     steps:
     - uses: actions/checkout@v2
@@ -40,7 +43,19 @@ jobs:
     - name: Run benchmark.
       run: |
         cd carball/tests/benchmarking
-        pytest benchmarking.py
+        pytest benchmarking.py --benchmark-json carball_benchmarking.json -k 'test_${{matrix.benchtest}}'
 
-# TO BE DONE.
-# - name: Store benchmark result(s).
\ No newline at end of file
+    - name: Process and store benchmark ${{matrix.benchtest}}.
+      uses: rhysd/github-action-benchmark@v1
+      with:
+        name: Carball Benchmarks ${{matrix.benchtest}}
+        tool: 'pytest'
+        output-file-path: carball/tests/benchmarking/carball_benchmarking.json
+        benchmark-data-dir-path: dev/bench/${{matrix.benchtest}}/
+        # Use personal access token instead of GITHUB_TOKEN due to
+        # https://github.community/t5/GitHub-Actions/Github-action-not-triggering-gh-pages-upon-push/td-p/26869/highlight/false
+        github-token: ${{ secrets.GITHUB_TOKEN }}
+        auto-push: false
+        alert-threshold: '125%' # If performance is 1.25x worse, there will be an alert
+        comment-always: true
+        fail-on-alert: true
diff --git a/carball/tests/benchmarking/benchmarking.py b/carball/tests/benchmarking/benchmarking.py
index 24a5d785..6ad0cc98 100644
--- a/carball/tests/benchmarking/benchmarking.py
+++ b/carball/tests/benchmarking/benchmarking.py
@@ -14,32 +14,32 @@ def test_short_sample(benchmark):
     replay_path = get_replay_path("SHORT_SAMPLE.replay")
 
     benchmark.pedantic(analyze_replay_file,
-                       kwargs={"replay_path":replay_path}, rounds=50, iterations=3)
+                       kwargs={"replay_path":replay_path}, rounds=10, iterations=3)
 
 
 def test_short_dropshot(benchmark):
     replay_path = get_replay_path("DROPSHOT_PHASE2_BALL.replay")
 
     benchmark.pedantic(analyze_replay_file,
-                       kwargs={"replay_path":replay_path}, rounds=30, iterations=3)
+                       kwargs={"replay_path":replay_path}, rounds=10, iterations=3)
 
 
 def test_full_rumble(benchmark):
     replay_path = get_replay_path("RUMBLE_FULL.replay")
 
     benchmark.pedantic(analyze_replay_file,
-                       kwargs={"replay_path":replay_path}, rounds=20, iterations=3)
+                       kwargs={"replay_path":replay_path}, rounds=10, iterations=3)
 
 
 def test_oce_rlcs(benchmark):
     replay_path = get_replay_path("OCE_RLCS_7_CARS.replay")
 
     benchmark.pedantic(analyze_replay_file,
-                       kwargs={"replay_path":replay_path}, rounds=20, iterations=3)
+                       kwargs={"replay_path":replay_path}, rounds=10, iterations=3)
 
 
-def test_oce_rlcs_intensive(benchmark):
+def test_intensive_oce_rlcs(benchmark):
    replay_path = get_replay_path("OCE_RLCS_7_CARS.replay")
 
     benchmark.pedantic(analyze_replay_file,
-                       kwargs={"replay_path":replay_path, "calculate_intensive_events":True}, rounds=5, iterations=3)
+                       kwargs={"replay_path":replay_path, "calculate_intensive_events":True}, rounds=5, iterations=1)
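A note on the lowered numbers: in pytest-benchmark's pedantic mode, rounds is how many timed rounds run and iterations is how many calls are made per round, so dropping rounds from 50 to 10 for the short sample cuts that benchmark's wall time roughly fivefold while still averaging over three calls per round. Below is a minimal, self-contained sketch of the same pattern; fake_analyze is a hypothetical stand-in for carball's analyze_replay_file, which needs a real replay file on disk:

    import time

    def fake_analyze(replay_path, calculate_intensive_events=False):
        # Hypothetical stand-in for carball.analyze_replay_file:
        # pretend intensive event calculation costs extra time.
        time.sleep(0.2 if calculate_intensive_events else 0.05)

    def test_fake_analyze(benchmark):
        # pytest-benchmark injects the `benchmark` fixture; pedantic mode
        # gives explicit control: 10 timed rounds of 3 calls each,
        # mirroring the post-patch settings in benchmarking.py.
        benchmark.pedantic(fake_analyze,
                           kwargs={"replay_path": "SHORT_SAMPLE.replay"},
                           rounds=10, iterations=3)

Running it with pytest --benchmark-json carball_benchmarking.json produces the same report format that both workflows hand to rhysd/github-action-benchmark.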
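That JSON report is what tool: 'pytest' tells github-action-benchmark to parse, and alert-threshold: '125%' then flags any test whose tracked time regresses to 1.25x its previous value; benchmarking.yml comments on every push, while benchmarking-master.yml pushes the data to the gh-pages graphs only from master. Assuming pytest-benchmark's standard report layout (a top-level "benchmarks" list whose entries carry per-test "stats" in seconds), a quick local sanity check might look like:

    import json

    # Inspect the report written by the 'Run benchmark.' step.
    with open("carball_benchmarking.json") as f:
        report = json.load(f)

    for bench in report["benchmarks"]:
        stats = bench["stats"]
        # Mean and standard deviation are in seconds per call.
        print(f"{bench['name']}: {stats['mean']:.3f}s "
              f"+/- {stats['stddev']:.3f}s over {stats['rounds']} rounds")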