# benchmark.yml
name: benchmark
"on":
  push:
    branches:
      - main
      - dev
      - deps
  pull_request:
    branches:
      - main
  workflow_dispatch:
    inputs:
      ref:
        description: (optional) ref to benchmark
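# A manual run can target any ref via the input above. One way to trigger it
# (a sketch, assuming the GitHub CLI is installed and authenticated):
#   gh workflow run benchmark.yml -f ref=<branch-tag-or-sha>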
env:
  FORCE_COLOR: "1"
  PYTHONHASHSEED: "42"
  BASELINE_URL: https://github.com/jab/bidict/releases/download/microbenchmarks/GHA-linux-cachegrind-x86_64-CPython-3.12.2-baseline.json
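# The baseline filename above records the environment it was captured in (GHA Linux
# runner, cachegrind, x86_64, CPython 3.12.2), and PYTHONHASHSEED is pinned so hash
# randomization does not perturb results between runs. When the pinned Python version
# changes, upload a new matching baseline (see the setup-Python step below).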
jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - name: install valgrind
        uses: awalsh128/cache-apt-pkgs-action@44c33b32f808cdddd5ac0366d70595ed63661ed8
        with:
          packages: valgrind
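      # Full history (fetch-depth: 0) is needed so the optional 'ref' input can be
      # checked out further below even when it is not the triggering commit.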
      - name: check out source
        uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11
        with:
          fetch-depth: 0
      - name: set up Python
        uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c
        with:
          # Pin to micro-release for better reproducibility.
          # When upgrading to a new Python version, remember to upload new associated baseline
          # benchmark results here: https://github.com/jab/bidict/releases/edit/microbenchmarks
          python-version: '3.12.2'
          cache: pip
          cache-dependency-path: dev-deps/python3.12/test.txt
      - name: install PyPI dependencies
        run: |
          python -m pip install -U pip setuptools wheel
          python -m pip install -r dev-deps/python3.12/test.txt
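      # Benchmark the ref requested via workflow_dispatch if one was given,
      # falling back to the commit that triggered this run.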
      - name: install the version of bidict to benchmark
        run: |
          git checkout ${{ github.event.inputs.ref || github.sha }}
          pip install .
          # restore the current revision so we use its version of tests/microbenchmarks.py
          git checkout ${{ github.sha }}
          # move aside the 'bidict' subdirectory to make sure we always import the installed version
          mv -v bidict src
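      # Fetch the stored baseline results and sanity-check that the download
      # at least looks like JSON (its first line should be '{').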
      - name: download baseline benchmark results
        run: |
          curl -Lso baseline.json "$BASELINE_URL"
          line1=$(head -n1 baseline.json)
          [ "$line1" = "{" ]
      - name: benchmark and compare to baseline
        run: |
          ./cachegrind.py pytest -n0 \
            --benchmark-autosave \
            --benchmark-columns=min,rounds,iterations \
            --benchmark-disable-gc \
            --benchmark-group-by=name \
            --benchmark-compare=baseline.json \
            tests/microbenchmarks.py
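      # --benchmark-autosave above writes its results into .benchmarks; fail the job
      # if nothing was produced there.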
      - name: archive benchmark results
        uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8
        with:
          name: microbenchmark results
          path: .benchmarks
          if-no-files-found: error
permissions:
  contents: read