Commit 24671a9

[Benchmark] fail test if model artifact does not exist (#8482)
* replace fetcher
* replace fetcher
* replace fetcher
* replace fetcher
* replace fetcher
* replace fetcher
1 parent e8d2009 commit 24671a9

2 files changed: +125 -114 lines

.github/workflows/android-perf.yml

+63 -57

@@ -96,63 +96,6 @@ jobs:
 
           PYTHONPATH="${PWD}" python .ci/scripts/gather_benchmark_configs.py $ARGS
 
-  prepare-test-specs:
-    runs-on: linux.2xlarge
-    needs: set-parameters
-    strategy:
-      matrix: ${{ fromJson(needs.set-parameters.outputs.benchmark_configs) }}
-      fail-fast: false
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Prepare the spec
-        id: prepare
-        shell: bash
-        env:
-          BENCHMARK_CONFIG: ${{ toJSON(matrix) }}
-        working-directory: extension/benchmark/android/benchmark
-        run: |
-          set -eux
-
-          # The model will be exported in the next step to this S3 path
-          MODEL_PATH="https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/${{ matrix.model }}_${{ matrix.config }}/model.zip"
-          # We could write a script to properly use jinja here, but there is only one variable,
-          # so let's just sed it
-          sed -i -e 's,{{ model_path }},'"${MODEL_PATH}"',g' android-llm-device-farm-test-spec.yml.j2
-
-          BENCHMARK_CONFIG_ID=$(echo "${{ matrix.model }}_${{ matrix.config }}" | sed -e 's/[^A-Za-z0-9._-]/_/g')
-          # The config for this benchmark runs, we save it in the test spec so that it can be fetched
-          # later by the upload script
-          sed -i -e 's,{{ benchmark_config_id }},'"${BENCHMARK_CONFIG_ID}"',g' android-llm-device-farm-test-spec.yml.j2
-
-          cp android-llm-device-farm-test-spec.yml.j2 android-llm-device-farm-test-spec.yml
-          # Just print the test spec for debugging
-          cat android-llm-device-farm-test-spec.yml
-
-          # Save the benchmark configs so that we can use it later in the dashboard
-          echo "${BENCHMARK_CONFIG}" > "${BENCHMARK_CONFIG_ID}.json"
-          echo "benchmark-config-id=${BENCHMARK_CONFIG_ID}" >> $GITHUB_OUTPUT
-
-      - name: Upload the spec
-        uses: seemethere/upload-artifact-s3@v5
-        with:
-          s3-bucket: gha-artifacts
-          s3-prefix: |
-            ${{ github.repository }}/${{ github.run_id }}/artifacts/${{ matrix.model }}_${{ matrix.config }}
-          retention-days: 1
-          if-no-files-found: error
-          path: extension/benchmark/android/benchmark/android-llm-device-farm-test-spec.yml
-
-      - name: Update the benchmark configs
-        uses: seemethere/upload-artifact-s3@v5
-        with:
-          s3-bucket: gha-artifacts
-          s3-prefix: |
-            ${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
-          retention-days: 1
-          if-no-files-found: error
-          path: extension/benchmark/android/benchmark/${{ steps.prepare.outputs.benchmark-config-id }}.json
-
   export-models:
     name: export-models
     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
@@ -335,6 +278,69 @@ jobs:
           fi
           echo "::endgroup::"
 
+  prepare-test-specs:
+    runs-on: linux.2xlarge
+    needs:
+      - set-parameters
+      - export-models
+    strategy:
+      matrix: ${{ fromJson(needs.set-parameters.outputs.benchmark_configs) }}
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Prepare the spec
+        id: prepare
+        shell: bash
+        env:
+          BENCHMARK_CONFIG: ${{ toJSON(matrix) }}
+        working-directory: extension/benchmark/android/benchmark
+        run: |
+          set -eux
+
+          # The model will be exported in the next step to this S3 path
+          MODEL_PATH="https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/${{ matrix.model }}_${{ matrix.config }}/model.zip"
+
+          # Check if the model artifact exists; if not, fail this step and skip generating the test spec.
+          curl -s --head -f ${MODEL_PATH}
+
+          # We could write a script to properly use jinja here, but there is only one variable,
+          # so let's just sed it
+          sed -i -e 's,{{ model_path }},'"${MODEL_PATH}"',g' android-llm-device-farm-test-spec.yml.j2
+
+          BENCHMARK_CONFIG_ID=$(echo "${{ matrix.model }}_${{ matrix.config }}" | sed -e 's/[^A-Za-z0-9._-]/_/g')
+          # The config for this benchmark runs, we save it in the test spec so that it can be fetched
+          # later by the upload script
+          sed -i -e 's,{{ benchmark_config_id }},'"${BENCHMARK_CONFIG_ID}"',g' android-llm-device-farm-test-spec.yml.j2
+
+          cp android-llm-device-farm-test-spec.yml.j2 android-llm-device-farm-test-spec.yml
+          # Just print the test spec for debugging
+          cat android-llm-device-farm-test-spec.yml
+
+          # Save the benchmark configs so that we can use it later in the dashboard
+          echo "${BENCHMARK_CONFIG}" > "${BENCHMARK_CONFIG_ID}.json"
+          echo "benchmark-config-id=${BENCHMARK_CONFIG_ID}" >> $GITHUB_OUTPUT
+
+      - name: Upload the spec
+        uses: seemethere/upload-artifact-s3@v5
+        with:
+          s3-bucket: gha-artifacts
+          s3-prefix: |
+            ${{ github.repository }}/${{ github.run_id }}/artifacts/${{ matrix.model }}_${{ matrix.config }}
+          retention-days: 1
+          if-no-files-found: error
+          path: extension/benchmark/android/benchmark/android-llm-device-farm-test-spec.yml
+
+      - name: Update the benchmark configs
+        uses: seemethere/upload-artifact-s3@v5
+        with:
+          s3-bucket: gha-artifacts
+          s3-prefix: |
+            ${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
+          retention-days: 1
+          if-no-files-found: error
+          path: extension/benchmark/android/benchmark/${{ steps.prepare.outputs.benchmark-config-id }}.json
+
   build-benchmark-app:
     name: build-benchmark-app
     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
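
Why the added curl line is enough to fail the job: the run script uses set -eux, so any command exiting non-zero aborts the step, and curl with -f exits non-zero when the HEAD request gets an HTTP error such as 404. A minimal bash sketch of that behaviour outside the workflow (the URL below is a placeholder, not the real gha-artifacts path):

#!/usr/bin/env bash
# Sketch only: demonstrates how the existence check added in this commit behaves.
# MODEL_PATH here is a placeholder; in the workflow it points at the model.zip
# uploaded by the export-models job.
set -eux

MODEL_PATH="https://example.com/artifacts/model.zip"

# -s silences progress output, --head sends an HTTP HEAD request, and -f makes
# curl exit with a non-zero status on HTTP errors (404, 403, ...). Combined
# with set -e, a missing artifact aborts the script right here, before any
# test spec is generated.
curl -s --head -f "${MODEL_PATH}"

echo "model artifact exists; safe to generate the test spec"

Because prepare-test-specs now also declares needs: export-models, this check only runs after the export job has had its chance to publish the artifact, so a failure genuinely means the model was never produced.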

.github/workflows/apple-perf.yml

+62 -57

@@ -98,63 +98,6 @@ jobs:
 
           echo "benchmark_configs is: ${{ steps.set-parameters.outputs.benchmark_configs }}"
 
-  prepare-test-specs:
-    runs-on: linux.2xlarge
-    needs: set-parameters
-    strategy:
-      matrix: ${{ fromJson(needs.set-parameters.outputs.benchmark_configs) }}
-      fail-fast: false
-    steps:
-      - uses: actions/checkout@v3
-
-      - name: Prepare the spec
-        id: prepare
-        shell: bash
-        env:
-          BENCHMARK_CONFIG: ${{ toJSON(matrix) }}
-        working-directory: extension/benchmark/apple/Benchmark
-        run: |
-          set -eux
-
-          # The model will be exported in the next step to this S3 path
-          MODEL_PATH="https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/${{ matrix.model }}_${{ matrix.config }}/model.zip"
-          # We could write a script to properly use jinja here, but there is only one variable,
-          # so let's just sed it
-          sed -i -e 's,{{ model_path }},'"${MODEL_PATH}"',g' default-ios-device-farm-appium-test-spec.yml.j2
-
-          BENCHMARK_CONFIG_ID=$(echo "${{ matrix.model }}_${{ matrix.config }}" | sed -e 's/[^A-Za-z0-9._-]/_/g')
-          # The config for this benchmark runs, we save it in the test spec so that it can be fetched
-          # later by the upload script
-          sed -i -e 's,{{ benchmark_config_id }},'"${BENCHMARK_CONFIG_ID}"',g' default-ios-device-farm-appium-test-spec.yml.j2
-
-          cp default-ios-device-farm-appium-test-spec.yml.j2 default-ios-device-farm-appium-test-spec.yml
-          # Just print the test spec for debugging
-          cat default-ios-device-farm-appium-test-spec.yml
-
-          # Save the benchmark configs so that we can use it later in the dashboard
-          echo "${BENCHMARK_CONFIG}" > "${BENCHMARK_CONFIG_ID}.json"
-          echo "benchmark-config-id=${BENCHMARK_CONFIG_ID}" >> $GITHUB_OUTPUT
-
-      - name: Upload the spec
-        uses: seemethere/upload-artifact-s3@v5
-        with:
-          s3-bucket: gha-artifacts
-          s3-prefix: |
-            ${{ github.repository }}/${{ github.run_id }}/artifacts/${{ matrix.model }}_${{ matrix.config }}
-          retention-days: 1
-          if-no-files-found: error
-          path: extension/benchmark/apple/Benchmark/default-ios-device-farm-appium-test-spec.yml
-
-      - name: Update the benchmark configs
-        uses: seemethere/upload-artifact-s3@v5
-        with:
-          s3-bucket: gha-artifacts
-          s3-prefix: |
-            ${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
-          retention-days: 1
-          if-no-files-found: error
-          path: extension/benchmark/apple/Benchmark/${{ steps.prepare.outputs.benchmark-config-id }}.json
-
   export-models:
     name: export-models
     uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
@@ -344,6 +287,68 @@ jobs:
           fi
           echo "::endgroup::"
 
+  prepare-test-specs:
+    runs-on: linux.2xlarge
+    needs:
+      - set-parameters
+      - export-models
+    strategy:
+      matrix: ${{ fromJson(needs.set-parameters.outputs.benchmark_configs) }}
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Prepare the spec
+        id: prepare
+        shell: bash
+        env:
+          BENCHMARK_CONFIG: ${{ toJSON(matrix) }}
+        working-directory: extension/benchmark/apple/Benchmark
+        run: |
+          set -eux
+
+          # The model will be exported in the next step to this S3 path
+          MODEL_PATH="https://gha-artifacts.s3.amazonaws.com/${{ github.repository }}/${{ github.run_id }}/artifacts/${{ matrix.model }}_${{ matrix.config }}/model.zip"
+          # Check if the model artifact exists; if not, fail this step and skip generating the test spec.
+          curl -s --head -f ${MODEL_PATH}
+          # We could write a script to properly use jinja here, but there is only one variable,
+          # so let's just sed it
+          sed -i -e 's,{{ model_path }},'"${MODEL_PATH}"',g' default-ios-device-farm-appium-test-spec.yml.j2
+
+          BENCHMARK_CONFIG_ID=$(echo "${{ matrix.model }}_${{ matrix.config }}" | sed -e 's/[^A-Za-z0-9._-]/_/g')
+          # The config for this benchmark runs, we save it in the test spec so that it can be fetched
+          # later by the upload script
+          sed -i -e 's,{{ benchmark_config_id }},'"${BENCHMARK_CONFIG_ID}"',g' default-ios-device-farm-appium-test-spec.yml.j2
+
+          cp default-ios-device-farm-appium-test-spec.yml.j2 default-ios-device-farm-appium-test-spec.yml
+          # Just print the test spec for debugging
+          cat default-ios-device-farm-appium-test-spec.yml
+
+          # Save the benchmark configs so that we can use it later in the dashboard
+          echo "${BENCHMARK_CONFIG}" > "${BENCHMARK_CONFIG_ID}.json"
+          echo "benchmark-config-id=${BENCHMARK_CONFIG_ID}" >> $GITHUB_OUTPUT
+
+      - name: Upload the spec
+        uses: seemethere/upload-artifact-s3@v5
+        with:
+          s3-bucket: gha-artifacts
+          s3-prefix: |
+            ${{ github.repository }}/${{ github.run_id }}/artifacts/${{ matrix.model }}_${{ matrix.config }}
+          retention-days: 1
+          if-no-files-found: error
+          path: extension/benchmark/apple/Benchmark/default-ios-device-farm-appium-test-spec.yml
+
+      - name: Update the benchmark configs
+        uses: seemethere/upload-artifact-s3@v5
+        with:
+          s3-bucket: gha-artifacts
+          s3-prefix: |
+            ${{ github.repository }}/${{ github.run_id }}/artifacts/benchmark-configs/
+          retention-days: 1
+          if-no-files-found: error
+          path: extension/benchmark/apple/Benchmark/${{ steps.prepare.outputs.benchmark-config-id }}.json
+
+
   build-benchmark-app:
     name: build-benchmark-app
     uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
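
Both workflows fill in the Jinja-style placeholders with sed rather than a real template engine, after sanitizing the model/config pair into a safe id. A self-contained bash sketch of that step (the spec.yml.j2 file name, URL, and id below are stand-ins, not the real templates or values):

#!/usr/bin/env bash
# Sketch only: mirrors the sed-based templating done by the "Prepare the spec" step.
# spec.yml.j2 stands in for the real android/ios test-spec templates.
set -eux

# A tiny stand-in template with the same two placeholders.
printf 'modelPath: {{ model_path }}\nconfigId: {{ benchmark_config_id }}\n' > spec.yml.j2

MODEL_PATH="https://example.com/artifacts/llama_xnnpack_q8/model.zip"  # placeholder URL
RAW_ID="llama xnnpack+q8"                                              # placeholder model/config pair

# Sanitize the id the same way the workflows do: anything outside
# [A-Za-z0-9._-] becomes an underscore, so it is safe to use in file names.
BENCHMARK_CONFIG_ID=$(echo "${RAW_ID}" | sed -e 's/[^A-Za-z0-9._-]/_/g')

# Substitute both placeholders in place. Commas are used as the sed delimiter
# because the URL itself contains slashes.
sed -i -e 's,{{ model_path }},'"${MODEL_PATH}"',g' spec.yml.j2
sed -i -e 's,{{ benchmark_config_id }},'"${BENCHMARK_CONFIG_ID}"',g' spec.yml.j2

# The workflows then copy the filled-in template to its final .yml name.
cp spec.yml.j2 spec.yml
cat spec.yml

Note that sed -i in this form assumes GNU sed, which matches the linux.2xlarge runners these steps run on; BSD/macOS sed would require an explicit backup suffix after -i.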
