Move benchmarks job back to build-and-test workflow
bayandin committed Jul 5, 2024
1 parent 2c9d59c commit d5a0e3b
Showing 2 changed files with 79 additions and 96 deletions.
93 changes: 0 additions & 93 deletions .github/workflows/_build-and-test-locally.yml
@@ -15,14 +15,6 @@ on:
description: 'debug or release'
required: true
type: string
run-benchmarks:
description: 'run benchmarks'
required: false
type: boolean
outputs:
benchmarks-result:
description: 'benchmarks result'
value: ${{ jobs.benchmarks-result.outputs.benchmarks-result }}

defaults:
run:
@@ -292,91 +284,6 @@ jobs:
inputs.build-type == 'debug' && matrix.pg_version == 'v14'
uses: ./.github/actions/save-coverage-data

get-benchmarks-durations:
# use format(..) to catch both inputs.run-benchmarks = true AND inputs.run-benchmarks = 'true'
if: inputs.build-type == 'release' && format('{0}', inputs.run-benchmarks) == 'true'
outputs:
json: ${{ steps.get-benchmark-durations.outputs.json }}
runs-on: [ self-hosted, gen3, small ]
container:
image: ${{ inputs.build-tools-image }}
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
options: --init
steps:
- uses: actions/checkout@v4

- uses: actions/cache@v4
with:
path: ~/.cache/pypoetry/virtualenvs
key: v1-${{ runner.os }}-${{ runner.arch }}-python-deps-${{ hashFiles('poetry.lock') }}

- run: ./scripts/pysync

- name: get benchmark durations
id: get-benchmark-durations
env:
TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
run: |
poetry run ./scripts/benchmark_durations.py "${TEST_RESULT_CONNSTR}" \
--days 10 \
--output /tmp/benchmark_durations.json
echo "json=$(jq --compact-output '.' /tmp/benchmark_durations.json)" >> $GITHUB_OUTPUT

benchmarks:
needs: [ build-neon, get-benchmarks-durations ]
runs-on: [ self-hosted, gen3, small ]
container:
image: ${{ inputs.build-tools-image }}
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
# for changed limits, see comments on `options:` earlier in this file
options: --init --shm-size=512mb --ulimit memlock=67108864:67108864
strategy:
fail-fast: false
matrix:
# the number of groups (N) should be reflected in `extra_params: --splits N ...`
pytest_split_group: [ 1, 2, 3, 4, 5 ]
steps:
- uses: actions/checkout@v4

- name: Pytest benchmarks
uses: ./.github/actions/run-python-test-set
with:
build_type: ${{ inputs.build-type }}
test_selection: performance
run_in_parallel: false
save_perf_report: ${{ github.ref_name == 'main' }}
extra_params: --splits 5 --group ${{ matrix.pytest_split_group }}
benchmark_durations: ${{ needs.get-benchmarks-durations.outputs.json }}
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
TEST_RESULT_CONNSTR: "${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}"
PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring
PAGESERVER_GET_VECTORED_IMPL: vectored
PAGESERVER_GET_IMPL: vectored
PAGESERVER_VALIDATE_VEC_GET: false

# In the main pipeline we have a job that sends a Slack message if benchmarks failed.
# But GitHub doesn't allow setting a workflow output to a job's result,
# so we do it via an additional job
#
# Ref https://github.com/actions/runner/issues/2495
benchmarks-result:
if: ${{ !cancelled() }}
needs: [ benchmarks ]
runs-on: ubuntu-22.04
outputs:
benchmarks-result: ${{ steps.benchmarks-result.outputs.benchmarks-result }}
env:
BENCHMARKS_RESULT: ${{ needs.benchmarks.result }}
steps:
- run: echo "benchmarks-result=${BENCHMARKS_RESULT}" | tee -a $GITHUB_OUTPUT
id: benchmarks-result

coverage-report:
if: inputs.build-type == 'debug'
needs: [ regress-tests ]
…
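A note on the `format(..)` guard in the removed `get-benchmarks-durations` job above: a boolean input can reach a reusable workflow either as a genuine boolean or as the string 'true' (for example, when the caller computes it from an expression), and `format('{0}', ...)` stringifies both so a single comparison covers the two cases. A minimal sketch of the pattern, with hypothetical workflow and job names not taken from this commit:

on:
  workflow_call:
    inputs:
      run-benchmarks:
        required: false
        type: boolean

jobs:
  guarded-job:
    # format('{0}', ...) renders the input as a string, so the check
    # passes for both true (boolean) and 'true' (string)
    if: ${{ format('{0}', inputs.run-benchmarks) == 'true' }}
    runs-on: ubuntu-22.04
    steps:
      - run: echo "benchmarks enabled"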
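Similarly, the removed `benchmarks-result` job exists because a reusable workflow's outputs may only reference job outputs, not a job's `result` (actions/runner#2495), so a trampoline job first copies `needs.benchmarks.result` into a step output. On the caller side, the forwarded value was consumed roughly as in this sketch (job names hypothetical; it mirrors the pre-move wiring of `report-benchmarks-failures` below):

# Hypothetical caller job reading the output forwarded by the
# benchmarks-result trampoline in the reusable workflow.
notify-on-benchmark-failure:
  needs: [ build-and-test-locally ]
  if: failure() && needs.build-and-test-locally.outputs.benchmarks-result == 'failure'
  runs-on: ubuntu-22.04
  steps:
    - run: echo "benchmarks failed, sending notification"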
82 changes: 79 additions & 3 deletions .github/workflows/build_and_test.yml
@@ -205,12 +205,88 @@ jobs:
build-tools-image: ${{ needs.build-build-tools-image.outputs.image }}
build-tag: ${{ needs.tag.outputs.build-tag }}
build-type: ${{ matrix.build-type }}
-run-benchmarks: ${{ github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks') }}
secrets: inherit

# We keep the `benchmarks` job outside of the `build-and-test-locally` workflow to make job failures non-blocking
get-benchmarks-durations:
if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
outputs:
json: ${{ steps.get-benchmark-durations.outputs.json }}
needs: [ check-permissions, build-build-tools-image ]
runs-on: [ self-hosted, gen3, small ]
container:
image: ${{ needs.build-build-tools-image.outputs.image }}
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
options: --init
steps:
- name: Checkout
uses: actions/checkout@v4

- name: Cache poetry deps
uses: actions/cache@v4
with:
path: ~/.cache/pypoetry/virtualenvs
key: v1-${{ runner.os }}-${{ runner.arch }}-python-deps-${{ hashFiles('poetry.lock') }}

- name: Install Python deps
run: ./scripts/pysync

- name: get benchmark durations
id: get-benchmark-durations
env:
TEST_RESULT_CONNSTR: ${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}
run: |
poetry run ./scripts/benchmark_durations.py "${TEST_RESULT_CONNSTR}" \
--days 10 \
--output /tmp/benchmark_durations.json
echo "json=$(jq --compact-output '.' /tmp/benchmark_durations.json)" >> $GITHUB_OUTPUT

benchmarks:
if: github.ref_name == 'main' || contains(github.event.pull_request.labels.*.name, 'run-benchmarks')
needs: [ check-permissions, build-and-test-locally, build-build-tools-image, get-benchmarks-durations ]
runs-on: [ self-hosted, gen3, small ]
container:
image: ${{ needs.build-build-tools-image.outputs.image }}
credentials:
username: ${{ secrets.NEON_DOCKERHUB_USERNAME }}
password: ${{ secrets.NEON_DOCKERHUB_PASSWORD }}
# for changed limits, see comments on `options:` earlier in this file
options: --init --shm-size=512mb --ulimit memlock=67108864:67108864
strategy:
fail-fast: false
matrix:
# the number of groups (N) should be reflected in `extra_params: --splits N ...`
pytest_split_group: [ 1, 2, 3, 4, 5 ]
build_type: [ release ]
steps:
- name: Checkout
uses: actions/checkout@v4

- name: Pytest benchmarks
uses: ./.github/actions/run-python-test-set
with:
build_type: ${{ matrix.build_type }}
test_selection: performance
run_in_parallel: false
save_perf_report: ${{ github.ref_name == 'main' }}
extra_params: --splits 5 --group ${{ matrix.pytest_split_group }}
benchmark_durations: ${{ needs.get-benchmarks-durations.outputs.json }}
env:
VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"
TEST_RESULT_CONNSTR: "${{ secrets.REGRESS_TEST_RESULT_CONNSTR_NEW }}"
PAGESERVER_VIRTUAL_FILE_IO_ENGINE: tokio-epoll-uring
PAGESERVER_GET_VECTORED_IMPL: vectored
PAGESERVER_GET_IMPL: vectored
PAGESERVER_VALIDATE_VEC_GET: false
# XXX: no coverage data handling here, since benchmarks are run on release builds,
# while coverage is currently collected for the debug ones

report-benchmarks-failures:
-needs: [ build-and-test-locally, create-test-report ]
-if: github.ref_name == 'main' && failure() && needs.build-and-test-locally.outputs.benchmarks-result == 'failure'
+needs: [ benchmarks, create-test-report ]
+if: github.ref_name == 'main' && failure() && needs.benchmarks.result == 'failure'
runs-on: ubuntu-22.04

steps:
…
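One detail in the `get benchmark durations` step above: the JSON is passed through `jq --compact-output` because a plain `key=value` line appended to `$GITHUB_OUTPUT` must fit on a single line. Had the payload needed to stay multi-line, the documented heredoc form would be the alternative, sketched here:

# Heredoc form for multi-line values in $GITHUB_OUTPUT; the delimiter
# is arbitrary as long as it does not occur in the payload.
{
  echo "json<<EOF"
  cat /tmp/benchmark_durations.json
  echo "EOF"
} >> "$GITHUB_OUTPUT"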
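The `--splits 5 --group N` flags in the `benchmarks` job come from the pytest-split plugin, which shards the suite across the five matrix groups and balances them using the stored durations fetched by `get-benchmarks-durations`. A rough local equivalent of a single shard, assuming the durations file from above (the test path is illustrative, not taken from this commit):

# One of five shards; pytest-split provides --splits, --group and
# --durations-path for timing-balanced splitting.
poetry run pytest test_runner/performance \
  --splits 5 \
  --group 2 \
  --durations-path /tmp/benchmark_durations.json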
