# Benchmarking (#1204)
# NOTE(review): the two lines below are GitHub web-UI banner text that was
# pasted into the file; kept as comments so the document remains valid YAML.
# "This file contains bidirectional Unicode text that may be interpreted or
# compiled differently than what appears below." — the only non-ASCII here is
# the box-drawing comment art in the cron schedule; no hidden bidi controls.
name: Benchmarking

on:
  # uncomment to run on push for debugging your PR
  # push:
  #   branches: [ your branch ]
  schedule:
    # * is a special character in YAML so you have to quote this string
    # ┌───────────── minute (0 - 59)
    # │ ┌───────────── hour (0 - 23)
    # │ │ ┌───────────── day of the month (1 - 31)
    # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC)
    # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
    - cron: '0 3 * * *'  # run once a day, timezone is utc
  workflow_dispatch:  # adds ability to run this manually
    inputs:
      region_id:
        description: 'Project region id. If not set, the default region will be used'
        required: false
        default: 'aws-us-east-2'
      save_perf_report:
        type: boolean
        description: 'Publish perf report. If not set, the report will be published only for the main branch'
        required: false

defaults:
  run:
    # Fail fast, echo commands, and propagate pipeline failures in every step.
    shell: bash -euxo pipefail {0}

concurrency:
  # Allow only one workflow per any non-`main` branch.
  group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}
  cancel-in-progress: true

jobs:
  generate-matrices:
    # Create matrices for the benchmarking jobs, so we run benchmarks on rds only once a week (on Saturday)
    #
    # Available platforms:
    # - neon-captest-new: Freshly created project (1 CU)
    # - neon-captest-freetier: Use freetier-sized compute (0.25 CU)
    # - neon-captest-reuse: Reusing existing project
    # - rds-aurora: Aurora Postgres Serverless v2 with autoscaling from 0.5 to 2 ACUs
    # - rds-postgres: RDS Postgres db.m5.large instance (2 vCPU, 8 GiB) with gp3 EBS storage
    runs-on: ubuntu-latest
    outputs:
      pgbench-compare-matrix: ${{ steps.pgbench-compare-matrix.outputs.matrix }}
    steps:
      - name: Generate matrix for pgbench benchmark
        id: pgbench-compare-matrix
        run: |
          matrix='{
            "include": [
              { "platform": "neon-captest-new", "db_size": "300gb", "compute_units": "[7, 7]", "provisioner": "k8s-pod" },
              { "platform": "neonvm-captest-new", "db_size": "300gb", "compute_units": "[7, 7]", "provisioner": "k8s-neonvm" }
            ]
          }'
          echo "matrix=$(echo "$matrix" | jq --compact-output '.')" >> $GITHUB_OUTPUT

  pgbench-compare:
    needs: [ generate-matrices ]
    strategy:
      fail-fast: false
      matrix: ${{ fromJson(needs.generate-matrices.outputs.pgbench-compare-matrix) }}
    env:
      TEST_PG_BENCH_DURATIONS_MATRIX: "60m"
      TEST_PG_BENCH_SCALES_MATRIX: ${{ matrix.db_size }}
      POSTGRES_DISTRIB_DIR: /tmp/neon/pg_install
      # Quoted so the value stays a string rather than a YAML integer.
      DEFAULT_PG_VERSION: "14"
      TEST_OUTPUT: /tmp/test_output
      BUILD_TYPE: remote
      # Publish the perf report when requested via workflow_dispatch input,
      # or always on the main branch.
      SAVE_PERF_REPORT: ${{ github.event.inputs.save_perf_report || ( github.ref_name == 'main' ) }}
      PLATFORM: ${{ matrix.platform }}
    runs-on: [ self-hosted, us-east-2, x64 ]
    container:
      image: 369495373322.dkr.ecr.eu-central-1.amazonaws.com/rust:pinned
      options: --init
    # Increase timeout to 8h, default timeout is 6h
    timeout-minutes: 480
    steps:
      - uses: actions/checkout@v3

      - name: Download Neon artifact
        uses: ./.github/actions/download
        with:
          name: neon-${{ runner.os }}-release-artifact
          path: /tmp/neon/
          prefix: latest

      - name: Add Postgres binaries to PATH
        run: |
          ${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin/pgbench --version
          echo "${POSTGRES_DISTRIB_DIR}/v${DEFAULT_PG_VERSION}/bin" >> $GITHUB_PATH

      - name: Create Neon Project
        # Only the "new"/"freetier" platforms provision a fresh project;
        # the other platforms reuse existing databases via secrets below.
        if: contains(fromJson('["neon-captest-new", "neon-captest-freetier", "neonvm-captest-new", "neonvm-captest-freetier"]'), matrix.platform)
        id: create-neon-project
        uses: ./.github/actions/neon-project-create
        with:
          region_id: ${{ github.event.inputs.region_id || 'aws-us-east-2' }}
          postgres_version: ${{ env.DEFAULT_PG_VERSION }}
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}
          compute_units: ${{ matrix.compute_units }}
          provisioner: ${{ matrix.provisioner }}

      - name: Set up Connection String
        id: set-up-connstr
        run: |
          case "${PLATFORM}" in
            neon-captest-reuse)
              CONNSTR=${{ secrets.BENCHMARK_CAPTEST_CONNSTR }}
              ;;
            neon-captest-new | neon-captest-freetier | neonvm-captest-new | neonvm-captest-freetier)
              CONNSTR=${{ steps.create-neon-project.outputs.dsn }}
              ;;
            rds-aurora)
              CONNSTR=${{ secrets.BENCHMARK_RDS_AURORA_CONNSTR }}
              ;;
            rds-postgres)
              CONNSTR=${{ secrets.BENCHMARK_RDS_POSTGRES_CONNSTR }}
              ;;
            *)
              echo >&2 "Unknown PLATFORM=${PLATFORM}"
              exit 1
              ;;
          esac

          echo "connstr=${CONNSTR}" >> $GITHUB_OUTPUT

          QUERY="SELECT version();"
          if [[ "${PLATFORM}" = "neon"* ]]; then
            QUERY="${QUERY} SHOW neon.tenant_id; SHOW neon.timeline_id;"
          fi
          psql ${CONNSTR} -c "${QUERY}"

      - name: Benchmark init
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_init
        env:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

      - name: Benchmark simple-update
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_simple_update
        env:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

      - name: Benchmark select-only
        uses: ./.github/actions/run-python-test-set
        with:
          build_type: ${{ env.BUILD_TYPE }}
          test_selection: performance
          run_in_parallel: false
          save_perf_report: ${{ env.SAVE_PERF_REPORT }}
          extra_params: -m remote_cluster --timeout 21600 -k test_pgbench_remote_select_only
        env:
          BENCHMARK_CONNSTR: ${{ steps.set-up-connstr.outputs.connstr }}
          VIP_VAP_ACCESS_TOKEN: "${{ secrets.VIP_VAP_ACCESS_TOKEN }}"
          PERF_TEST_RESULT_CONNSTR: "${{ secrets.PERF_TEST_RESULT_CONNSTR }}"

      - name: Delete Neon Project
        # Run even if earlier steps failed, but only when a project was created.
        if: ${{ steps.create-neon-project.outputs.project_id && always() }}
        uses: ./.github/actions/neon-project-delete
        with:
          project_id: ${{ steps.create-neon-project.outputs.project_id }}
          api_key: ${{ secrets.NEON_STAGING_API_KEY }}

      - name: Create Allure report
        if: ${{ !cancelled() }}
        uses: ./.github/actions/allure-report-generate

      - name: Post to a Slack channel
        # Alert only on failures of the nightly scheduled run, not manual runs.
        if: ${{ github.event.schedule && failure() }}
        uses: slackapi/slack-github-action@v1
        with:
          channel-id: "C033QLM5P7D" # dev-staging-stream
          slack-message: "Periodic perf testing ${{ matrix.platform }}: ${{ job.status }}\n${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
        env:
          SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}