diff --git a/examples/postgrest/pgbench/k6/Makefile b/examples/postgrest/pgbench/k6/Makefile
new file mode 100644
index 0000000..6cc62b3
--- /dev/null
+++ b/examples/postgrest/pgbench/k6/Makefile
@@ -0,0 +1,18 @@
+.PHONY: load
+
+MAKEFLAGS += -j2
+
+export
+
+conns ?= 10
+rampingduration ?= 10
+consecutiveduration ?= 20
+rampscount ?= 1
+requests ?= 1
+rand = $(shell bash -c 'echo $$RANDOM')
+testrun ?= "random-run-$(rand)"
+
+load:
+	@RAMPING_DURATION=$(rampingduration) CONSECUTIVE_DURATION=$(consecutiveduration) RAMPS_COUNT=$(rampscount) \
+	REQUESTS=$(requests) CONNS=$(conns) SHIFT=$(shift) TEST_RUN=$(testrun) ./k6 run load.js \
+	--tag testrun=$(testrun) --tag system='postgrest' -o 'prometheus=namespace=k6'
\ No newline at end of file
diff --git a/examples/postgrest/pgbench/k6/common.js b/examples/postgrest/pgbench/k6/common.js
new file mode 100644
index 0000000..1e924fc
--- /dev/null
+++ b/examples/postgrest/pgbench/k6/common.js
@@ -0,0 +1,50 @@
+/**
+ * Return a random integer between the minimum (inclusive)
+ * and maximum (exclusive) values
+ * @param {number} min - The minimum value to return.
+ * @param {number} max - The maximum value you want to return.
+ * @return {number} The random number between the min and max.
+ */
+export function getRandomInt(min, max) {
+  min = Math.ceil(min)
+  max = Math.floor(max)
+  // The maximum is exclusive and the minimum is inclusive
+  return Math.floor(Math.random() * (max - min) + min)
+}
+
+/**
+ * Generate a default k6 ramping-vus scenario.
+ * @param {number} rampingDuration - Duration (seconds) of each ramp-up stage.
+ * @param {number} consecutiveDuration - Duration (seconds) each VU level is held.
+ * @param {number} ramps - Number of ramp/hold stage pairs to generate.
+ * @param {number} conns - Maximum number of VUs, reached in the final stage.
+ * @return {object} A k6 `ramping-vus` scenario definition.
+ *
+ * Starting from 0 VUs, for each ramp i in 1..ramps the scenario ramps up to
+ * (i / ramps) * conns VUs over `rampingDuration` seconds and then holds that
+ * level for `consecutiveDuration` seconds, so the last stage runs at the full
+ * `conns` VUs. VUs are ramped down within the 10s graceful ramp-down window.
+ */
+export function scenario(rampingDuration, consecutiveDuration, ramps, conns) {
+  const stages = []
+  for (let i = 1; i <= ramps; i++) {
+    stages.push({
+      duration: `${parseInt(rampingDuration)}s`,
+      target: (i * parseInt(conns)) / parseInt(ramps),
+    })
+    stages.push({
+      duration: `${parseInt(consecutiveDuration)}s`,
+      target: (i * parseInt(conns)) / parseInt(ramps),
+    })
+  }
+
+  return {
+    executor: 'ramping-vus',
+    startVUs: 0,
+    stages: stages,
+    gracefulRampDown: '10s',
+  }
+}
+
+/* Exporting an array of default summaryTrendStats to be used in summary result. */
+export const trends = ['avg', 'med', 'p(99)', 'p(95)', 'p(0.1)', 'count']
diff --git a/examples/postgrest/pgbench/k6/load.js b/examples/postgrest/pgbench/k6/load.js
new file mode 100644
index 0000000..5f8741d
--- /dev/null
+++ b/examples/postgrest/pgbench/k6/load.js
@@ -0,0 +1,285 @@
+import { check, sleep, group } from 'k6'
+import http from 'k6/http'
+import { vu, scenario } from 'k6/execution'
+import { Rate, Counter, Trend } from 'k6/metrics'
+
+import sql from 'k6/x/sql'
+
+import { scenario as sc, trends } from './common.js'
+export { handleSummary } from './summary.js'
+
+const serviceToken = __ENV.SERVICE_TOKEN
+const baseUri = __ENV.BASE_URI
+  ? __ENV.BASE_URI
+  : 'https://proj.supabase.com'
+const restURI = __ENV.REST_URI ? __ENV.REST_URI : `${baseUri}/rest/v1`
+
+const pgConnectionString = __ENV.CONN_STRING
+  ? __ENV.CONN_STRING
+  : `postgres://postgres_user:postgres_pass@$postgres_host:6543/postgres?sslmode=disable`
+
+const conns = __ENV.CONNS ? parseInt(__ENV.CONNS) : 10
+const requests = __ENV.REQUESTS ? parseInt(__ENV.REQUESTS) : 1
+const rampingDuration = __ENV.RAMPING_DURATION
+  ?
parseInt(__ENV.RAMPING_DURATION) + : 20 +const consecutiveDuration = __ENV.CONSECUTIVE_DURATION + ? parseInt(__ENV.CONSECUTIVE_DURATION) + : 40 +const ramps = __ENV.RAMPS_COUNT ? parseInt(__ENV.RAMPS_COUNT) : 10 +const testRun = __ENV.TEST_RUN ? __ENV.TEST_RUN : 'default' + +const myFailRate = new Rate('failed_requests') +const counterTX = new Counter('tx') +const counterFailed = new Counter('failed') +const txTrend = new Trend('tx_trend', true) + +const to = { + failed_requests: ['rate<0.1'], + http_req_duration: ['p(95)<1000'], +} + +export const options = { + setupTimeout: 600000, + vus: 1, + thresholds: to, + summaryTrendStats: trends, + scenarios: { + pgrest_rpc_update: sc(rampingDuration, consecutiveDuration, ramps, conns), + }, +} + +const headers = { + accept: 'application/json', + Authorization: `Bearer ${serviceToken}`, + apikey: serviceToken, + 'Content-Type': 'application/json', +} + +const db = sql.open('postgres', pgConnectionString) + +export function setup() { + db.exec( + ` +set statement_timeout = 600000; +drop table if exists history; +drop table if exists accounts; +drop table if exists tellers; +drop table if exists branches; + +create table branches ( + bid serial primary key, + bbalance int, + filler char(88) +); + +create table tellers ( + tid serial primary key, + bid int references branches(bid), + tbalance int, + filler char(84) +); + +create table accounts ( + aid serial primary key, + bid int references branches(bid), + abalance int, + filler char(84) +); + +create table history ( + hid serial primary key, + tid int, + bid int, + aid int, + delta int, + mtime timestamp, + filler char(22) +); + +-- Create indexes +create index idx_accounts_bid on accounts(bid); +create index idx_tellers_bid on tellers(bid); +create index idx_history_tid on history(tid); +create index idx_history_bid on history(bid); +create index idx_history_aid on history(aid); + ` + ) + + db.exec( + ` +-- Insert branches +insert into branches (bbalance, filler) +select 
0, '' +from generate_series(1, 10); + +-- Insert tellers +insert into tellers (bid, tbalance, filler) +select bid, 0, '' +from branches, generate_series(1, 10); + +-- Insert accounts +insert into accounts (bid, abalance, filler) +select bid, 0, '' +from branches, generate_series(1, 100000); + +-- Prepopulate historical data +insert into history (tid, bid, aid, delta, mtime, filler) +select (random() * 100 + 1)::INT, bid, aid, (random() * 1000 - 500)::INT, NOW(), '' +from accounts +limit 10000000; + ` + ) + +db.exec( + ` +create or replace function update_account_balance(acc int, delta int) returns void as $$ +begin + update accounts set abalance = abalance + delta where aid = acc; +end; +$$ language plpgsql; + +create or replace function update_teller_balance(tel int, delta int) returns void as $$ +begin + update tellers set tbalance = tbalance + delta where tid = tel; +end; +$$ language plpgsql; + +create or replace function update_branch_balance(br int, delta int) returns void as $$ +begin + update branches set bbalance = bbalance + delta where bid = br; +end; +$$ language plpgsql; + ` +) +} + +export default () => { + const name = vu.idInTest + + while (scenario.progress < 1) { + const start = new Date() + for (let i = 1; i <= requests; i++) { + const tid = Math.floor(Math.random() * 100) + 1; // Random teller id + const bid = Math.floor(Math.random() * 10) + 1; // Random branch id + const aid = Math.floor(Math.random() * 100000) + 1; // Random account id + const delta = Math.floor(Math.random() * 1000) - 500; // Random transaction amount + + const exStart = new Date() + + let body = JSON.stringify({ + acc: aid, + delta: delta + }) + let res = http.post( + `${restURI}/rpc/update_account_balance`, + body, + { headers: headers } + ) + myFailRate.add(res.status !== 204) + if (res.status !== 204) { + console.log(`Update account balance failed with status ${res.status}, ${res.status_text}`) + counterTX.add(1) + counterFailed.add(1) + continue + } + + res = http.get( 
+ `${restURI}/accounts?aid=eq.${aid}&select=abalance`, + { headers: headers } + ) + myFailRate.add(res.status !== 200) + if (res.status !== 200) { + console.log(`Select account balance failed with status ${res.status}, ${res.status_text}`) + counterTX.add(1) + counterFailed.add(1) + continue + } + + // Update teller balance + body = JSON.stringify({ + tel: tid, + delta: delta + }) + res = http.post( + `${restURI}/rpc/update_teller_balance`, + body, + { headers: headers } + ) + myFailRate.add(res.status !== 204) + if (res.status !== 204) { + console.log(`Update teller balance failed with status ${res.status}, ${res.status_text}`) + counterTX.add(1) + counterFailed.add(1) + continue + } + + // Update branch balance + body = JSON.stringify({ + br: bid, + delta: delta + }) + res = http.post( + `${restURI}/rpc/update_branch_balance`, + body, + { headers: headers } + ) + myFailRate.add(res.status !== 204) + if (res.status !== 204) { + console.log(`Update branch balance failed with status ${res.status}, ${res.status_text}`) + counterTX.add(1) + counterFailed.add(1) + continue + } + + // Insert history + body = JSON.stringify({ + tid: tid, + bid: bid, + aid: aid, + delta: delta, + mtime: new Date().toISOString(), + filler: '' + }) + res = http.post( + `${restURI}/history?columns=tid,bid,aid,delta,mtime,filler`, + body, + { headers: headers } + ) + myFailRate.add(res.status !== 201) + if (res.status !== 201) { + console.log(`Insert history failed with status ${res.status}, ${res.status_text}`) + counterTX.add(1) + counterFailed.add(1) + continue + } + + const exFinish = new Date() + counterTX.add(1) + txTrend.add(exFinish - exStart) + const finish = new Date() + if (finish - start > 1000) { + break + } + } + const finish = new Date() + if (finish - start < 1000) { + sleep((1000 - (finish - start)) / 1000) + } + } +} + +export function teardown(data) { + db.exec( + ` +drop function if exists update_account_balance(int, int); +drop function if exists 
update_teller_balance(int, int);
+drop function if exists update_branch_balance(int, int);
+drop table if exists history;
+drop table if exists accounts;
+drop table if exists tellers;
+drop table if exists branches;
+  `
+  )
+  db.close()
+}
\ No newline at end of file
diff --git a/examples/postgrest/pgbench/k6/summary.js b/examples/postgrest/pgbench/k6/summary.js
new file mode 100644
index 0000000..5f04bef
--- /dev/null
+++ b/examples/postgrest/pgbench/k6/summary.js
@@ -0,0 +1,79 @@
+import http from 'k6/http'
+import { textSummary } from 'https://jslib.k6.io/k6-summary/0.0.1/index.js'
+
+/* Setting up the environment variables for the test run. */
+const testrun = __ENV.TEST_RUN
+const origin = __ENV.TEST_ORIGIN
+const benchmark = __ENV.BENCHMARK_ID
+const run = __ENV.RUN_ID
+const token = __ENV.SUPABENCH_TOKEN
+const supabench_uri = __ENV.SUPABENCH_URI
+  ? __ENV.SUPABENCH_URI
+  : 'http://localhost:8090'
+
+/**
+ * Handle summary implementation that additionally sends the data to the reports server.
+ */
+export function handleSummary(data) {
+  console.log('Preparing the end-of-test summary...')
+  const started = Date.now()
+
+  // Send the results to remote server
+  if (!run) {
+    const report = {
+      output: textSummary(data, { indent: ' ', enableColors: false }),
+      raw: data,
+      benchmark_id: benchmark,
+      name: testrun ? testrun : null,
+      status: 'success',
+      origin: origin,
+      started_at: `${started - 60 * 1000}`,
+      ended_at: `${
+        started + parseInt(data.state.testRunDurationMs) + 60 * 1000
+      }`,
+    }
+
+    const resp = http.post(
+      `${supabench_uri}/api/collections/runs/records`,
+      JSON.stringify(report),
+      {
+        headers: {
+          'Content-Type': 'application/json',
+          Authorization: `Admin ${token}`,
+        },
+      }
+    )
+    if (resp.status !== 200) {
+      console.error('Could not send summary, got status ' + resp.status)
+    }
+  } else {
+    const report = {
+      output: textSummary(data, { indent: ' ', enableColors: false }),
+      raw: data,
+      status: 'success',
+      started_at: `${started - 120 * 1000}`,
+      ended_at: `${
+        started + parseInt(data.state.testRunDurationMs) + 15 * 1000
+      }`,
+    }
+
+    const resp = http.patch(
+      `${supabench_uri}/api/collections/runs/records/${run}`,
+      JSON.stringify(report),
+      {
+        headers: {
+          'Content-Type': 'application/json',
+          Authorization: `Admin ${token}`,
+        },
+      }
+    )
+    if (resp.status !== 200) {
+      console.error('Could not send summary, got status ' + resp.status)
+    }
+  }
+
+  return {
+    stdout: textSummary(data, { indent: ' ', enableColors: true }), // Show the text summary to stdout...
+    'summary.json': JSON.stringify(data), // and a JSON with all the details...
+ } +} diff --git a/examples/postgrest/pgbench/main.tf b/examples/postgrest/pgbench/main.tf new file mode 100644 index 0000000..73d96d3 --- /dev/null +++ b/examples/postgrest/pgbench/main.tf @@ -0,0 +1,41 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "4.0.0" + } + } +} + +provider "aws" { + region = "ap-southeast-1" +} + +module "script" { + source = "./modules/script" + + ami_id = var.ami_id + instance_type = var.instance_type + instances_count = var.instances_count + security_group_id = var.security_group_id + subnet_id = var.subnet_id + sut_name = var.sut_name + key_name = var.key_name + private_key_location = var.private_key_location + + testrun_name = var.testrun_name + testrun_id = var.testrun_id + test_origin = var.test_origin + benchmark_id = var.benchmark_id + supabench_token = var.supabench_token + supabench_uri = var.supabench_uri + + anon_token = var.anon_token + service_token = var.service_token + base_uri = var.base_uri + conns = var.conns + requests = var.requests + rampscount = var.rampscount + rampingduration = var.rampingduration + consecutiveduration = var.consecutiveduration +} \ No newline at end of file diff --git a/examples/postgrest/pgbench/modules/script/entrypoint.sh.tpl b/examples/postgrest/pgbench/modules/script/entrypoint.sh.tpl new file mode 100644 index 0000000..f16ef30 --- /dev/null +++ b/examples/postgrest/pgbench/modules/script/entrypoint.sh.tpl @@ -0,0 +1,35 @@ +#!/bin/bash + +# update golang and make sure go is in path +wget https://golang.org/dl/go1.19.linux-amd64.tar.gz +sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.19.linux-amd64.tar.gz +export PATH=$PATH:/usr/local/go/bin + +# build k6 with xk6 plugins, you may add some extra plugins here if needed +~/go/bin/xk6 build v0.37.0 --output /tmp/k6/k6 \ + --with github.com/jdheyburn/xk6-prometheus@v0.1.6 + +# run telegraf to collect metrics from k6 and host and push them to prometheus +telegraf --config telegraf.conf 
&>/dev/null &
+
+# go to k6 dir and run k6
+cd /tmp/k6 || exit 1
+
+# leave these as is. Supabench will pass it and it is needed to upload the report.
+export RUN_ID="${testrun_id}"
+export BENCHMARK_ID="${benchmark_id}"
+export TEST_RUN="${testrun_name}"
+export TEST_ORIGIN="${test_origin}"
+export SUPABENCH_TOKEN="${supabench_token}"
+export SUPABENCH_URI="${supabench_uri}"
+
+# this is the place to add your variables, required by benchmark.
+export ANON_TOKEN="${anon_token}"
+export SERVICE_TOKEN="${service_token}"
+export BASE_URI="${base_uri}"
+
+# make command from the k6 folder to run k6 benchmark, you can add some extra vars here if needed
+# Leave testrun_name as it is passed to k6 command to add global tag to all metrics for grafana!
+make load \
+  rampingduration="${rampingduration}" consecutiveduration="${consecutiveduration}" rampscount="${rampscount}" \
+  requests="${requests}" conns="${conns}" shift="${shift}" testrun="${testrun_name}"
\ No newline at end of file
diff --git a/examples/postgrest/pgbench/modules/script/main.tf b/examples/postgrest/pgbench/modules/script/main.tf
new file mode 100644
index 0000000..86423cb
--- /dev/null
+++ b/examples/postgrest/pgbench/modules/script/main.tf
@@ -0,0 +1,103 @@
+# creating ec2 instance that will be used to generate load
+# Most likely you will not need to change it
+resource "aws_instance" "k6" {
+  count = var.instances_count
+
+  ami                    = var.ami_id
+  instance_type          = var.instance_type
+  vpc_security_group_ids = [var.security_group_id]
+  subnet_id              = var.subnet_id
+
+  key_name = var.key_name
+
+  tags = {
+    terraform   = "true"
+    environment = "qa"
+    app         = var.sut_name
+    creator     = "supabench"
+  }
+}
+
+# uploading k6 scripts and running k6 load test
+resource "null_resource" "remote" {
+  count = var.instances_count
+
+  # ssh into instance, you likely won't need to change this part
+  connection {
+    type        = "ssh"
+    user        = var.instance_user
+    host        = aws_instance.k6[count.index].public_ip
+    private_key = file(var.private_key_location)
+    timeout     = "1m"
+  }
+
+  # upload k6 scripts to remote instance, you likely won't need to change this part
+  provisioner "file" {
+    source      = "${path.root}/k6"
+    destination = "/tmp"
+  }
+
+  # upload entrypoint script to remote instance
+  # specify your custom variables here
+  provisioner "file" {
+    destination = "/tmp/k6/entrypoint.sh"
+
+    content = templatefile(
+      "${path.module}/entrypoint.sh.tpl",
+      {
+        # add your custom variables here
+        anon_token          = var.anon_token
+        service_token       = var.service_token
+        base_uri            = var.base_uri
+        conns               = var.conns
+        shift               = count.index * 100000
+        requests            = var.requests
+        rampscount          = var.rampscount
+        rampingduration     = var.rampingduration
+        consecutiveduration = var.consecutiveduration
+
+        # don't change these
+        testrun_id      = var.testrun_id
+        benchmark_id    = var.benchmark_id
+        testrun_name    = var.testrun_name
+        test_origin     = var.test_origin
+        supabench_token = var.supabench_token
+        supabench_uri   = var.supabench_uri
+      }
+    )
+  }
+
+  # set env vars
+  provisioner "remote-exec" {
+    inline = [
+      "#!/bin/bash",
+      # add your env vars here:
+      "echo \"export ANON_TOKEN='${var.anon_token}'\" >> ~/.bashrc",
+      "echo \"export SERVICE_TOKEN='${var.service_token}'\" >> ~/.bashrc",
+      "echo \"export BASE_URI='${var.base_uri}'\" >> ~/.bashrc",
+      # don't change these:
+      "echo \"export RUN_ID='${var.testrun_id}'\" >> ~/.bashrc",
+      "echo \"export BENCHMARK_ID='${var.benchmark_id}'\" >> ~/.bashrc",
+      "echo \"export TEST_RUN='${var.testrun_name}'\" >> ~/.bashrc",
+      "echo \"export TEST_ORIGIN='${var.test_origin}'\" >> ~/.bashrc",
+      "echo \"export SUPABENCH_TOKEN='${var.supabench_token}'\" >> ~/.bashrc",
+      "echo \"export SUPABENCH_URI='${var.supabench_uri}'\" >> ~/.bashrc",
+    ]
+  }
+
+  # run k6 load test, you likely won't need to change this part
+  provisioner "remote-exec" {
+    inline = [
+      "#!/bin/bash",
+      "source ~/.bashrc",
+      "sudo chown -R ubuntu:ubuntu /tmp/k6",
+      "sudo chmod +x /tmp/k6/entrypoint.sh",
+      "/tmp/k6/entrypoint.sh",
+ ] + } + + # we should provide instance first so that we can ssh into it + depends_on = [ + aws_instance.k6, + ] +} \ No newline at end of file diff --git a/examples/postgrest/pgbench/modules/script/variables.tf b/examples/postgrest/pgbench/modules/script/variables.tf new file mode 100644 index 0000000..eecddb5 --- /dev/null +++ b/examples/postgrest/pgbench/modules/script/variables.tf @@ -0,0 +1,139 @@ +# This block is related to your benchmark. +# Variables to pass to the benchmark script. +# Variables required to run the SUT infrastructure. + +# Specify some variables that are required for your benchmark. + +variable "anon_token" { + description = "anon_token - anon token for the project" + type = string +} +variable "service_token" { + description = "service_token - service token for the project" + type = string +} +variable "base_uri" { + description = "base_uri - supabase project base uri" + type = string + default = "https://proj.supabase.com" +} +variable "conns" { + description = "conns - number of virtual users" + type = string + default = "100" +} +variable "requests" { + description = "requests - number of requests per virtual user" + type = string + default = "10" +} +variable "rampscount" { + description = "rampscount - number of stages with ramping vus and holding them for a duration" + type = string + default = "10" +} +variable "rampingduration" { + description = "rampingduration - duration of the ramping stage" + type = string + default = "30" +} +variable "consecutiveduration" { + description = "consecutiveduration - duration of the consecutive requests stage" + type = string + default = "60" +} + +# Some variables that you can reuse. + +# You will probably need these to create ec2 loader instance. +# You should set values for these variables in supabench. 
+ +variable "ec2_name" { + description = "Name of ec2 loader instance" + type = string + default = "supaloader" # run ID +} + +variable "instance_type" { + description = "Size of ec2 loader instance" + type = string + default = "t2.micro" +} + +variable "instances_count" { + description = "Number of EC2 instances" + type = number + default = 1 +} + +variable "ami_id" { + description = "AMI to use for ec2 loader instance" + type = string +} + +variable "security_group_id" { + description = "Security group to use for ec2 loader instance" + type = string +} + +variable "subnet_id" { + description = "Subnet to use for ec2 loader instance" + type = string +} + +variable "instance_user" { + description = "The instance user for sshing" + type = string + default = "admin" +} + +variable "key_name" { + description = "The instance key" + type = string +} + +variable "private_key_location" { + description = "Location of your private key to SSH into the instance" + type = string +} + +variable "sut_name" { + description = "Name of the system under test" + type = string + default = "postgrest" +} + +# Leave these variables as is. They will be passed by Supabench. +# You don't need to set values for it. 
+ +variable "testrun_name" { + description = "Name of the testrun" + type = string +} + +variable "testrun_id" { + description = "ID of the testrun" + type = string +} + +variable "test_origin" { + description = "Origin of the test" + type = string + default = "" +} + +variable "benchmark_id" { + description = "ID of the benchmark" + type = string +} + +variable "supabench_token" { + description = "Token to access the supabench" + type = string + sensitive = true +} + +variable "supabench_uri" { + description = "URI of the supabench server" + type = string +} diff --git a/examples/postgrest/pgbench/variables.tf b/examples/postgrest/pgbench/variables.tf new file mode 100644 index 0000000..7e21a97 --- /dev/null +++ b/examples/postgrest/pgbench/variables.tf @@ -0,0 +1,156 @@ +# This block is related to your benchmark. +# Variables to pass to the benchmark script. +# Variables required to run the SUT infrastructure. + +# Specify some variables that are required for your benchmark. + +variable "anon_token" { + description = "anon_token - anon token for the project" + type = string +} +variable "service_token" { + description = "service_token - service token for the project" + type = string +} +variable "base_uri" { + description = "base_uri - supabase project base uri" + type = string + default = "https://proj.supabase.com" +} +variable "conns" { + description = "conns - number of virtual users" + type = string + default = "100" +} +variable "requests" { + description = "requests - number of requests per virtual user" + type = string + default = "10" +} +variable "rampingduration" { + description = "rampingduration - duration of the ramping stage" + type = string + default = "30" +} +variable "consecutiveduration" { + description = "consecutiveduration - duration of the consecutive requests stage" + type = string + default = "60" +} +variable "rampscount" { + description = "rampscount - number of stages with ramping vus and holding them for a duration" + type = string + 
default = "10" +} + +# Some variables required to setup the SUT. + +# Some variables that you can reuse. + +# You will probably need these to create ec2 loader instance. +# You should set values for these variables in supabench. + +variable "ec2_name" { + description = "Name of ec2 loader instance" + type = string + default = "supaloader" # run ID +} + +variable "instance_type" { + description = "Size of ec2 loader instance" + type = string + default = "t2.micro" +} + +variable "instances_count" { + description = "Number of EC2 instances" + type = number + default = 1 +} + +variable "ami_id" { + description = "AMI to use for ec2 loader instance" + type = string + default = "" +} + +variable "security_group_id" { + description = "Security group to use for ec2 loader instance" + type = string + default = "" +} + +variable "subnet_id" { + description = "Subnet to use for ec2 loader instance" + type = string + default = "" +} + +variable "instance_user" { + description = "The instance user for sshing" + type = string + default = "admin" +} + +variable "key_name" { + description = "The instance key" + type = string + default = "egor-dev" +} + +variable "private_key_location" { + description = "Location of your private key to SSH into the instance" + type = string +} + +variable "sut_name" { + description = "Name of the system under test" + type = string + default = "postgrest" +} + +# Leave these variables as is. They will be passed by Supabench. +# You don't need to set values for it. 
+ +variable "testrun_name" { + description = "Name of the testrun" + type = string + default = "" +} + +variable "testrun_id" { + description = "ID of the testrun" + type = string + default = "" +} + +variable "test_origin" { + description = "Origin of the test" + type = string + default = "" +} + +variable "benchmark_id" { + description = "ID of the benchmark" + type = string + default = "" +} + +variable "supabench_token" { + description = "Token to access the supabench" + type = string + default = "" + sensitive = true +} + +variable "supabench_uri" { + description = "URI of the supabench server" + type = string + default = "" +} + +variable "fly_access_token" { + description = "Fly access token" + type = string + default = "" +}