From da851582dff192e6d75fdd47804c57dc8b07b298 Mon Sep 17 00:00:00 2001
From: Geoffrey Yu
Date: Sun, 12 Nov 2023 13:32:27 -0500
Subject: [PATCH] Scenario and system adjustments

- Increase length of time between workload changes
- Fix test failure
- Switch back to ceiling based comparator
---
 .../scale_up/run_workload.sh         | 38 ++++++++++++-------
 src/brad/planner/compare/provider.py |  4 +-
 tests/test_enumeration.py            |  2 +-
 3 files changed, 27 insertions(+), 17 deletions(-)

diff --git a/experiments/15-e2e-scenarios-v2/scale_up/run_workload.sh b/experiments/15-e2e-scenarios-v2/scale_up/run_workload.sh
index 3d81b335..14cfe4e5 100755
--- a/experiments/15-e2e-scenarios-v2/scale_up/run_workload.sh
+++ b/experiments/15-e2e-scenarios-v2/scale_up/run_workload.sh
@@ -67,25 +67,34 @@ start_repeating_olap_runner 8 15 5 $initial_queries "ra_8"
 rana_pid=$runner_pid
 sleep 2
 
-# Scale up to 8 transactional clients and hold for 15 minutes.
-log_workload_point "start_increase_txn_4_to_8"
-txn_sweep "4 5 6 7 8" 1 8
-log_workload_point "hold_txn_8_15_min"
-sleep $((15 * 60))
+# Start with 4 transactional clients; hold for 10 minutes to stabilize.
+log_workload_point "start_txn_4"
+start_txn_runner 4
+txn_pid=$runner_pid
+sleep $((10 * 60))
 
-# Scale up to 28 transactional clients. Hold for 15 minutes.
-log_workload_point "start_increase_txn_12_to_28"
+# Scale up to 8 transactional clients and hold for 20 minutes.
+log_workload_point "start_increase_txn_4_to_8"
 kill -INT $txn_pid
 wait $txn_pid
-txn_sweep "12 16 20 24 28" 2 28
-log_workload_point "hold_txn_28_15_min"
-sleep $((15 * 60))
+txn_sweep "5 6 7 8" 3 8
+log_workload_point "hold_txn_8_20_min"
+sleep $((20 * 60)) # 32 mins total; 42 mins cumulative
 
-# 15 minutes.
+# Disabled for now - this will take too long.
+# Scale up to 28 transactional clients. Hold for 15 minutes.
+# log_workload_point "start_increase_txn_12_to_28"
+# kill -INT $txn_pid
+# wait $txn_pid
+# txn_sweep "12 16 20 24 28" 2 28
+# log_workload_point "hold_txn_28_15_min"
+# sleep $((15 * 60))
+
+# 20 minutes.
 log_workload_point "start_heavy_rana_8"
-start_repeating_olap_runner 8 5 1 $heavier_queries "ra_8_heavy" 8
+start_repeating_olap_runner 8 15 1 $heavier_queries "ra_8_heavy" 8
 heavy_rana_pid=$runner_pid
-sleep $((15 * 60))
+sleep $((20 * 60)) # 20 mins total; 62 mins cumulative
 
 # 20 minutes.
 log_workload_point "start_heavy_rana_20"
@@ -93,7 +102,8 @@ kill -INT $heavy_rana_pid
 wait $heavy_rana_pid
 start_repeating_olap_runner 20 5 1 $heavier_queries "ra_20_heavy" 8
 heavy_rana_pid=$runner_pid
-sleep $((20 * 60))
+sleep $((20 * 60)) # 20 mins total; 82 mins cumulative
+
 log_workload_point "experiment_workload_done"
 
 # Shut down everything now.
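Note on the timing annotations added above: the "42/62/82 mins cumulative" comments can be cross-checked with a few lines of arithmetic. The sketch below assumes that txn_sweep "5 6 7 8" 3 8 holds each client count for 3 minutes (4 steps, 12 minutes) before the explicit 20-minute hold; that per-step duration is inferred from the "32 mins total" comment, since the txn_sweep helper itself is not part of this patch.

# Sketch (not part of the patch): recompute the cumulative timings noted in
# run_workload.sh. The 3-minutes-per-sweep-step figure is an assumption.
phases = [
    ("hold 4 txn clients", 10),
    ("sweep txn clients 5-8, then hold at 8", 4 * 3 + 20),  # "32 mins total"
    ("heavy OLAP runner, 8 clients", 20),
    ("heavy OLAP runner, 20 clients", 20),
]
cumulative = 0
for name, minutes in phases:
    cumulative += minutes
    print(f"{name}: {minutes} min ({cumulative} min cumulative)")
# Prints 42, 62, and 82 minutes cumulative, matching the comments in the diff.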
diff --git a/src/brad/planner/compare/provider.py b/src/brad/planner/compare/provider.py
index c10de959..c33d429c 100644
--- a/src/brad/planner/compare/provider.py
+++ b/src/brad/planner/compare/provider.py
@@ -1,5 +1,5 @@
 from brad.planner.compare.function import BlueprintComparator
-from brad.planner.compare.cost import best_weighted_score_under_perf_ceilings
+from brad.planner.compare.cost import best_cost_under_perf_ceilings
 
 
 class BlueprintComparatorProvider:
@@ -21,6 +21,6 @@ def __init__(
         self._max_txn_p90_latency_s = max_txn_p90_latency_s
 
     def get_comparator(self) -> BlueprintComparator:
-        return best_weighted_score_under_perf_ceilings(
+        return best_cost_under_perf_ceilings(
             self._max_query_latency_s, self._max_txn_p90_latency_s
         )
diff --git a/tests/test_enumeration.py b/tests/test_enumeration.py
index fd65334c..c1af5792 100644
--- a/tests/test_enumeration.py
+++ b/tests/test_enumeration.py
@@ -25,7 +25,7 @@ def test_provisioning_enumerate_aurora():
 
 
 def test_provisioning_enumerate_redshift():
     redshift = ProvisioningEnumerator(Engine.Redshift)
-    base_redshift = Provisioning("dc2.large", 1)
+    base_redshift = Provisioning("dc2.large", 2)
     redshift_nearby = [
         p.clone()
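For context on the comparator change above: best_cost_under_perf_ceilings and best_weighted_score_under_perf_ceilings live in brad.planner.compare.cost, and their bodies are not shown in this patch. The sketch below only illustrates the general idea of a ceiling-based comparator, i.e. preferring the cheapest candidate among those that satisfy both latency ceilings; it is not BRAD's implementation, and the Candidate type and its fields are hypothetical.

# Illustrative sketch only -- not BRAD's code. A ceiling-based comparator
# treats the latency limits as hard constraints and breaks ties by cost.
from dataclasses import dataclass


@dataclass
class Candidate:                 # hypothetical stand-in for a blueprint
    cost: float                  # estimated monetary cost
    query_latency_s: float       # predicted analytical query latency
    txn_p90_latency_s: float     # predicted transactional p90 latency


def is_better(
    left: Candidate,
    right: Candidate,
    max_query_latency_s: float,
    max_txn_p90_latency_s: float,
) -> bool:
    """Return True if `left` should be preferred over `right`."""
    def meets_ceilings(c: Candidate) -> bool:
        return (
            c.query_latency_s <= max_query_latency_s
            and c.txn_p90_latency_s <= max_txn_p90_latency_s
        )

    left_ok, right_ok = meets_ceilings(left), meets_ceilings(right)
    if left_ok != right_ok:
        return left_ok             # a feasible candidate beats an infeasible one
    return left.cost < right.cost  # otherwise, the cheaper candidate wins

A weighted-score comparator, by contrast, would fold cost and the latency estimates into a single score rather than treating the ceilings as hard constraints; the commit message indicates a switch back to the ceiling-based behavior.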