@@ -17,23 +17,29 @@ def compare_command_logic(args):
     max_negative_pct_change = max_pct_change * -1.0
     enabled_fail = args.enable_fail_above

-    baseline_json = retrieve_local_or_remote_input_json(baseline_file, local_path, "--baseline-file")
+    baseline_json = retrieve_local_or_remote_input_json(
+        baseline_file, local_path, "--baseline-file"
+    )
     if baseline_json is None:
-        print('Error while retrieving {}! Exiting..'.format(baseline_file))
+        print("Error while retrieving {}! Exiting..".format(baseline_file))
         sys.exit(1)

-    comparison_json = retrieve_local_or_remote_input_json(comparison_file, local_path, "--comparison-file")
+    comparison_json = retrieve_local_or_remote_input_json(
+        comparison_file, local_path, "--comparison-file"
+    )
     if comparison_json is None:
-        print('Error while retrieving {}! Exiting..'.format(comparison_file))
+        print("Error while retrieving {}! Exiting..".format(comparison_file))
         sys.exit(1)

     ##### Comparison starts here #####
     baseline_key_results_steps = baseline_json["key-results"].keys()
     comparison_key_results_steps = comparison_json["key-results"].keys()
-    baseline_df_config = generate_comparison_dataframe_configs(baseline_json["benchmark-config"],
-                                                               baseline_key_results_steps)
-    comparison_df_config = generate_comparison_dataframe_configs(comparison_json["benchmark-config"],
-                                                                 comparison_key_results_steps)
+    baseline_df_config = generate_comparison_dataframe_configs(
+        baseline_json["benchmark-config"], baseline_key_results_steps
+    )
+    comparison_df_config = generate_comparison_dataframe_configs(
+        comparison_json["benchmark-config"], comparison_key_results_steps
+    )

     percentange_change_map = {}
     for step in baseline_key_results_steps:
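
For readers following along: the hunk above only reformats how the two result documents are loaded and turned into per-step dataframe configs. The code relies on two top-level keys, "key-results" (keyed by step) and "benchmark-config" (which carries the per-metric comparison direction used in the last hunk). A minimal sketch of an input document with that shape; everything inside the two keys, including "key-metrics" and the field names, is an illustrative assumption rather than a documented schema:

    # Hypothetical input document. Only the top-level keys ("key-results",
    # "benchmark-config") and the per-metric "comparison" value are taken from
    # the diff above; the rest is made up for illustration.
    example_result_json = {
        "key-results": {
            "benchmark": {"ops/sec": 105000.0, "p50-latency-ms": 0.81},
        },
        "benchmark-config": {
            "key-metrics": [
                {"step": "benchmark", "metric-name": "ops/sec", "comparison": "higher-better"},
                {"step": "benchmark", "metric-name": "p50-latency-ms", "comparison": "lower-better"},
            ]
        },
    }
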
@@ -42,49 +48,70 @@ def compare_command_logic(args):
             percentange_change_map[step] = {}
             print("##############################")
             print("Comparing {} step".format(step))
-            key_result_run_name, baseline_metrics = get_key_results_and_values(baseline_json, step, use_result)
-            key_result_run_name, comparison_metrics = get_key_results_and_values(comparison_json, step, use_result)
+            key_result_run_name, baseline_metrics = get_key_results_and_values(
+                baseline_json, step, use_result
+            )
+            key_result_run_name, comparison_metrics = get_key_results_and_values(
+                comparison_json, step, use_result
+            )
             for baseline_metric_name, baseline_metric_value in baseline_metrics.items():
                 comparison_metric_value = None
                 if baseline_metric_name in comparison_metrics:
                     comparison_metric_value = comparison_metrics[baseline_metric_name]
-                df_dict[baseline_metric_name] = [baseline_metric_value, comparison_metric_value]
+                df_dict[baseline_metric_name] = [
+                    baseline_metric_value,
+                    comparison_metric_value,
+                ]
             df = pd.DataFrame(df_dict, index=["baseline", "comparison"])
             print("Percentage of change for comparison on {}".format(step))
-            df = df.append(df.pct_change().rename(index={'comparison': 'pct_change'}).loc['pct_change'] * 100.0)
+            df = df.append(
+                df.pct_change()
+                .rename(index={"comparison": "pct_change"})
+                .loc["pct_change"]
+                * 100.0
+            )

             for metric_name, items in df.iteritems():

-                lower_is_better = baseline_df_config[step]["sorting_metric_sorting_direction_map"][metric_name]
+                lower_is_better = baseline_df_config[step][
+                    "sorting_metric_sorting_direction_map"
+                ][metric_name]

                 multiplier = 1.0
                 # if lower is better, then negative changes are a performance improvement
                 if lower_is_better:
                     multiplier = -1.0

                 pct_change = items.get("pct_change") * multiplier
-                df.at['pct_change', metric_name] = pct_change
+                df.at["pct_change", metric_name] = pct_change
                 percentange_change_map[step][metric_name] = pct_change

             print(df)
             if enabled_fail:
-                failing_metrics_serie = df.loc['pct_change'] <= max_negative_pct_change
-                failing_metrics = df.loc['pct_change'][failing_metrics_serie]
+                failing_metrics_serie = df.loc["pct_change"] <= max_negative_pct_change
+                failing_metrics = df.loc["pct_change"][failing_metrics_serie]
                 ammount_of_failing_metrics = len(failing_metrics)
                 if ammount_of_failing_metrics > 0:
                     df_keys = df.keys()
-                    print("There was a total of {} metrics that presented a regression above {} %".format(
-                        ammount_of_failing_metrics, max_pct_change))
+                    print(
+                        "There was a total of {} metrics that presented a regression above {} %".format(
+                            ammount_of_failing_metrics, max_pct_change
+                        )
+                    )
                     for pos, failed in enumerate(failing_metrics_serie):
                         if failed:
-                            print("\t Metric '{}' failed. with an percentage of change of {:.2f} %".format(df_keys[pos],
-                                                                                                           df.loc[
-                                                                                                               'pct_change'][
-                                                                                                               pos]))
+                            print(
+                                "\t Metric '{}' failed. with an percentage of change of {:.2f} %".format(
+                                    df_keys[pos], df.loc["pct_change"][pos]
+                                )
+                            )
                     sys.exit(1)
         else:
-            print("Skipping step: {} due to command line argument --steps not containing it ({})".format(step, ",".join(
-                included_steps)))
+            print(
+                "Skipping step: {} due to command line argument --steps not containing it ({})".format(
+                    step, ",".join(included_steps)
+                )
+            )


 def generate_comparison_dataframe_configs(benchmark_config, steps):
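
One note on the pandas calls kept by this reformat: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and DataFrame.iteritems was removed in 2.0 as well, so the hunk above only runs on older pandas. If the code ever needs to move to a newer pandas, the pct_change row can be built with pd.concat instead. A minimal sketch with made-up metric names and values, not part of this commit:

    import pandas as pd

    # Two rows of metric values, indexed the same way as the dataframe built in the loop above.
    df = pd.DataFrame(
        {"ops/sec": [100000.0, 110000.0], "p50-latency-ms": [0.80, 0.72]},
        index=["baseline", "comparison"],
    )

    # Percentage change of the comparison row relative to the baseline row,
    # renamed and kept as a one-row frame so it can be concatenated back.
    pct_row = (
        df.pct_change().rename(index={"comparison": "pct_change"}).loc[["pct_change"]] * 100.0
    )

    # pd.concat replaces the deprecated df.append(...), and df.items()
    # replaces df.iteritems() from the diff above.
    df = pd.concat([df, pct_row])
    for metric_name, items in df.items():
        print(metric_name, items.get("pct_change"))
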
@@ -104,7 +131,9 @@ def generate_comparison_dataframe_configs(benchmark_config, steps):
             step_df_dict[step]["metric_json_path"].append(metric_json_path)
             step_df_dict[step]["df_dict"][metric_name] = []
             step_df_dict[step]["sorting_metric_sorting_direction"].append(
-                False if metric["comparison"] == "higher-better" else True)
-            step_df_dict[step]["sorting_metric_sorting_direction_map"][metric_name] = False if metric[
-                "comparison"] == "higher-better" else True
+                False if metric["comparison"] == "higher-better" else True
+            )
+            step_df_dict[step]["sorting_metric_sorting_direction_map"][metric_name] = (
+                False if metric["comparison"] == "higher-better" else True
+            )
     return step_df_dict
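
For reference, the sorting_metric_sorting_direction_map built here stores True when lower values are better, and the comparison loop in the previous hunk uses that flag to flip the sign of the raw percentage change so that a positive number always means an improvement. A tiny sketch of that convention; the helper name is hypothetical and simply restates the multiplier logic:

    def signed_pct_change(raw_pct_change, lower_is_better):
        # When lower is better (e.g. latency), a negative raw change is an
        # improvement, so flip the sign to keep "positive == better" uniform.
        return raw_pct_change * (-1.0 if lower_is_better else 1.0)


    assert signed_pct_change(-10.0, lower_is_better=True) == 10.0  # latency dropped 10%
    assert signed_pct_change(5.0, lower_is_better=False) == 5.0    # throughput up 5%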