Commit 90b591b

Print out intermediate benchmark results (#1100)
* Print out intermediate benchmark results
* Fix benchmarks action
* Formatter
* Formatter

---------

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
1 parent: 2020741 · commit: 90b591b

2 files changed: 34 additions & 16 deletions

.github/workflows/Benchmarking.yml

Lines changed: 9 additions & 4 deletions

@@ -33,14 +33,19 @@ jobs:
           echo "$version_info" >> $GITHUB_ENV
           echo "EOF" >> $GITHUB_ENV

-          # Capture benchmark output into a variable
+          # Capture benchmark output into a variable. The sed and tail calls cut out anything but the
+          # final block of results.
           echo "Running Benchmarks..."
-          benchmark_output=$(julia --project=benchmarks benchmarks/benchmarks.jl)
-
+          benchmark_output=$(\
+            julia --project=benchmarks benchmarks/benchmarks.jl \
+              | sed -n '/Final results:/,$p' \
+              | tail -n +2\
+          )

           # Print benchmark results directly to the workflow log
           echo "Benchmark Results:"
           echo "$benchmark_output"
-
+
           # Set the benchmark output as an env var for later steps
           echo "BENCHMARK_OUTPUT<<EOF" >> $GITHUB_ENV
           echo "$benchmark_output" >> $GITHUB_ENV
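The pipeline relies on the benchmarks script printing a single "Final results:" marker (added in the benchmarks.jl change below) and keeps only the text after it. A minimal sketch of the same selection expressed in Julia, using a made-up stand-in for the script's stdout:

    # Hypothetical stand-in for the benchmark script's output: intermediate
    # "Results so far:" blocks followed by one "Final results:" block.
    output = "Results so far:\n<partial table>\nFinal results:\n<full table>"

    # Keep everything after the "Final results:" marker (it appears once),
    # mirroring `sed -n '/Final results:/,$p' | tail -n +2` in the workflow.
    lines = split(output, '\n')
    marker = findfirst(==("Final results:"), lines)
    print(join(lines[(marker + 1):end], '\n'))  # prints only "<full table>"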

benchmarks/benchmarks.jl

Lines changed: 25 additions & 12 deletions

@@ -7,6 +7,27 @@ using StableRNGs: StableRNG

 rng = StableRNG(23)

+function print_results(results_table)
+    table_matrix = hcat(Iterators.map(collect, zip(results_table...))...)
+    header = [
+        "Model",
+        "Dim",
+        "AD Backend",
+        "VarInfo",
+        "Linked",
+        "t(eval)/t(ref)",
+        "t(grad)/t(eval)",
+    ]
+    return pretty_table(
+        table_matrix;
+        column_labels=header,
+        backend=:text,
+        formatters=[fmt__printf("%.1f", [6, 7])],
+        fit_table_in_display_horizontally=false,
+        fit_table_in_display_vertically=false,
+    )
+end
+
 # Create DynamicPPL.Model instances to run benchmarks on.
 smorgasbord_instance = Models.smorgasbord(randn(rng, 100), randn(rng, 100))
 loop_univariate1k, multivariate1k = begin

@@ -82,17 +103,9 @@ for (model_name, model, varinfo_choice, adbackend, islinked) in chosen_combinati
             relative_ad_eval_time,
         ),
     )
+    println("Results so far:")
+    print_results(results_table)
 end

-table_matrix = hcat(Iterators.map(collect, zip(results_table...))...)
-header = [
-    "Model", "Dim", "AD Backend", "VarInfo", "Linked", "t(eval)/t(ref)", "t(grad)/t(eval)"
-]
-pretty_table(
-    table_matrix;
-    column_labels=header,
-    backend=:text,
-    formatters=[fmt__printf("%.1f", [6, 7])],
-    fit_table_in_display_horizontally=false,
-    fit_table_in_display_vertically=false,
-)
+println("Final results:")
+print_results(results_table)
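The one non-obvious line in print_results is the hcat/zip transposition, which turns the vector of per-run result tuples into the Matrix that pretty_table expects. A small sketch with invented rows (the values below are illustrative, not real benchmark numbers):

    # Each benchmark run pushes one 7-field tuple onto results_table;
    # these two rows are made up purely to show the shape of the data.
    results_table = [
        ("smorgasbord", 100, "ForwardDiff", "typed", false, 1.2, 3.4),
        ("smorgasbord", 100, "ReverseDiff", "typed", true, 1.3, 2.8),
    ]

    # zip(results_table...) walks the tuples in lockstep, yielding one tuple
    # per field; collect turns each into a column vector, and hcat glues the
    # columns into a 2x7 Matrix: one row per run, one column per header entry.
    table_matrix = hcat(Iterators.map(collect, zip(results_table...))...)
    @assert size(table_matrix) == (2, 7)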
