diff --git a/Cargo.lock b/Cargo.lock index 40e98d3c..d576cfbc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -127,7 +127,22 @@ checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", ] [[package]] @@ -345,7 +360,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -410,8 +425,11 @@ name = "benchmark-analyzer" version = "1.5.0" dependencies = [ "anyhow", + "chrono", "clap", "colored", + "era-compiler-common", + "regex", "serde", "serde_json", ] @@ -601,6 +619,21 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" +[[package]] +name = "chrono" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "serde", + "wasm-bindgen", + "windows-targets 0.52.6", +] + [[package]] name = "cid" version = "0.5.1" @@ -643,7 +676,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -675,6 +708,7 @@ dependencies = [ "anyhow", "benchmark-analyzer", "bincode", + "chrono", "clap", "colored", "era-compiler-common", @@ -897,7 +931,7 @@ dependencies = [ "proc-macro2", "quote", 
"rustc_version 0.4.1", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -917,7 +951,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "unicode-xid", ] @@ -950,7 +984,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1017,7 +1051,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1028,7 +1062,7 @@ checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1398,7 +1432,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1722,6 +1756,29 @@ dependencies = [ "tracing", ] +[[package]] +name = "iana-time-zone" +version = "0.1.61" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + [[package]] name = "icu_collections" version = "1.5.0" @@ -1837,7 +1894,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1896,7 +1953,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 
2.0.91", ] [[package]] @@ -1931,7 +1988,7 @@ source = "git+https://github.com/matter-labs-forks/inkwell?branch=llvm-17#c5d783 dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -2335,14 +2392,14 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -2382,7 +2439,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -2508,7 +2565,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.8", + "thiserror 2.0.9", "ucd-trie", ] @@ -2529,7 +2586,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -2676,7 +2733,7 @@ dependencies = [ "rustc-hash", "rustls", "socket2", - "thiserror 2.0.8", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -2695,7 +2752,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.8", + "thiserror 2.0.9", "tinyvec", "tracing", "web-time", @@ -2712,7 +2769,7 @@ dependencies = [ "once_cell", "socket2", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3216,7 +3273,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -3361,7 +3418,7 @@ checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", 
+ "syn 2.0.91", ] [[package]] @@ -3617,9 +3674,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.90" +version = "2.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +checksum = "d53cbcb5a243bd33b7858b1d7f4aca2153490815872d86d955d6ea29f743c035" dependencies = [ "proc-macro2", "quote", @@ -3649,7 +3706,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -3703,11 +3760,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f5383f3e0071702bf93ab5ee99b52d26936be9dedd9413067cbdcddcb6141a" +checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" dependencies = [ - "thiserror-impl 2.0.8", + "thiserror-impl 2.0.9", ] [[package]] @@ -3718,18 +3775,18 @@ checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] name = "thiserror-impl" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f357fcec90b3caef6623a099691be676d033b40a058ac95d2a6ade6fa0c943" +checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -3762,9 +3819,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -3876,7 +3933,7 @@ checksum = 
"395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -4058,7 +4115,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "wasm-bindgen-shared", ] @@ -4093,7 +4150,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4174,6 +4231,15 @@ dependencies = [ "winsafe", ] +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-registry" version = "0.2.0" @@ -4427,7 +4493,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "synstructure", ] @@ -4449,7 +4515,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -4469,7 +4535,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "synstructure", ] @@ -4490,7 +4556,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -4512,7 +4578,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] diff --git a/README.md b/README.md index 0993fad5..76142535 100644 --- a/README.md +++ b/README.md @@ -296,10 +296,36 @@ cargo run --release --bin benchmark-analyzer -- --reference 
reference.json --can After you make any changes in LLVM, you only need to repeat steps 2-3 to update the working branch benchmark data. +### Comparing results + +By default, benchmark analyzer compares tests from groups with the same name, which means that every test should be compiled with the same codegen and optimizations. +To compare two groups with different names, use the options `--query-reference` and `--query-candidate`. Then, use benchmark analyzer: + +```shell +cargo run --release --bin benchmark-analyzer -- --reference reference.json --candidate candidate.json --query-reference "M0B0" --query-candidate "M3B3" +``` + +The queries are regular expressions, and the group name, codegen, and +optimization options are matched against it. + + + ### Report formats -Use the parameter `--benchmark-format` to select the output format: `json` (default), or `csv`. +Use the parameter `--benchmark-format` of compiler tester to select the output format: `json` (default), `csv`, or `json-lnt`. + +If `json-lnt` format is selected: +1. The benchmark report will consist of multiple files. They will be placed in the directory provided via the `--output` argument. +2. It is mandatory to pass a JSON file with additional information using `--benchmark-context`. 
Here is a minimal example: + +```json +{ + "machine": "some_machine", + "target": "some_target", + "toolchain": "some_solc_type" +} +``` ## Troubleshooting diff --git a/benchmark_analyzer/Cargo.toml b/benchmark_analyzer/Cargo.toml index 4e23ff99..1ee1078b 100644 --- a/benchmark_analyzer/Cargo.toml +++ b/benchmark_analyzer/Cargo.toml @@ -3,6 +3,7 @@ name = "benchmark-analyzer" version = "1.5.0" authors = [ "Oleksandr Zarudnyi ", + "Igor Zhirkov ", ] license = "MIT OR Apache-2.0" edition = "2021" @@ -16,6 +17,10 @@ path = "src/benchmark_analyzer/main.rs" clap = { version = "=4.5.21", features = ["derive"] } anyhow = "=1.0.89" colored = "=2.1.0" +regex = "=1.11.0" serde = { version = "=1.0.210", features = [ "derive" ] } serde_json = "=1.0.128" +chrono = { version = "=0.4.38", features = [ "serde", "clock" ] } + +era-compiler-common = { git = "https://github.com/matter-labs/era-compiler-common", branch = "main" } diff --git a/benchmark_analyzer/src/analysis/evm_interpreter/mod.rs b/benchmark_analyzer/src/analysis/evm_interpreter/mod.rs new file mode 100644 index 00000000..83e39a43 --- /dev/null +++ b/benchmark_analyzer/src/analysis/evm_interpreter/mod.rs @@ -0,0 +1,59 @@ +//! +//! Collects definitions related to the analysis of EVM interpreter tests. +//! + +use std::collections::BTreeMap; + +use crate::model::benchmark::test::codegen::versioned::executable::run::Run; +use crate::model::benchmark::test::metadata::Metadata as TestMetadata; +use crate::model::evm_interpreter::OPCODES; +use crate::results::group::Group; + +const OPTIMIZE_FOR_CYCLES: &str = "+M3B3"; + +/// +/// Returns `true` if the group collects measurements of EVM Interpreter tests +/// compiled for maximum performance. +/// +pub fn is_evm_interpreter_cycles_tests_group(group: &Group<'_>) -> bool { + matches!( + group, + Group::EVMInterpreter { + optimizations: OPTIMIZE_FOR_CYCLES, + .. + } + ) +} + +/// +/// Returns the EVM interpreter ergs/gas ratio for every EVM bytecode. 
+/// +pub fn opcode_cost_ratios<'a>( + group: &BTreeMap<&'a str, (&'a TestMetadata, &'a Run)>, +) -> Vec<(String, f64)> { + let mut results = Vec::new(); + + for evm_opcode in OPCODES.into_iter() { + // Case name corresponds to the EVM bytecode + // Collect three last #fallback's to get the gas and ergs measurements + let runs = group + .values() + .filter_map(|(metadata, run)| match &metadata.selector.case { + Some(case) if case == evm_opcode => match &metadata.selector.input { + Some(input) if input.is_fallback() => Some(*run), + _ => None, + }, + _ => None, + }) + .collect::>(); + let [_skip, full, template]: [&'a Run; 3] = runs + .try_into() + .unwrap_or_else(|_| panic!("Failed to extract three #fallback tests from the EVM interpreter tests attributed to the opcode {evm_opcode}")); + + let ergs_difference = full.ergs as i64 - template.ergs as i64; + let gas_difference = full.gas as i64 - template.gas as i64; + let ergs_gas_ratio = (ergs_difference as f64) / (gas_difference as f64); + results.push((evm_opcode.to_owned(), ergs_gas_ratio)); + } + results +} diff --git a/benchmark_analyzer/src/analysis/mod.rs b/benchmark_analyzer/src/analysis/mod.rs new file mode 100644 index 00000000..0dc7024b --- /dev/null +++ b/benchmark_analyzer/src/analysis/mod.rs @@ -0,0 +1,277 @@ +//! +//! Provides tools for collecting and comparing benchmark results. +//! 
+ +pub mod evm_interpreter; + +use std::collections::BTreeMap; + +use evm_interpreter::is_evm_interpreter_cycles_tests_group; +use evm_interpreter::opcode_cost_ratios; + +use crate::model::benchmark::test::codegen::versioned::executable::run::Run; +use crate::model::benchmark::test::metadata::Metadata as TestMetadata; +use crate::model::benchmark::Benchmark; +use crate::results::group::Group; +use crate::results::Results; +use crate::util::btreemap::cross_join_filter_map; +use crate::util::btreemap::intersect_keys; +use crate::util::btreemap::intersect_map; + +type GroupRuns<'a> = BTreeMap<&'a str, (&'a TestMetadata, &'a Run)>; + +/// +/// Collects measurements from a benchmark into groups. +/// Groups may intersect. +/// +fn collect_runs(benchmark: &Benchmark) -> BTreeMap, GroupRuns> { + let mut result: BTreeMap, GroupRuns> = BTreeMap::new(); + + for (test_identifier, test) in &benchmark.tests { + for (codegen, codegen_group) in &test.codegen_groups { + for versioned_group in codegen_group.versioned_groups.values() { + for (mode, executable) in &versioned_group.executables { + for tag in test + .metadata + .tags + .iter() + .map(Some) + .chain(std::iter::once(None)) + { + let tag = tag.map(|t| t.as_str()); + result + .entry(Group::from_tag(tag, Some(codegen), Some(mode))) + .or_default() + .insert(test_identifier.as_str(), (&test.metadata, &executable.run)); + } + } + } + } + } + + result +} + +/// +/// Compare two benchmark runs [reference] and [candidate] by groups. +/// Every resulting group is either: +/// - a result of comparison of a group from [reference] with a group from [candidate] sharing the same name +/// - or a result of comparing two distinct groups from [reference] and +/// [candidate] for which [custom_group_comparisons] returned `true`. 
+/// +pub fn compare<'a>( + reference: &'a Benchmark, + candidate: &'a Benchmark, + custom_group_comparisons: impl Fn(&Group, &Group) -> bool, +) -> Vec<(Group<'a>, Results<'a>)> { + let groups = { + let reference_runs: BTreeMap, GroupRuns<'a>> = collect_runs(reference); + let candidate_runs: BTreeMap, GroupRuns<'a>> = collect_runs(candidate); + + let comparisons: Vec<(Group<'a>, GroupRuns<'a>, GroupRuns<'a>)> = + cross_join_filter_map(&reference_runs, &candidate_runs, |g1, g2| { + if custom_group_comparisons(g1, g2) { + Some(Group::new_comparison(g1, g2)) + } else { + None + } + }); + + intersect_keys(reference_runs, candidate_runs).chain(comparisons) + }; + + let results: Vec<(Group<'_>, Results<'_>)> = groups + .map(|(group_name, reference_tests, candidate_tests)| { + let ratios = if is_evm_interpreter_cycles_tests_group(&group_name) { + Some(( + opcode_cost_ratios(&reference_tests), + opcode_cost_ratios(&candidate_tests), + )) + } else { + None + }; + + let runs: Vec<(&TestMetadata, &Run, &Run)> = intersect_map( + reference_tests, + candidate_tests, + |_id, (metadata, run_reference), (_, run_candidate)| { + (metadata, run_reference, run_candidate) + }, + ) + .collect(); + let results = { + let mut runs = compare_runs(runs); + + if let Some((reference_ratios, candidate_ratios)) = ratios { + runs.set_evm_interpreter_ratios(reference_ratios, candidate_ratios); + } + runs + }; + (group_name, results) + }) + .collect(); + + results +} + +/// +/// Compare two sets of measurements. +/// The parameter `[run]` is a vector of triples where each element contains: +/// - metadata for a test, +/// - measurement in the first set, +/// - measurement in the second set. 
+/// +fn compare_runs<'a>(runs: Vec<(&'a TestMetadata, &'a Run, &'a Run)>) -> Results<'a> { + let elements_number = runs.len(); + + let mut size_factors = Vec::with_capacity(elements_number); + let mut size_min = 1.0; + let mut size_max = 1.0; + let mut size_negatives: Vec<(f64, &TestMetadata)> = Vec::with_capacity(elements_number); + let mut size_positives: Vec<(f64, &TestMetadata)> = Vec::with_capacity(elements_number); + let mut size_total_reference: u64 = 0; + let mut size_total_candidate: u64 = 0; + + let mut cycles_factors = Vec::with_capacity(elements_number); + let mut cycles_min = 1.0; + let mut cycles_max = 1.0; + let mut cycles_negatives: Vec<(f64, &TestMetadata)> = Vec::with_capacity(elements_number); + let mut cycles_positives: Vec<(f64, &TestMetadata)> = Vec::with_capacity(elements_number); + let mut cycles_total_reference: u64 = 0; + let mut cycles_total_candidate: u64 = 0; + + let mut ergs_factors = Vec::with_capacity(elements_number); + let mut ergs_min = 1.0; + let mut ergs_max = 1.0; + let mut ergs_negatives: Vec<(f64, &TestMetadata)> = Vec::with_capacity(elements_number); + let mut ergs_positives: Vec<(f64, &TestMetadata)> = Vec::with_capacity(elements_number); + let mut ergs_total_reference: u64 = 0; + let mut ergs_total_candidate: u64 = 0; + + let mut gas_factors = Vec::with_capacity(elements_number); + let mut gas_min = 1.0; + let mut gas_max = 1.0; + let mut gas_negatives = Vec::with_capacity(elements_number); + let mut gas_positives = Vec::with_capacity(elements_number); + let mut gas_total_reference: u64 = 0; + let mut gas_total_candidate: u64 = 0; + + for (metadata, reference, candidate) in runs { + let file_path = &metadata.selector.path; + // FIXME: ad-hoc patch + if file_path.contains(crate::model::evm_interpreter::TEST_PATH) { + if let Some(input) = &metadata.selector.input { + if input.is_deployer() { + continue; + } + } + } + + cycles_total_reference += reference.cycles as u64; + cycles_total_candidate += candidate.cycles as u64; + 
let cycles_factor = (candidate.cycles as f64) / (reference.cycles as f64); + if cycles_factor > 1.0 { + cycles_negatives.push((cycles_factor, metadata)); + } + if cycles_factor < 1.0 { + cycles_positives.push((cycles_factor, metadata)); + } + if cycles_factor < cycles_min { + cycles_min = cycles_factor; + } + if cycles_factor > cycles_max { + cycles_max = cycles_factor; + } + cycles_factors.push(cycles_factor); + + ergs_total_reference += reference.ergs; + ergs_total_candidate += candidate.ergs; + let ergs_factor = (candidate.ergs as f64) / (reference.ergs as f64); + if ergs_factor > 1.0 { + ergs_negatives.push((ergs_factor, metadata)); + } + if ergs_factor < 1.0 { + ergs_positives.push((ergs_factor, metadata)); + } + if ergs_factor < ergs_min { + ergs_min = ergs_factor; + } + if ergs_factor > ergs_max { + ergs_max = ergs_factor; + } + ergs_factors.push(ergs_factor); + + gas_total_reference += reference.gas; + gas_total_candidate += candidate.gas; + let gas_factor = (candidate.gas as f64) / (reference.gas as f64); + if gas_factor > 1.0 { + gas_negatives.push((gas_factor, metadata)); + } + if gas_factor < 1.0 { + gas_positives.push((gas_factor, metadata)); + } + if gas_factor < gas_min { + gas_min = gas_factor; + } + if gas_factor > gas_max { + gas_max = gas_factor; + } + gas_factors.push(gas_factor); + + let reference_size = match reference.size { + Some(size) => size, + None => continue, + }; + let candidate_size = match candidate.size { + Some(size) => size, + None => continue, + }; + size_total_reference += reference_size as u64; + size_total_candidate += candidate_size as u64; + let size_factor = (candidate_size as f64) / (reference_size as f64); + if size_factor > 1.0 { + size_negatives.push((size_factor, metadata)); + } + if size_factor < 1.0 { + size_positives.push((size_factor, metadata)); + } + if size_factor < size_min { + size_min = size_factor; + } + if size_factor > size_max { + size_max = size_factor; + } + size_factors.push(size_factor); + } + + let 
size_total = (size_total_candidate as f64) / (size_total_reference as f64); + + let cycles_total = (cycles_total_candidate as f64) / (cycles_total_reference as f64); + + let ergs_total = (ergs_total_candidate as f64) / (ergs_total_reference as f64); + + let gas_total = (gas_total_candidate as f64) / (gas_total_reference as f64); + + Results::new( + size_min, + size_max, + size_total, + size_negatives, + size_positives, + cycles_min, + cycles_max, + cycles_total, + cycles_negatives, + cycles_positives, + ergs_min, + ergs_max, + ergs_total, + ergs_negatives, + ergs_positives, + gas_min, + gas_max, + gas_total, + gas_negatives, + gas_positives, + ) +} diff --git a/benchmark_analyzer/src/benchmark/format/csv.rs b/benchmark_analyzer/src/benchmark/format/csv.rs deleted file mode 100644 index a9e22762..00000000 --- a/benchmark_analyzer/src/benchmark/format/csv.rs +++ /dev/null @@ -1,71 +0,0 @@ -//! -//! Serializing benchmark data to CSV. -//! - -use std::fmt::Write; - -use super::Benchmark; -use super::IBenchmarkSerializer; -use crate::benchmark::group::element::selector::Selector; -use crate::benchmark::group::element::Element; -use crate::benchmark::metadata::Metadata; - -/// -/// Serialize the benchmark to CSV in the following format: -/// "group_name", "element_name", "size_str", "cycles", "ergs", "gas" -/// -#[derive(Default)] -pub struct Csv; - -impl IBenchmarkSerializer for Csv { - type Err = std::fmt::Error; - - fn serialize_to_string(&self, benchmark: &Benchmark) -> Result { - let mut result = String::with_capacity(estimate_csv_size(benchmark)); - result.push_str( - r#""group", "mode", "version", "path", "case", "input", "size", "cycles", "ergs", "gas""#, - ); - result.push('\n'); - for (group_name, group) in &benchmark.groups { - for Element { - metadata: - Metadata { - selector: Selector { path, case, input }, - mode, - version, - group: _, - }, - size, - cycles, - ergs, - gas, - } in group.elements.values() - { - let size_str = size.map(|s| 
s.to_string()).unwrap_or_default(); - let mode = mode.as_deref().unwrap_or_default(); - let input = input.clone().map(|s| s.to_string()).unwrap_or_default(); - let case = case.as_deref().unwrap_or_default(); - let version = version.as_deref().unwrap_or_default(); - writeln!( - &mut result, - r#""{group_name}", "{mode}", "{version}", "{path}", "{case}", "{input}", {size_str}, {cycles}, {ergs}, {gas}"#, - )?; - } - } - Ok(result) - } -} - -fn estimate_csv_line_length() -> usize { - let number_fields = 4; - let number_field_estimated_max_length = 15; - let group_name_estimated_max = 10; - let test_name_estimated_max = 300; - group_name_estimated_max - + test_name_estimated_max - + number_fields * number_field_estimated_max_length -} - -fn estimate_csv_size(benchmark: &Benchmark) -> usize { - (benchmark.groups.len() + 1) * estimate_csv_line_length() -} diff --git a/benchmark_analyzer/src/benchmark/format/json.rs b/benchmark_analyzer/src/benchmark/format/json.rs deleted file mode 100644 index 04e19a33..00000000 --- a/benchmark_analyzer/src/benchmark/format/json.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! -//! Serializing benchmark data to JSON. -//! - -use super::Benchmark; -use super::IBenchmarkSerializer; - -/// Serialize the benchmark data to JSON using `serde` library. -#[derive(Default)] -pub struct Json; - -impl IBenchmarkSerializer for Json { - type Err = serde_json::error::Error; - - fn serialize_to_string(&self, benchmark: &Benchmark) -> Result { - serde_json::to_string(benchmark) - } -} diff --git a/benchmark_analyzer/src/benchmark/format/mod.rs b/benchmark_analyzer/src/benchmark/format/mod.rs deleted file mode 100644 index f22ad54f..00000000 --- a/benchmark_analyzer/src/benchmark/format/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! -//! Serialization of benchmark data in different output formats. -//! - -pub mod csv; -pub mod json; - -use crate::benchmark::Benchmark; - -/// Serialization format for benchmark data. 
-pub trait IBenchmarkSerializer { - type Err: std::error::Error; - /// Serialize benchmark data in the selected format. - fn serialize_to_string(&self, benchmark: &Benchmark) -> anyhow::Result; -} diff --git a/benchmark_analyzer/src/benchmark/group/mod.rs b/benchmark_analyzer/src/benchmark/group/mod.rs deleted file mode 100644 index 94d7db2a..00000000 --- a/benchmark_analyzer/src/benchmark/group/mod.rs +++ /dev/null @@ -1,211 +0,0 @@ -//! -//! The benchmark group representation. -//! - -pub mod element; -pub mod results; - -use std::collections::BTreeMap; - -use serde::Deserialize; -use serde::Serialize; - -use crate::benchmark::Benchmark; - -use self::element::Element; -use self::results::Results; - -/// -/// The benchmark group representation. -/// -#[derive(Debug, Default, Serialize, Deserialize, Clone)] -pub struct Group { - /// The group elements. - pub elements: BTreeMap, -} - -impl Group { - /// - /// Compares two benchmark groups. - /// - pub fn compare<'a>(reference: &'a Self, candidate: &'a Self) -> Results<'a> { - let elements_number = reference.elements.len(); - - let mut size_factors = Vec::with_capacity(elements_number); - let mut size_min = 1.0; - let mut size_max = 1.0; - let mut size_negatives = Vec::with_capacity(elements_number); - let mut size_positives = Vec::with_capacity(elements_number); - let mut size_total_reference: u64 = 0; - let mut size_total_candidate: u64 = 0; - - let mut cycles_factors = Vec::with_capacity(elements_number); - let mut cycles_min = 1.0; - let mut cycles_max = 1.0; - let mut cycles_negatives = Vec::with_capacity(elements_number); - let mut cycles_positives = Vec::with_capacity(elements_number); - let mut cycles_total_reference: u64 = 0; - let mut cycles_total_candidate: u64 = 0; - - let mut ergs_factors = Vec::with_capacity(elements_number); - let mut ergs_min = 1.0; - let mut ergs_max = 1.0; - let mut ergs_negatives = Vec::with_capacity(elements_number); - let mut ergs_positives = Vec::with_capacity(elements_number); 
- let mut ergs_total_reference: u64 = 0; - let mut ergs_total_candidate: u64 = 0; - - let mut gas_factors = Vec::with_capacity(elements_number); - let mut gas_min = 1.0; - let mut gas_max = 1.0; - let mut gas_negatives = Vec::with_capacity(elements_number); - let mut gas_positives = Vec::with_capacity(elements_number); - let mut gas_total_reference: u64 = 0; - let mut gas_total_candidate: u64 = 0; - - for (path, reference) in reference.elements.iter() { - if path.contains("tests/solidity/complex/interpreter/test.json") - && path.contains("#deployer") - { - continue; - } - let candidate = match candidate.elements.get(path.as_str()) { - Some(candidate) => candidate, - None => continue, - }; - - cycles_total_reference += reference.cycles as u64; - cycles_total_candidate += candidate.cycles as u64; - let cycles_factor = (candidate.cycles as f64) / (reference.cycles as f64); - if cycles_factor > 1.0 { - cycles_negatives.push((cycles_factor, path.as_str())); - } - if cycles_factor < 1.0 { - cycles_positives.push((cycles_factor, path.as_str())); - } - if cycles_factor < cycles_min { - cycles_min = cycles_factor; - } - if cycles_factor > cycles_max { - cycles_max = cycles_factor; - } - cycles_factors.push(cycles_factor); - - ergs_total_reference += reference.ergs; - ergs_total_candidate += candidate.ergs; - let ergs_factor = (candidate.ergs as f64) / (reference.ergs as f64); - if ergs_factor > 1.0 { - ergs_negatives.push((ergs_factor, path.as_str())); - } - if ergs_factor < 1.0 { - ergs_positives.push((ergs_factor, path.as_str())); - } - if ergs_factor < ergs_min { - ergs_min = ergs_factor; - } - if ergs_factor > ergs_max { - ergs_max = ergs_factor; - } - ergs_factors.push(ergs_factor); - - gas_total_reference += reference.gas; - gas_total_candidate += candidate.gas; - let gas_factor = (candidate.gas as f64) / (reference.gas as f64); - if gas_factor > 1.0 { - gas_negatives.push((gas_factor, path.as_str())); - } - if gas_factor < 1.0 { - gas_positives.push((gas_factor, 
path.as_str())); - } - if gas_factor < gas_min { - gas_min = gas_factor; - } - if gas_factor > gas_max { - gas_max = gas_factor; - } - gas_factors.push(gas_factor); - - let reference_size = match reference.size { - Some(size) => size, - None => continue, - }; - let candidate_size = match candidate.size { - Some(size) => size, - None => continue, - }; - size_total_reference += reference_size as u64; - size_total_candidate += candidate_size as u64; - let size_factor = (candidate_size as f64) / (reference_size as f64); - if size_factor > 1.0 { - size_negatives.push((size_factor, path.as_str())); - } - if size_factor < 1.0 { - size_positives.push((size_factor, path.as_str())); - } - if size_factor < size_min { - size_min = size_factor; - } - if size_factor > size_max { - size_max = size_factor; - } - size_factors.push(size_factor); - } - - let size_total = (size_total_candidate as f64) / (size_total_reference as f64); - - let cycles_total = (cycles_total_candidate as f64) / (cycles_total_reference as f64); - - let ergs_total = (ergs_total_candidate as f64) / (ergs_total_reference as f64); - - let gas_total = (gas_total_candidate as f64) / (gas_total_reference as f64); - - Results::new( - size_min, - size_max, - size_total, - size_negatives, - size_positives, - cycles_min, - cycles_max, - cycles_total, - cycles_negatives, - cycles_positives, - ergs_min, - ergs_max, - ergs_total, - ergs_negatives, - ergs_positives, - gas_min, - gas_max, - gas_total, - gas_negatives, - gas_positives, - ) - } - - /// - /// Returns the EVM interpreter ergs/gas ratio. 
- /// - pub fn evm_interpreter_ratios(&self) -> Vec<(String, f64)> { - let mut results = Vec::with_capacity(Benchmark::EVM_OPCODES.len()); - for evm_opcode in Benchmark::EVM_OPCODES.into_iter() { - let name_substring = format!("test.json::{evm_opcode}["); - let [full, template]: [Element; 2] = self - .elements - .iter() - .filter(|element| element.0.contains(name_substring.as_str())) - .rev() - .take(2) - .map(|element| (element.1.to_owned())) - .collect::>() - .try_into() - .expect("Always valid"); - - let ergs_difference = full.ergs - template.ergs; - let gas_difference = full.gas - template.gas; - let ergs_gas_ratio = (ergs_difference as f64) / (gas_difference as f64); - results.push((evm_opcode.to_owned(), ergs_gas_ratio)); - } - results - } -} diff --git a/benchmark_analyzer/src/benchmark/metadata.rs b/benchmark_analyzer/src/benchmark/metadata.rs deleted file mode 100644 index bda33fa1..00000000 --- a/benchmark_analyzer/src/benchmark/metadata.rs +++ /dev/null @@ -1,29 +0,0 @@ -//! -//! Information associated with the benchmark element. -//! - -use serde::Deserialize; -use serde::Serialize; - -use crate::benchmark::group::element::selector::Selector; - -/// -/// Encoded compiler mode. In future, it can be expanded into a structured type -/// shared between crates `benchmark_analyzer` and `compiler_tester`. -/// -pub type Mode = String; - -/// -/// Information associated with the benchmark element. -/// -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Metadata { - /// Test selector. - pub selector: Selector, - /// Compiler mode. - pub mode: Option, - /// Compiler version, e.g. solc. - pub version: Option, - /// Test group - pub group: String, -} diff --git a/benchmark_analyzer/src/benchmark/mod.rs b/benchmark_analyzer/src/benchmark/mod.rs deleted file mode 100644 index 9839fc50..00000000 --- a/benchmark_analyzer/src/benchmark/mod.rs +++ /dev/null @@ -1,232 +0,0 @@ -//! -//! The benchmark representation. -//! 
- -pub mod format; -pub mod group; -pub mod metadata; - -use std::collections::BTreeMap; -use std::path::PathBuf; - -use format::IBenchmarkSerializer; -use serde::Deserialize; -use serde::Serialize; - -use self::group::results::Results; -use self::group::Group; - -/// -/// The benchmark representation. -/// -#[derive(Debug, Default, Serialize, Deserialize)] -pub struct Benchmark { - /// The benchmark groups. - pub groups: BTreeMap, -} - -impl Benchmark { - /// The EVM interpreter group identifier. - pub const EVM_INTERPRETER_GROUP_NAME: &'static str = "EVMInterpreter"; - - /// The EVM interpreter cycles group identifier. - pub const EVM_INTERPRETER_GROUP_NAME_CYCLES: &'static str = "EVMInterpreter M3B3"; - - /// The EVM opcodes to test. - pub const EVM_OPCODES: [&'static str; 135] = [ - "ADD", - "MUL", - "SUB", - "DIV", - "SDIV", - "MOD", - "SMOD", - "ADDMOD", - "MULMOD", - "EXP", - "SIGNEXTEND", - "LT", - "GT", - "SLT", - "SGT", - "EQ", - "ISZERO", - "AND", - "OR", - "XOR", - "NOT", - "BYTE", - "SHL", - "SHR", - "SAR", - "SGT", - "SHA3", - "ADDRESS", - "BALANCE", - "ORIGIN", - "CALLER", - "CALLVALUE", - "CALLDATALOAD", - "CALLDATASIZE", - "CALLDATACOPY", - "CODESIZE", - "CODECOPY", - "GASPRICE", - "EXTCODESIZE", - "EXTCODECOPY", - "RETURNDATASIZE", - "RETURNDATACOPY", - "EXTCODEHASH", - "BLOCKHASH", - "COINBASE", - "TIMESTAMP", - "NUMBER", - "PREVRANDAO", - "GASLIMIT", - "CHAINID", - "SELFBALANCE", - "BASEFEE", - "POP", - "MLOAD", - "MSTORE", - "MSTORE8", - "SLOAD", - "SSTORE", - "JUMP", - "JUMPI", - "PC", - "MSIZE", - "GAS", - "JUMPDEST", - "PUSH0", - "PUSH1", - "PUSH2", - "PUSH4", - "PUSH5", - "PUSH6", - "PUSH7", - "PUSH8", - "PUSH9", - "PUSH10", - "PUSH11", - "PUSH12", - "PUSH13", - "PUSH14", - "PUSH15", - "PUSH16", - "PUSH17", - "PUSH18", - "PUSH19", - "PUSH20", - "PUSH21", - "PUSH22", - "PUSH23", - "PUSH24", - "PUSH25", - "PUSH26", - "PUSH27", - "PUSH28", - "PUSH29", - "PUSH30", - "PUSH31", - "PUSH32", - "DUP1", - "DUP2", - "DUP3", - "DUP4", - "DUP5", - 
"DUP6", - "DUP7", - "DUP8", - "DUP9", - "DUP10", - "DUP11", - "DUP12", - "DUP13", - "DUP14", - "DUP15", - "DUP16", - "SWAP1", - "SWAP2", - "SWAP3", - "SWAP4", - "SWAP5", - "SWAP6", - "SWAP7", - "SWAP8", - "SWAP9", - "SWAP10", - "SWAP11", - "SWAP12", - "SWAP13", - "SWAP14", - "SWAP15", - "SWAP16", - "CALL", - "STATICCALL", - "DELEGATECALL", - "CREATE", - "CREATE2", - "RETURN", - "REVERT", - ]; - - /// - /// Compares two benchmarks. - /// - pub fn compare<'a>(reference: &'a Self, candidate: &'a Self) -> BTreeMap<&'a str, Results<'a>> { - let mut results = BTreeMap::new(); - - for (group_name, reference_group) in reference.groups.iter() { - let candidate_group = match candidate.groups.get(group_name) { - Some(candidate_group) => candidate_group, - None => continue, - }; - - let mut group_results = Group::compare(reference_group, candidate_group); - if group_name.starts_with(Self::EVM_INTERPRETER_GROUP_NAME_CYCLES) { - if let (Some(reference_ratios), Some(candidate_ratios)) = ( - reference - .groups - .get(group_name.as_str()) - .map(|group| group.evm_interpreter_ratios()), - candidate - .groups - .get(group_name.as_str()) - .map(|group| group.evm_interpreter_ratios()), - ) { - group_results.set_evm_interpreter_ratios(reference_ratios, candidate_ratios); - } - } - results.insert(group_name.as_str(), group_results); - } - - results - } - - /// - /// Writes the benchmark results to a file using a provided serializer. 
- /// - pub fn write_to_file( - self, - path: PathBuf, - serializer: impl IBenchmarkSerializer, - ) -> anyhow::Result<()> { - let contents = serializer.serialize_to_string(&self).expect("Always valid"); - std::fs::write(path.as_path(), contents) - .map_err(|error| anyhow::anyhow!("Benchmark file {path:?} reading: {error}"))?; - Ok(()) - } -} - -impl TryFrom for Benchmark { - type Error = anyhow::Error; - - fn try_from(path: PathBuf) -> Result { - let text = std::fs::read_to_string(path.as_path()) - .map_err(|error| anyhow::anyhow!("Benchmark file {:?} reading: {}", path, error))?; - let json: Self = serde_json::from_str(text.as_str()) - .map_err(|error| anyhow::anyhow!("Benchmark file {:?} parsing: {}", path, error))?; - Ok(json) - } -} diff --git a/benchmark_analyzer/src/benchmark_analyzer/arguments.rs b/benchmark_analyzer/src/benchmark_analyzer/arguments.rs index f86f5077..3fffc116 100644 --- a/benchmark_analyzer/src/benchmark_analyzer/arguments.rs +++ b/benchmark_analyzer/src/benchmark_analyzer/arguments.rs @@ -27,4 +27,12 @@ pub struct Arguments { /// Maximum number of results displayed in a group. #[structopt(long, default_value_t = 100)] pub group_max: usize, + + /// Regular expression to select reference group for the comparison. + #[structopt(long)] + pub query_reference: Option, + + /// Regular expression to select candidate group for the comparison. + #[structopt(long)] + pub query_candidate: Option, } diff --git a/benchmark_analyzer/src/benchmark_analyzer/main.rs b/benchmark_analyzer/src/benchmark_analyzer/main.rs index 5c3cdaf9..572f9a3e 100644 --- a/benchmark_analyzer/src/benchmark_analyzer/main.rs +++ b/benchmark_analyzer/src/benchmark_analyzer/main.rs @@ -9,6 +9,7 @@ use std::io::Write; use clap::Parser; use self::arguments::Arguments; +use benchmark_analyzer::ResultsGroup; /// /// The application entry point. 
@@ -16,18 +17,47 @@ use self::arguments::Arguments; fn main() -> anyhow::Result<()> { let arguments = Arguments::try_parse()?; - let reference = benchmark_analyzer::Benchmark::try_from(arguments.reference)?; - let candidate = benchmark_analyzer::Benchmark::try_from(arguments.candidate)?; + let reference_benchmark = benchmark_analyzer::Benchmark::try_from(arguments.reference)?; + let candidate_benchmark = benchmark_analyzer::Benchmark::try_from(arguments.candidate)?; - let groups_results = benchmark_analyzer::Benchmark::compare(&reference, &candidate); + let groups_results = if let (Some(reference_query), Some(candidate_query)) = + (arguments.query_reference, arguments.query_candidate) + { + // If the user provides regular expressions to select groups for + // comparison, the analyzer will compare all groups with the same names, + // plus all pairs of groups matching regular expressions + // [regex_reference] and [regex_candidate]. + + let regex_reference = + regex::Regex::new(&reference_query).expect("Invalid reference query regexp"); + let regex_candidate = + regex::Regex::new(&candidate_query).expect("Invalid candidate query regexp"); + + benchmark_analyzer::analysis::compare( + &reference_benchmark, + &candidate_benchmark, + |g1: &ResultsGroup<'_>, g2: &ResultsGroup<'_>| { + g1.regex_matches(®ex_reference) && g2.regex_matches(®ex_candidate) + }, + ) + } else { + // If the user did not provide regular expressions to select groups for + // comparison, the analyzer will compare only the groups with the same + // names. 
+ benchmark_analyzer::analysis::compare( + &reference_benchmark, + &candidate_benchmark, + |_: &ResultsGroup<'_>, _: &ResultsGroup<'_>| false, + ) + }; match arguments.output_file { Some(output_path) => { let mut file = std::fs::File::create(output_path)?; for (group_name, mut results) in groups_results.into_iter() { results.sort_worst(); - results.print_worst_results(arguments.group_max, group_name); - results.write_all(&mut file, group_name)?; + results.print_worst_results(arguments.group_max, &group_name.to_string()); + results.write_all(&mut file, &group_name.to_string())?; writeln!(file)?; println!(); println!(); @@ -37,8 +67,8 @@ fn main() -> anyhow::Result<()> { let mut stdout = std::io::stdout(); for (group_name, mut results) in groups_results.into_iter() { results.sort_worst(); - results.print_worst_results(arguments.group_max, group_name); - results.write_all(&mut stdout, group_name)?; + results.print_worst_results(arguments.group_max, &group_name.to_string()); + results.write_all(&mut stdout, &group_name.to_string())?; writeln!(stdout)?; println!(); println!(); diff --git a/benchmark_analyzer/src/lib.rs b/benchmark_analyzer/src/lib.rs index f18b07de..622f4eb5 100644 --- a/benchmark_analyzer/src/lib.rs +++ b/benchmark_analyzer/src/lib.rs @@ -2,18 +2,34 @@ //! The benchmark analyzer library. //! 
-pub(crate) mod benchmark; +pub mod analysis; +pub mod model; +pub mod output; +pub mod results; +pub mod util; -pub use self::benchmark::format::csv::Csv as CsvSerializer; -pub use self::benchmark::format::json::Json as JsonSerializer; -pub use self::benchmark::group::element::input::Input; -pub use self::benchmark::group::element::selector::Selector as TestSelector; -pub use self::benchmark::group::element::Element as BenchmarkElement; -pub use self::benchmark::group::Group as BenchmarkGroup; -pub use self::benchmark::metadata::Metadata; -pub use self::benchmark::Benchmark; +pub use crate::output::format::csv::Csv as CsvSerializer; +pub use crate::output::format::json::lnt::JsonLNT as JsonLNTSerializer; +pub use crate::output::format::json::native::Json as JsonNativeSerializer; -/// -/// The all elements group name. -/// -pub const BENCHMARK_ALL_GROUP_NAME: &str = "All"; +pub use crate::model::benchmark::test::codegen::versioned::executable::run::Run; +pub use crate::model::benchmark::test::codegen::versioned::executable::Executable; +pub use crate::model::benchmark::test::codegen::versioned::VersionedGroup; +pub use crate::model::benchmark::test::codegen::CodegenGroup; +pub use crate::model::benchmark::test::input::Input; +pub use crate::model::benchmark::test::selector::Selector as TestSelector; +pub use crate::model::benchmark::test::Test; +pub use crate::model::benchmark::write_to_file; +pub use crate::model::benchmark::Benchmark; +pub use crate::model::context::validate_context; +pub use crate::model::context::Context as BenchmarkContext; + +// Metadata for various parts of the model +pub use crate::model::benchmark::metadata::BenchmarkVersion; +pub use crate::model::benchmark::metadata::Metadata as BenchmarkMetadata; +pub use crate::model::benchmark::test::codegen::versioned::executable::metadata::Metadata as ExecutableMetadata; +pub use crate::model::benchmark::test::metadata::Metadata as TestMetadata; + +pub use crate::results::group::Group as 
ResultsGroup; + +pub use crate::model::evm_interpreter::GROUP_NAME as TEST_GROUP_EVM_INTERPRETER; diff --git a/benchmark_analyzer/src/model/benchmark/metadata.rs b/benchmark_analyzer/src/model/benchmark/metadata.rs new file mode 100644 index 00000000..93695e63 --- /dev/null +++ b/benchmark_analyzer/src/model/benchmark/metadata.rs @@ -0,0 +1,36 @@ +//! +//! Information associated with the benchmark run. +//! + +use chrono::DateTime; +use chrono::Utc; +use serde::Deserialize; +use serde::Serialize; + +use crate::model::context::Context; + +/// Version of the benchmark format. +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +pub enum BenchmarkVersion { + #[default] + /// Flat format, a map from key (Identifier + mode) to measurements. + V1, + /// New format with metadata. + V2, +} + +/// +/// Information associated with the benchmark run. +/// +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +pub struct Metadata { + /// Version for the benchmark report. + pub version: BenchmarkVersion, + /// Start of the benchmark run. + pub start: DateTime, + /// End of the benchmark run. + pub end: DateTime, + /// Context of benchmarking, passed from compiler tester. + #[serde(skip)] + pub context: Option, +} diff --git a/benchmark_analyzer/src/model/benchmark/mod.rs b/benchmark_analyzer/src/model/benchmark/mod.rs new file mode 100644 index 00000000..0e82ceb5 --- /dev/null +++ b/benchmark_analyzer/src/model/benchmark/mod.rs @@ -0,0 +1,78 @@ +//! +//! The benchmark representation. +//! + +pub mod metadata; +pub mod test; + +use std::collections::BTreeMap; +use std::path::PathBuf; + +use serde::Deserialize; +use serde::Serialize; + +use crate::output::comparison_result::Output; +use crate::output::file::File; +use crate::output::IBenchmarkSerializer; + +use metadata::Metadata; + +use self::test::Test; + +/// +/// The benchmark representation. 
+/// +#[derive(Debug, Default, Serialize, Deserialize)] +pub struct Benchmark { + /// Metadata related to the whole benchmark. + pub metadata: Metadata, + /// The tests. + pub tests: BTreeMap, +} + +/// +/// Writes the benchmark results to a file using a provided serializer. +/// +pub fn write_to_file( + benchmark: &Benchmark, + path: PathBuf, + serializer: impl IBenchmarkSerializer, +) -> anyhow::Result<()> { + match serializer + .serialize_to_string(benchmark) + .expect("Always valid") + { + Output::SingleFile(contents) => { + std::fs::write(path.as_path(), contents) + .map_err(|error| anyhow::anyhow!("Benchmark file {path:?} writing: {error}"))?; + } + Output::MultipleFiles(files) => { + if !files.is_empty() { + std::fs::create_dir_all(&path)?; + } + for File { + path: relative_path, + contents, + } in files + { + let file_path = path.join(relative_path); + std::fs::write(file_path.as_path(), contents).map_err(|error| { + anyhow::anyhow!("Benchmark file {file_path:?} writing: {error}") + })?; + } + } + } + Ok(()) +} + +impl TryFrom for Benchmark { + type Error = anyhow::Error; + + fn try_from(path: PathBuf) -> Result { + let text = std::fs::read_to_string(path.as_path()) + .map_err(|error| anyhow::anyhow!("Benchmark file {path:?} reading: {error}"))?; + let json: Self = serde_json::from_str(text.as_str()) + .map_err(|error| anyhow::anyhow!("Benchmark file {path:?} parsing: {error}"))?; + Ok(json) + } +} diff --git a/benchmark_analyzer/src/model/benchmark/test/codegen/mod.rs b/benchmark_analyzer/src/model/benchmark/test/codegen/mod.rs new file mode 100644 index 00000000..1cde5971 --- /dev/null +++ b/benchmark_analyzer/src/model/benchmark/test/codegen/mod.rs @@ -0,0 +1,26 @@ +//! +//! Groups test instances by the code generator version. +//! + +pub mod versioned; + +use std::collections::BTreeMap; + +use serde::Deserialize; +use serde::Serialize; +use versioned::VersionedGroup; + +/// +/// The language version associated with a test. 
+/// +pub type Version = String; + +/// +/// Groups test instances by the code generator version. +/// +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct CodegenGroup { + #[serde(flatten)] + /// Inner groups that differ by the associated language version. + pub versioned_groups: BTreeMap, +} diff --git a/benchmark_analyzer/src/model/benchmark/test/codegen/versioned/executable/metadata.rs b/benchmark_analyzer/src/model/benchmark/test/codegen/versioned/executable/metadata.rs new file mode 100644 index 00000000..bc9776de --- /dev/null +++ b/benchmark_analyzer/src/model/benchmark/test/codegen/versioned/executable/metadata.rs @@ -0,0 +1,12 @@ +//! +//! Information associated with an executable in a benchmark. +//! + +use serde::Deserialize; +use serde::Serialize; + +/// +/// Information associated with an executable in a benchmark. +/// +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct Metadata {} diff --git a/benchmark_analyzer/src/model/benchmark/test/codegen/versioned/executable/mod.rs b/benchmark_analyzer/src/model/benchmark/test/codegen/versioned/executable/mod.rs new file mode 100644 index 00000000..65353f30 --- /dev/null +++ b/benchmark_analyzer/src/model/benchmark/test/codegen/versioned/executable/mod.rs @@ -0,0 +1,26 @@ +//! +//! Executable is the compiled artifact corresponding to the test. +//! Executables differ by compilation flags. +//! + +pub mod metadata; +pub mod run; + +use metadata::Metadata; +use run::Run; +use serde::Deserialize; +use serde::Serialize; + +/// +/// Executable is the compiled artifact corresponding to the test. +/// Executables differ by compilation flags. +/// +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Executable { + #[serde(default, skip)] + /// Metadata associated with the compiled executable. + pub metadata: Metadata, + #[serde(flatten)] + /// Measurements. 
+ pub run: Run, +} diff --git a/benchmark_analyzer/src/benchmark/group/element/mod.rs b/benchmark_analyzer/src/model/benchmark/test/codegen/versioned/executable/run/mod.rs similarity index 56% rename from benchmark_analyzer/src/benchmark/group/element/mod.rs rename to benchmark_analyzer/src/model/benchmark/test/codegen/versioned/executable/run/mod.rs index b3fe8c1d..48652f60 100644 --- a/benchmark_analyzer/src/benchmark/group/element/mod.rs +++ b/benchmark_analyzer/src/model/benchmark/test/codegen/versioned/executable/run/mod.rs @@ -1,22 +1,15 @@ //! -//! The benchmark element. +//! A run of a test with fixed compiler options (mode). //! -pub mod input; -pub mod selector; - use serde::Deserialize; use serde::Serialize; -use crate::benchmark::metadata::Metadata; - /// -/// The benchmark element. +/// A run of a test with fixed compiler options (mode). /// #[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Element { - /// Associated metadata. - pub metadata: Metadata, +pub struct Run { /// The contract size, `Some` for contracts deploys. pub size: Option, /// The number of cycles. @@ -27,19 +20,12 @@ pub struct Element { pub gas: u64, } -impl Element { +impl Run { /// /// A shortcut constructor. /// - pub fn new( - metadata: Metadata, - size: Option, - cycles: usize, - ergs: u64, - gas: u64, - ) -> Self { + pub fn new(size: Option, cycles: usize, ergs: u64, gas: u64) -> Self { Self { - metadata, size, cycles, ergs, diff --git a/benchmark_analyzer/src/model/benchmark/test/codegen/versioned/mod.rs b/benchmark_analyzer/src/model/benchmark/test/codegen/versioned/mod.rs new file mode 100644 index 00000000..ce3ced25 --- /dev/null +++ b/benchmark_analyzer/src/model/benchmark/test/codegen/versioned/mod.rs @@ -0,0 +1,27 @@ +//! +//! Groups test runs by the language version associated with them. +//! 
+ +pub mod executable; + +use std::collections::BTreeMap; + +use executable::Executable; +use serde::Deserialize; +use serde::Serialize; + +/// +/// Encoded compiler mode. In future, it can be replaced with a structured type +/// shared between crates `benchmark_analyzer` and `compiler_tester`. +/// +pub type Mode = String; + +/// +/// Groups test runs by the language version associated with them. +/// +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct VersionedGroup { + #[serde(flatten)] + /// Compiled executables associated with test runs. + pub executables: BTreeMap, +} diff --git a/benchmark_analyzer/src/benchmark/group/element/input.rs b/benchmark_analyzer/src/model/benchmark/test/input.rs similarity index 72% rename from benchmark_analyzer/src/benchmark/group/element/input.rs rename to benchmark_analyzer/src/model/benchmark/test/input.rs index 55e52616..0190968e 100644 --- a/benchmark_analyzer/src/benchmark/group/element/input.rs +++ b/benchmark_analyzer/src/model/benchmark/test/input.rs @@ -15,6 +15,11 @@ pub enum Input { /// Contract identifier, usually file name and contract name separated by a colon. contract_identifier: String, }, + /// The fallback method. + Fallback { + /// Index in the array of inputs. + input_index: usize, + }, /// The contract call. Runtime { /// Index in the array of inputs. @@ -34,6 +39,24 @@ pub enum Input { }, } +impl Input { + /// Returns `true` if the input is [`Deployer`]. + /// + /// [`Deployer`]: Input::Deployer + #[must_use] + pub fn is_deployer(&self) -> bool { + matches!(self, Self::Deployer { .. }) + } + + /// Returns `true` if the input is [`Fallback`]. + /// + /// [`Fallback`]: Input::Fallback + #[must_use] + pub fn is_fallback(&self) -> bool { + matches!(self, Self::Fallback { .. 
}) + } +} + impl std::fmt::Display for Input { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -49,6 +72,7 @@ impl std::fmt::Display for Input { Input::Balance { input_index } => { f.write_fmt(format_args!("#balance_check:{input_index}")) } + Input::Fallback { input_index } => f.write_fmt(format_args!("#fallback:{input_index}")), } } } diff --git a/benchmark_analyzer/src/model/benchmark/test/metadata.rs b/benchmark_analyzer/src/model/benchmark/test/metadata.rs new file mode 100644 index 00000000..ebd565c9 --- /dev/null +++ b/benchmark_analyzer/src/model/benchmark/test/metadata.rs @@ -0,0 +1,29 @@ +//! +//! Information associated with a specific test in benchmark. +//! + +use serde::Deserialize; +use serde::Serialize; + +use crate::model::benchmark::test::selector::Selector; + +/// +/// Information associated with a specific test in benchmark. +/// +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +pub struct Metadata { + #[serde(default)] + /// Tests may be tagged with one or many groups. + pub tags: Vec, + /// Test selector. + pub selector: Selector, +} + +impl Metadata { + /// + /// Creates a new instance of test metadata provided with the test selector and tags. + /// + pub fn new(selector: Selector, tags: Vec) -> Self { + Self { selector, tags } + } +} diff --git a/benchmark_analyzer/src/model/benchmark/test/mod.rs b/benchmark_analyzer/src/model/benchmark/test/mod.rs new file mode 100644 index 00000000..ebd82fd1 --- /dev/null +++ b/benchmark_analyzer/src/model/benchmark/test/mod.rs @@ -0,0 +1,44 @@ +//! +//! The benchmark group representation. +//! + +pub mod codegen; +pub mod input; +pub mod metadata; +pub mod selector; + +use std::collections::BTreeMap; + +use codegen::CodegenGroup; +use metadata::Metadata; +use serde::Deserialize; +use serde::Serialize; + +/// +/// The codegen associated with a test definition. +/// +pub type Codegen = String; + +/// +/// The benchmark group representation. 
+/// +#[derive(Debug, Default, Serialize, Deserialize, Clone)] +pub struct Test { + /// Metadata for this test. + #[serde(default)] + pub metadata: Metadata, + /// Versions. + pub codegen_groups: BTreeMap, +} + +impl Test { + /// + /// Creates a new test with provided metadata. + /// + pub fn new(metadata: Metadata) -> Self { + Self { + codegen_groups: Default::default(), + metadata, + } + } +} diff --git a/benchmark_analyzer/src/benchmark/group/element/selector.rs b/benchmark_analyzer/src/model/benchmark/test/selector.rs similarity index 88% rename from benchmark_analyzer/src/benchmark/group/element/selector.rs rename to benchmark_analyzer/src/model/benchmark/test/selector.rs index b804d954..00abf174 100644 --- a/benchmark_analyzer/src/benchmark/group/element/selector.rs +++ b/benchmark_analyzer/src/model/benchmark/test/selector.rs @@ -5,12 +5,12 @@ use serde::Deserialize; use serde::Serialize; -use crate::benchmark::group::element::input::Input; +use crate::model::benchmark::test::input::Input; /// /// Test selector, unambiously locating a test suite, case, or input. /// -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] pub struct Selector { /// Path to the file containing test. pub path: String, diff --git a/benchmark_analyzer/src/model/context/mod.rs b/benchmark_analyzer/src/model/context/mod.rs new file mode 100644 index 00000000..9bd9ca65 --- /dev/null +++ b/benchmark_analyzer/src/model/context/mod.rs @@ -0,0 +1,35 @@ +//! +//! A context for benchmarking, passed by compiler-tester. +//! + +/// +/// A context for benchmarking, passed by compiler-tester. +/// +#[derive(Clone, Debug, Eq, PartialEq, serde::Serialize, serde::Deserialize)] +pub struct Context { + /// Unique identifier of the machine. + pub machine: String, + /// Target, for example "eravm" or "evm". 
+ pub target: era_compiler_common::Target, + /// Type of `solc`, for example `zksync` + pub toolchain: String, +} + +/// +/// Checks that the context is well-formed. +/// +pub fn validate_context(context: &Context) -> anyhow::Result<()> { + let Context { + machine, + toolchain, + target: _, + } = context; + + if machine.is_empty() { + anyhow::bail!("The `machine` field in the benchmark context is empty") + } + if toolchain.is_empty() { + anyhow::bail!("The `toolchain` field in the benchmark context is empty") + } + Ok(()) +} diff --git a/benchmark_analyzer/src/model/evm_interpreter.rs b/benchmark_analyzer/src/model/evm_interpreter.rs new file mode 100644 index 00000000..91e09836 --- /dev/null +++ b/benchmark_analyzer/src/model/evm_interpreter.rs @@ -0,0 +1,148 @@ +//! +//! Collects the benchmark model's definitions related to EVM Interpreter test suite. +//! + +/// Path to EVM interpreter test. +pub const TEST_PATH: &str = "tests/solidity/complex/interpreter/test.json"; + +/// Component of a name of the results group where tests for EVM opcodes reside. +pub const GROUP_NAME: &str = "EVMInterpreter"; + +/// The EVM opcodes to test. 
+pub const OPCODES: [&str; 135] = [ + "ADD", + "MUL", + "SUB", + "DIV", + "SDIV", + "MOD", + "SMOD", + "ADDMOD", + "MULMOD", + "EXP", + "SIGNEXTEND", + "LT", + "GT", + "SLT", + "SGT", + "EQ", + "ISZERO", + "AND", + "OR", + "XOR", + "NOT", + "BYTE", + "SHL", + "SHR", + "SAR", + "SGT", + "SHA3", + "ADDRESS", + "BALANCE", + "ORIGIN", + "CALLER", + "CALLVALUE", + "CALLDATALOAD", + "CALLDATASIZE", + "CALLDATACOPY", + "CODESIZE", + "CODECOPY", + "GASPRICE", + "EXTCODESIZE", + "EXTCODECOPY", + "RETURNDATASIZE", + "RETURNDATACOPY", + "EXTCODEHASH", + "BLOCKHASH", + "COINBASE", + "TIMESTAMP", + "NUMBER", + "PREVRANDAO", + "GASLIMIT", + "CHAINID", + "SELFBALANCE", + "BASEFEE", + "POP", + "MLOAD", + "MSTORE", + "MSTORE8", + "SLOAD", + "SSTORE", + "JUMP", + "JUMPI", + "PC", + "MSIZE", + "GAS", + "JUMPDEST", + "PUSH0", + "PUSH1", + "PUSH2", + "PUSH4", + "PUSH5", + "PUSH6", + "PUSH7", + "PUSH8", + "PUSH9", + "PUSH10", + "PUSH11", + "PUSH12", + "PUSH13", + "PUSH14", + "PUSH15", + "PUSH16", + "PUSH17", + "PUSH18", + "PUSH19", + "PUSH20", + "PUSH21", + "PUSH22", + "PUSH23", + "PUSH24", + "PUSH25", + "PUSH26", + "PUSH27", + "PUSH28", + "PUSH29", + "PUSH30", + "PUSH31", + "PUSH32", + "DUP1", + "DUP2", + "DUP3", + "DUP4", + "DUP5", + "DUP6", + "DUP7", + "DUP8", + "DUP9", + "DUP10", + "DUP11", + "DUP12", + "DUP13", + "DUP14", + "DUP15", + "DUP16", + "SWAP1", + "SWAP2", + "SWAP3", + "SWAP4", + "SWAP5", + "SWAP6", + "SWAP7", + "SWAP8", + "SWAP9", + "SWAP10", + "SWAP11", + "SWAP12", + "SWAP13", + "SWAP14", + "SWAP15", + "SWAP16", + "CALL", + "STATICCALL", + "DELEGATECALL", + "CREATE", + "CREATE2", + "RETURN", + "REVERT", +]; diff --git a/benchmark_analyzer/src/model/mod.rs b/benchmark_analyzer/src/model/mod.rs new file mode 100644 index 00000000..1c6ad9a0 --- /dev/null +++ b/benchmark_analyzer/src/model/mod.rs @@ -0,0 +1,7 @@ +//! +//! Defines a model of benchmark data. +//! 
+ +pub mod benchmark; +pub mod context; +pub mod evm_interpreter; diff --git a/benchmark_analyzer/src/output/comparison_result.rs b/benchmark_analyzer/src/output/comparison_result.rs new file mode 100644 index 00000000..7f25f048 --- /dev/null +++ b/benchmark_analyzer/src/output/comparison_result.rs @@ -0,0 +1,16 @@ +//! +//! Result of comparing two benchmarks. +//! + +use super::file::File; + +/// +/// Result of comparing two benchmarks. +/// +pub enum Output { + /// Benchmark output is a single unnamed file. + SingleFile(String), + /// Benchmark output is structured as a file tree, relative to some + /// user-provided output directory. + MultipleFiles(Vec), +} diff --git a/benchmark_analyzer/src/output/file.rs b/benchmark_analyzer/src/output/file.rs new file mode 100644 index 00000000..8813c0aa --- /dev/null +++ b/benchmark_analyzer/src/output/file.rs @@ -0,0 +1,15 @@ +//! +//! Represents a single benchmark output file in a set of many. +//! + +use std::path::PathBuf; + +/// +/// Represents a single benchmark output file in a set of many. +/// +pub struct File { + /// Path to this file relative to user-provided root. + pub path: PathBuf, + /// File contents. + pub contents: String, +} diff --git a/benchmark_analyzer/src/output/format/csv.rs b/benchmark_analyzer/src/output/format/csv.rs new file mode 100644 index 00000000..52c3fa12 --- /dev/null +++ b/benchmark_analyzer/src/output/format/csv.rs @@ -0,0 +1,88 @@ +//! +//! Serializing benchmark data to CSV. +//! 
+ +use std::fmt::Write as _; + +use crate::model::benchmark::test::metadata::Metadata as TestMetadata; +use crate::model::benchmark::test::selector::Selector; +use crate::model::benchmark::test::Test; +use crate::model::benchmark::Benchmark; +use crate::output::IBenchmarkSerializer; +use crate::output::Output; + +/// +/// Serialize the benchmark to CSV in the following format: +/// "group", "codegen", "version", "optimizations", "path", "case", "input", "size", "cycles", "ergs", "gas"" +/// +#[derive(Default)] +pub struct Csv; + +impl IBenchmarkSerializer for Csv { + type Err = std::fmt::Error; + + fn serialize_to_string(&self, benchmark: &Benchmark) -> Result { + let mut result = String::with_capacity(estimate_csv_size(benchmark)); + result.push_str( + r#""group", "codegen", "version", "optimizations", "path", "case", "input", "size", "cycles", "ergs", "gas""#, + ); + + result.push('\n'); + for Test { + metadata: + TestMetadata { + tags, + selector: Selector { path, case, input }, + }, + codegen_groups, + } in benchmark.tests.values() + { + for (codegen, codegen_group) in codegen_groups { + for (version, versioned_group) in &codegen_group.versioned_groups { + for ( + optimizations, + crate::Executable { + run: + crate::Run { + size, + cycles, + ergs, + gas, + }, + .. 
+ }, + ) in &versioned_group.executables + { + let tags = { + let mut tags = tags.clone(); + tags.sort(); + tags.join(" ") + }; + let size_str = size.map(|s| s.to_string()).unwrap_or_default(); + let input = input.clone().map(|s| s.to_string()).unwrap_or_default(); + let case = case.as_deref().unwrap_or_default(); + writeln!( + &mut result, + r#""{tags}", "{codegen}", "{version}", "{optimizations}", "{path}", "{case}", "{input}", {size_str}, {cycles}, {ergs}, {gas}"#, + )?; + } + } + } + } + Ok(Output::SingleFile(result)) + } +} + +fn estimate_csv_line_length() -> usize { + let number_fields = 4; + let number_field_estimated_max_length = 15; + let group_name_estimated_max = 10; + let test_name_estimated_max = 300; + group_name_estimated_max + + test_name_estimated_max + + number_fields * number_field_estimated_max_length +} + +fn estimate_csv_size(benchmark: &Benchmark) -> usize { + (benchmark.tests.len() + 1) * estimate_csv_line_length() +} diff --git a/benchmark_analyzer/src/output/format/json/lnt/benchmark/machine.rs b/benchmark_analyzer/src/output/format/json/lnt/benchmark/machine.rs new file mode 100644 index 00000000..8536b686 --- /dev/null +++ b/benchmark_analyzer/src/output/format/json/lnt/benchmark/machine.rs @@ -0,0 +1,20 @@ +//! +//! Description of the `machine` section in the JSON file generated for LNT. +//! See https://llvm.org/docs/lnt/importing_data.html +//! + +/// +/// Description of the `machine` section in the JSON file generated for LNT. +/// See https://llvm.org/docs/lnt/importing_data.html +/// +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] +pub struct Machine { + /// Machine name, for example "LNT-AArch64-A53-O3__clang_DEV__aarch64". + pub name: String, + /// Target name, for example "eravm" or "solc". + pub target: era_compiler_common::Target, + /// Optimizations level, for example "+M3B3". + pub optimizations: String, + /// Type of solc, for example, "zksync". 
+ pub toolchain: String, +} diff --git a/benchmark_analyzer/src/output/format/json/lnt/benchmark/mod.rs b/benchmark_analyzer/src/output/format/json/lnt/benchmark/mod.rs new file mode 100644 index 00000000..2e8fda40 --- /dev/null +++ b/benchmark_analyzer/src/output/format/json/lnt/benchmark/mod.rs @@ -0,0 +1,30 @@ +//! Root benchmark structure describing a single JSON file passed to LNT. +//! One such file is generated for every machine configuration. +//! See https://llvm.org/docs/lnt/importing_data.html + +pub mod machine; +pub mod run_description; +pub mod test_description; + +use machine::Machine; +use run_description::RunDescription; +use test_description::TestDescription; + +use crate::BenchmarkVersion; + +/// +/// Root benchmark structure describing a single JSON file passed to LNT. +/// One such file is generated for every machine configuration. +/// See https://llvm.org/docs/lnt/importing_data.html +/// +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] +pub struct LntBenchmark { + /// Benchmark format version + pub format_version: BenchmarkVersion, + /// Machine description is used as a group identifier + pub machine: Machine, + /// Describes the runtime benchmark characteristics, for example, when it has started and when it has ended + pub run: RunDescription, + /// Tests grouped in this benchmark. + pub tests: Vec, +} diff --git a/benchmark_analyzer/src/output/format/json/lnt/benchmark/run_description.rs b/benchmark_analyzer/src/output/format/json/lnt/benchmark/run_description.rs new file mode 100644 index 00000000..bb0ae38d --- /dev/null +++ b/benchmark_analyzer/src/output/format/json/lnt/benchmark/run_description.rs @@ -0,0 +1,17 @@ +//! +//! Description of the benchmark run in a JSON file passed to LNT. +//! + +use chrono::DateTime; +use chrono::Utc; + +/// +/// Description of the benchmark run in a JSON file passed to LNT. 
+/// +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] +pub struct RunDescription { + /// Time when benchmark run was started. + pub start_time: DateTime<Utc>, + /// Time when benchmark run was finished. + pub end_time: DateTime<Utc>, +} diff --git a/benchmark_analyzer/src/output/format/json/lnt/benchmark/test_description.rs b/benchmark_analyzer/src/output/format/json/lnt/benchmark/test_description.rs new file mode 100644 index 00000000..17eb3371 --- /dev/null +++ b/benchmark_analyzer/src/output/format/json/lnt/benchmark/test_description.rs @@ -0,0 +1,18 @@ +//! +//! Description of a single measurement in a JSON file passed to LNT. +//! + +use crate::model::benchmark::test::codegen::versioned::executable::run::Run; + +/// +/// Description of a single measurement in a JSON file passed to LNT. +/// +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] +pub struct TestDescription { + /// A unique identifier of the test, incorporating language version, optimization levels and so on. + /// See [crate::output::format::json::lnt::test_name]. + pub name: String, + /// Measurements: gas, ergs, cycles, and size for contract deploys. + #[serde(flatten)] + pub measurements: Run, +} diff --git a/benchmark_analyzer/src/output/format/json/lnt/error.rs b/benchmark_analyzer/src/output/format/json/lnt/error.rs new file mode 100644 index 00000000..1b9e73ac --- /dev/null +++ b/benchmark_analyzer/src/output/format/json/lnt/error.rs @@ -0,0 +1,25 @@ +//! +//! Errors occurring during generation of LNT-compatible JSON files. +//! + +/// +/// Errors occurring during generation of LNT-compatible JSON files. +/// +#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] +pub enum LntSerializationError { + /// + /// No instance of [crate::model::context::Context] is provided.
+ /// + NoContext, +} + +impl std::fmt::Display for LntSerializationError { + #[allow(unreachable_patterns)] + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + LntSerializationError::NoContext => f.write_str("LNT backend requires explicitly passed benchmark context, but no context was provided."), + _ => f.write_fmt(format_args!("{self:?}")), + } + } +} +impl std::error::Error for LntSerializationError {} diff --git a/benchmark_analyzer/src/output/format/json/lnt/mod.rs b/benchmark_analyzer/src/output/format/json/lnt/mod.rs new file mode 100644 index 00000000..be2d1c1e --- /dev/null +++ b/benchmark_analyzer/src/output/format/json/lnt/mod.rs @@ -0,0 +1,127 @@ +//! +//! JSON format compatible with LNT. +//! + +pub mod benchmark; +pub mod error; + +use std::collections::BTreeMap; +use std::path::PathBuf; + +use benchmark::machine::Machine; +use benchmark::run_description::RunDescription; +use benchmark::test_description::TestDescription; +use benchmark::LntBenchmark; +use error::LntSerializationError; + +use crate::model::benchmark::test::metadata::Metadata as TestMetadata; +use crate::model::benchmark::test::selector::Selector; +use crate::model::benchmark::test::Test; +use crate::model::benchmark::Benchmark; +use crate::output::format::json::make_json_file; +use crate::output::IBenchmarkSerializer; +use crate::output::Output; + +/// +/// Serialize the benchmark to a set of JSON files compatible with LNT format. +/// +#[derive(Default)] +pub struct JsonLNT; + +/// +/// Generate the test name for a measurement, containing a unique test identifier. 
+/// +fn test_name(selector: &Selector, version: impl std::fmt::Display) -> String { + fn shorten_file_name(name: &str) -> String { + let path_buf = PathBuf::from(name); + path_buf + .file_name() + .expect("Always valid") + .to_str() + .expect("Always valid") + .to_string() + } + let Selector { path, case, input } = selector; + let short_path = shorten_file_name(path); + let short_input = match input { + Some(crate::Input::Deployer { + contract_identifier, + }) => Some(crate::Input::Deployer { + contract_identifier: shorten_file_name(contract_identifier), + }), + _ => input.clone(), + }; + format!( + "{} {version}", + Selector { + path: short_path.to_string(), + case: case.clone(), + input: short_input, + } + ) +} + +impl IBenchmarkSerializer for JsonLNT { + type Err = LntSerializationError; + + fn serialize_to_string(&self, benchmark: &Benchmark) -> anyhow::Result<Output, Self::Err> { + let mut files: BTreeMap<String, LntBenchmark> = Default::default(); + + let context = if let Some(context) = &benchmark.metadata.context { + context + } else { + return Err(LntSerializationError::NoContext); + }; + + for Test { + metadata: TestMetadata { selector, .. }, + codegen_groups, + } in benchmark.tests.values() + { + for (codegen, codegen_group) in codegen_groups { + for (version, versioned_group) in &codegen_group.versioned_groups { + for ( + optimizations, + crate::Executable { + run: measurements, ..
+ }, + ) in &versioned_group.executables + { + let machine_name = format!("{}-{codegen}-{optimizations}", context.machine); + + let machine = Machine { + name: context.machine.clone(), + target: context.target, + optimizations: optimizations.to_owned(), + toolchain: context.toolchain.clone(), + }; + let run = RunDescription { + start_time: benchmark.metadata.start, + end_time: benchmark.metadata.end, + }; + files + .entry(machine_name) + .or_insert(LntBenchmark { + format_version: benchmark.metadata.version.clone(), + machine, + run, + tests: vec![], + }) + .tests + .push(TestDescription { + name: test_name(selector, version), + measurements: measurements.clone(), + }); + } + } + } + } + + Ok(Output::MultipleFiles( + files + .iter() + .map(|(key, value)| make_json_file(key, value)) + .collect(), + )) + } +} diff --git a/benchmark_analyzer/src/output/format/json/mod.rs b/benchmark_analyzer/src/output/format/json/mod.rs new file mode 100644 index 00000000..3aac068d --- /dev/null +++ b/benchmark_analyzer/src/output/format/json/mod.rs @@ -0,0 +1,20 @@ +//! +//! Defines JSON-based output formats. +//! + +pub mod lnt; +pub mod native; + +use crate::output::file::File; + +/// +/// Create a new [`crate::output::File`] instance with an object serialized to JSON. +/// +pub(crate) fn make_json_file<T>(filename: impl std::fmt::Display, object: T) -> File +where + T: Sized + serde::Serialize, +{ + let path = format!("{filename}.json").into(); + let contents = serde_json::to_string_pretty(&object).expect("Always valid"); + File { path, contents } +} diff --git a/benchmark_analyzer/src/output/format/json/native/mod.rs b/benchmark_analyzer/src/output/format/json/native/mod.rs new file mode 100644 index 00000000..42cf7f82 --- /dev/null +++ b/benchmark_analyzer/src/output/format/json/native/mod.rs @@ -0,0 +1,21 @@ +//! +//! Native JSON format that corresponds to the inner benchmark analyzer data model. +//!
+ +use crate::model::benchmark::Benchmark; +use crate::output::comparison_result::Output; +use crate::output::IBenchmarkSerializer; + +/// +/// Serialize the benchmark internal model to JSON using `serde` library. +/// +#[derive(Default)] +pub struct Json; + +impl IBenchmarkSerializer for Json { + type Err = serde_json::error::Error; + + fn serialize_to_string(&self, benchmark: &Benchmark) -> Result<Output, Self::Err> { + serde_json::to_string_pretty(benchmark).map(Output::SingleFile) + } +} diff --git a/benchmark_analyzer/src/output/format/mod.rs b/benchmark_analyzer/src/output/format/mod.rs new file mode 100644 index 00000000..4c33cf82 --- /dev/null +++ b/benchmark_analyzer/src/output/format/mod.rs @@ -0,0 +1,6 @@ +//! +//! Serialization of benchmark data in different output formats. +//! + +pub mod csv; +pub mod json; diff --git a/benchmark_analyzer/src/output/mod.rs b/benchmark_analyzer/src/output/mod.rs new file mode 100644 index 00000000..01903740 --- /dev/null +++ b/benchmark_analyzer/src/output/mod.rs @@ -0,0 +1,26 @@ +//! +//! Benchmark-analyzer output. +//! + +pub mod comparison_result; +pub mod file; +pub mod format; + +use comparison_result::Output; + +use crate::model::benchmark::Benchmark; + +/// +/// Serialization format for benchmark data. +/// +pub trait IBenchmarkSerializer { + /// + /// Type of serialization error. + /// + type Err: std::error::Error; + + /// + /// Serialize benchmark data in the selected format. + /// + fn serialize_to_string(&self, benchmark: &Benchmark) -> anyhow::Result<Output, Self::Err>; +} diff --git a/benchmark_analyzer/src/results/group.rs b/benchmark_analyzer/src/results/group.rs new file mode 100644 index 00000000..08b03131 --- /dev/null +++ b/benchmark_analyzer/src/results/group.rs @@ -0,0 +1,175 @@ +//! +//! A group of results +//! + +use crate::model::evm_interpreter; +use regex::Regex; +use std::fmt::Display; + +/// +/// Group of results.
+/// +#[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)] +pub enum Group<'a> { + /// A group with EVM interpreter tests. + EVMInterpreter { + /// Codegen used to produce executables for all tests in this group. + codegen: &'a str, + /// Optimization level used to produce executables for all tests in this group. + optimizations: &'a str, + }, + /// A default group containing all tests. + Default { + /// Codegen used to produce executables for all tests in this group. + codegen: &'a str, + /// Optimization level used to produce executables for all tests in this group. + optimizations: &'a str, + }, + /// A user-named group. + Named { + /// Group name, provided by the user in the test definition file. + name: &'a str, + /// Codegen used to produce executables for all tests in this group. + codegen: &'a str, + /// Optimization level used to produce executables for all tests in this group. + optimizations: &'a str, + }, + /// A group comparing two groups with distinct names: + /// - one belonging to a reference run, + /// - another belonging to a candidate run. + Comparison { + /// Group belonging to the reference run. + reference: Box<Group<'a>>, + /// Group belonging to the candidate run. + candidate: Box<Group<'a>>, + }, +} + +impl Group<'_> { + fn comparison_name(reference: &Group<'_>, candidate: &Group<'_>) -> String { + if reference.name() == candidate.name() { + format!( + "{}: {}{} vs {}{}", + reference.name(), + reference.codegen().unwrap_or_default(), + reference.optimizations().unwrap_or_default(), + candidate.codegen().unwrap_or_default(), + candidate.optimizations().unwrap_or_default(), + ) + } else { + format!("{} vs {}", reference.name(), candidate.name()) + } + } + + /// + /// Returns true if the provided regular expression matches the string representation of the group. + /// + pub fn regex_matches(&self, regex: &Regex) -> bool { + !self.is_comparison() && (regex.is_match(&self.to_string())) + } + + /// + /// Codegen used in this group.
+ /// + pub fn codegen(&self) -> Option<String> { + match self { + Group::EVMInterpreter { codegen, .. } => Some(codegen.to_string()), + Group::Default { codegen, .. } => Some(codegen.to_string()), + Group::Named { codegen, .. } => Some(codegen.to_string()), + Group::Comparison { .. } => None, + } + } + + /// + /// Optimizations used in this group. + /// + pub fn optimizations(&self) -> Option<String> { + match self { + Group::EVMInterpreter { optimizations, .. } => Some(optimizations.to_string()), + Group::Default { optimizations, .. } => Some(optimizations.to_string()), + Group::Named { optimizations, .. } => Some(optimizations.to_string()), + Group::Comparison { .. } => None, + } + } + + /// + /// Name of the group. + /// + pub fn name(&self) -> String { + match self { + Group::EVMInterpreter { .. } => "EVMInterpreter".into(), + Group::Default { .. } => "All".into(), + Group::Named { name, .. } => name.to_string(), + Group::Comparison { .. } => "Comparison".into(), + } + } + + /// Returns `true` if the group is [`Comparison`]. + /// + /// [`Comparison`]: Group::Comparison + #[must_use] + pub fn is_comparison(&self) -> bool { + matches!(self, Self::Comparison { .. }) + } +} + +impl<'a> Group<'a> { + /// + /// Create a new group provided an optional tag, codegen and optimization level. + /// + pub fn from_tag(tag: Option<&'a str>, codegen: Option<&'a str>, opt: Option<&'a str>) -> Self { + let codegen = codegen.unwrap_or_default(); + let optimizations = opt.unwrap_or_default(); + match tag { + None => Self::Default { + optimizations, + codegen, + }, + Some(group_name) if group_name == evm_interpreter::GROUP_NAME => Self::EVMInterpreter { + optimizations, + codegen, + }, + Some(name) => Self::Named { + name, + codegen, + optimizations, + }, + } + } + + /// + /// Create a new group that compares two groups with distinct names: + /// - one belonging to a reference run, + /// - another belonging to a candidate run.
+ /// + pub fn new_comparison(reference: &Self, candidate: &Self) -> Self { + Self::Comparison { + reference: Box::new(reference.clone()), + candidate: Box::new(candidate.clone()), + } + } +} + +impl Display for Group<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Group::EVMInterpreter { + codegen, + optimizations, + } => f.write_fmt(format_args!("{} {codegen} {optimizations}", self.name())), + Group::Default { + codegen, + optimizations, + } => f.write_fmt(format_args!("{} {codegen} {optimizations}", self.name())), + Group::Named { + name, + codegen, + optimizations, + } => f.write_fmt(format_args!("{name} {codegen} {optimizations}")), + Group::Comparison { + reference, + candidate, + } => f.write_str(&Self::comparison_name(reference, candidate)), + } + } +} diff --git a/benchmark_analyzer/src/benchmark/group/results.rs b/benchmark_analyzer/src/results/mod.rs similarity index 86% rename from benchmark_analyzer/src/benchmark/group/results.rs rename to benchmark_analyzer/src/results/mod.rs index 3b306b92..e5970591 100644 --- a/benchmark_analyzer/src/benchmark/group/results.rs +++ b/benchmark_analyzer/src/results/mod.rs @@ -2,6 +2,9 @@ //! The benchmark group results. //! +pub mod group; + +use crate::model::benchmark::test::metadata::Metadata as TestMetadata; use colored::Colorize; use std::cmp; @@ -17,9 +20,9 @@ pub struct Results<'a> { /// The size total decrease result. pub size_total: f64, /// The size negative result test names. - pub size_negatives: Vec<(f64, &'a str)>, + pub size_negatives: Vec<(f64, &'a TestMetadata)>, /// The size positive result test names. - pub size_positives: Vec<(f64, &'a str)>, + pub size_positives: Vec<(f64, &'a TestMetadata)>, /// The cycles best result. pub cycles_best: f64, @@ -28,9 +31,9 @@ pub struct Results<'a> { /// The cycles total decrease result. pub cycles_total: f64, /// The cycles negative result test names. 
- pub cycles_negatives: Vec<(f64, &'a str)>, + pub cycles_negatives: Vec<(f64, &'a TestMetadata)>, /// The cycles positive result test names. - pub cycles_positives: Vec<(f64, &'a str)>, + pub cycles_positives: Vec<(f64, &'a TestMetadata)>, /// The ergs best result. pub ergs_best: f64, @@ -39,9 +42,9 @@ pub struct Results<'a> { /// The ergs total decrease result. pub ergs_total: f64, /// The ergs negative result test names. - pub ergs_negatives: Vec<(f64, &'a str)>, + pub ergs_negatives: Vec<(f64, &'a TestMetadata)>, /// The ergs positive result test names. - pub ergs_positives: Vec<(f64, &'a str)>, + pub ergs_positives: Vec<(f64, &'a TestMetadata)>, /// The gas best result. pub gas_best: f64, @@ -50,9 +53,9 @@ pub struct Results<'a> { /// The gas total decrease result. pub gas_total: f64, /// The gas negative result test names. - pub gas_negatives: Vec<(f64, &'a str)>, + pub gas_negatives: Vec<(f64, &'a TestMetadata)>, /// The gas positive result test names. - pub gas_positives: Vec<(f64, &'a str)>, + pub gas_positives: Vec<(f64, &'a TestMetadata)>, /// The EVM interpreter reference ratios. 
pub evm_interpreter_reference_ratios: Option>, @@ -69,26 +72,26 @@ impl<'a> Results<'a> { size_best: f64, size_worst: f64, size_total: f64, - size_negatives: Vec<(f64, &'a str)>, - size_positives: Vec<(f64, &'a str)>, + size_negatives: Vec<(f64, &'a TestMetadata)>, + size_positives: Vec<(f64, &'a TestMetadata)>, cycles_best: f64, cycles_worst: f64, cycles_total: f64, - cycles_negatives: Vec<(f64, &'a str)>, - cycles_positives: Vec<(f64, &'a str)>, + cycles_negatives: Vec<(f64, &'a TestMetadata)>, + cycles_positives: Vec<(f64, &'a TestMetadata)>, ergs_best: f64, ergs_worst: f64, ergs_total: f64, - ergs_negatives: Vec<(f64, &'a str)>, - ergs_positives: Vec<(f64, &'a str)>, + ergs_negatives: Vec<(f64, &'a TestMetadata)>, + ergs_positives: Vec<(f64, &'a TestMetadata)>, gas_best: f64, gas_worst: f64, gas_total: f64, - gas_negatives: Vec<(f64, &'a str)>, - gas_positives: Vec<(f64, &'a str)>, + gas_negatives: Vec<(f64, &'a TestMetadata)>, + gas_positives: Vec<(f64, &'a TestMetadata)>, ) -> Self { Self { size_best, @@ -206,7 +209,7 @@ impl<'a> Results<'a> { self.size_negatives.len() ); for (value, path) in self.size_negatives.iter().take(count) { - println!("{:010}: {}", Self::format_f64(*value), path); + println!("{:010}: {}", Self::format_f64(*value), path.selector); } println!(); println!( @@ -216,7 +219,7 @@ impl<'a> Results<'a> { self.cycles_negatives.len() ); for (value, path) in self.cycles_negatives.iter().take(count) { - println!("{:010}: {}", Self::format_f64(*value), path); + println!("{:010}: {}", Self::format_f64(*value), path.selector); } println!(); println!( @@ -226,7 +229,7 @@ impl<'a> Results<'a> { self.ergs_negatives.len() ); for (value, path) in self.ergs_negatives.iter().take(count) { - println!("{:010}: {}", Self::format_f64(*value), path); + println!("{:010}: {}", Self::format_f64(*value), path.selector); } println!(); println!( @@ -236,7 +239,7 @@ impl<'a> Results<'a> { self.gas_negatives.len() ); for (value, path) in 
self.gas_negatives.iter().take(count) { - println!("{:010}: {}", Self::format_f64(*value), path); + println!("{:010}: {}", Self::format_f64(*value), path.selector); } println!(); @@ -247,7 +250,7 @@ impl<'a> Results<'a> { self.size_positives.len() ); for (value, path) in self.size_positives.iter().take(count) { - println!("{:010}: {}", Self::format_f64(*value), path); + println!("{:010}: {}", Self::format_f64(*value), path.selector); } println!(); println!( @@ -257,7 +260,7 @@ impl<'a> Results<'a> { self.cycles_positives.len() ); for (value, path) in self.cycles_positives.iter().take(count) { - println!("{:010}: {}", Self::format_f64(*value), path); + println!("{:010}: {}", Self::format_f64(*value), path.selector); } println!(); println!( @@ -267,7 +270,7 @@ impl<'a> Results<'a> { self.ergs_positives.len() ); for (value, path) in self.ergs_positives.iter().take(count) { - println!("{:010}: {}", Self::format_f64(*value), path); + println!("{:010}: {}", Self::format_f64(*value), path.selector); } println!(); println!( @@ -277,7 +280,7 @@ impl<'a> Results<'a> { self.gas_positives.len() ); for (value, path) in self.gas_positives.iter().take(count) { - println!("{:010}: {}", Self::format_f64(*value), path); + println!("{:010}: {}", Self::format_f64(*value), path.selector); } println!(); } @@ -293,24 +296,24 @@ impl<'a> Results<'a> { w, "╔═╡ {} ╞{}╡ {} ╞═╗", "Size (-%)".bright_white(), - "═".repeat(cmp::max(24 - group_name.len(), 0)), + "═".repeat(cmp::max(44 - group_name.len(), 0)), group_name.bright_white() )?; writeln!( w, - "║ {:33} {:07} ║", + "║ {:53} {:07} ║", "Best".bright_white(), Self::format_f64(self.size_best) )?; writeln!( w, - "║ {:33} {:07} ║", + "║ {:53} {:07} ║", "Worst".bright_white(), Self::format_f64(self.size_worst) )?; writeln!( w, - "║ {:33} {:07} ║", + "║ {:53} {:07} ║", "Total".bright_white(), Self::format_f64(self.size_total) )?; @@ -319,24 +322,24 @@ impl<'a> Results<'a> { w, "╠═╡ {} ╞{}╡ {} ╞═╣", "Cycles (-%)".bright_white(), - 
"═".repeat(cmp::max(22 - group_name.len(), 0)), + "═".repeat(cmp::max(42 - group_name.len(), 0)), group_name.bright_white() )?; writeln!( w, - "║ {:33} {:07} ║", + "║ {:53} {:07} ║", "Best".bright_white(), Self::format_f64(self.cycles_best) )?; writeln!( w, - "║ {:33} {:07} ║", + "║ {:53} {:07} ║", "Worst".bright_white(), Self::format_f64(self.cycles_worst) )?; writeln!( w, - "║ {:33} {:07} ║", + "║ {:53} {:07} ║", "Total".bright_white(), Self::format_f64(self.cycles_total) )?; @@ -345,24 +348,24 @@ impl<'a> Results<'a> { w, "╠═╡ {} ╞{}╡ {} ╞═╣", "Ergs (-%)".bright_white(), - "═".repeat(cmp::max(24 - group_name.len(), 0)), + "═".repeat(cmp::max(44 - group_name.len(), 0)), group_name.bright_white() )?; writeln!( w, - "║ {:33} {:07} ║", + "║ {:53} {:07} ║", "Best".bright_white(), Self::format_f64(self.ergs_best) )?; writeln!( w, - "║ {:33} {:07} ║", + "║ {:53} {:07} ║", "Worst".bright_white(), Self::format_f64(self.ergs_worst) )?; writeln!( w, - "║ {:33} {:07} ║", + "║ {:53} {:07} ║", "Total".bright_white(), Self::format_f64(self.ergs_total) )?; @@ -371,24 +374,24 @@ impl<'a> Results<'a> { w, "╠══╡ {} ╞{}╡ {} ╞═╣", "Gas (-%)".bright_white(), - "═".repeat(cmp::max(24 - group_name.len(), 0)), + "═".repeat(cmp::max(44 - group_name.len(), 0)), group_name.bright_white() )?; writeln!( w, - "║ {:33} {:07} ║", + "║ {:53} {:07} ║", "Best".bright_white(), Self::format_f64(self.gas_best) )?; writeln!( w, - "║ {:33} {:07} ║", + "║ {:53} {:07} ║", "Worst".bright_white(), Self::format_f64(self.gas_worst) )?; writeln!( w, - "║ {:33} {:07} ║", + "║ {:53} {:07} ║", "Total".bright_white(), Self::format_f64(self.gas_total) )?; @@ -401,7 +404,7 @@ impl<'a> Results<'a> { w, "╠═╡ {} ╞{}╡ {} ╞═╣", "Ergs/gas".bright_white(), - "═".repeat(cmp::max(25 - group_name.len(), 0)), + "═".repeat(cmp::max(45 - group_name.len(), 0)), group_name.bright_white() )?; for (opcode, reference_ratio) in gas_reference_ratios.iter() { @@ -421,7 +424,7 @@ impl<'a> Results<'a> { writeln!( w, - "║ {:32} {} ║", + 
"║ {:52} {} ║", if is_positive { opcode.green() } else if is_negative { @@ -443,7 +446,7 @@ impl<'a> Results<'a> { w, "╠═╡ {} ╞{}╡ {} ╞═╣", "Ergs/gas (-%)".bright_white(), - "═".repeat(cmp::max(20 - group_name.len(), 0)), + "═".repeat(cmp::max(40 - group_name.len(), 0)), group_name.bright_white() )?; for (opcode, reference_ratio) in gas_reference_ratios.iter() { @@ -466,7 +469,7 @@ impl<'a> Results<'a> { writeln!( w, - "║ {:32} {} ║", + "║ {:52} {} ║", if is_positive { opcode.green() } else if is_negative { @@ -485,7 +488,10 @@ impl<'a> Results<'a> { } } } - writeln!(w, "╚═══════════════════════════════════════════╝")?; + writeln!( + w, + "╚═══════════════════════════════════════════════════════════════╝" + )?; Ok(()) } diff --git a/benchmark_analyzer/src/util/btreemap.rs b/benchmark_analyzer/src/util/btreemap.rs new file mode 100644 index 00000000..437f562c --- /dev/null +++ b/benchmark_analyzer/src/util/btreemap.rs @@ -0,0 +1,162 @@ +//! +//! Utility functions +//! + +use std::collections::BTreeMap; + +/// Intersects two `BTreeMap` instances and merges their entries using a +/// specified merger function. +/// +/// # Arguments +/// +/// * `map1` - The first `BTreeMap` containing keys of type `K` and values of +/// type `V1`. +/// * `map2` - The second `BTreeMap` containing keys of type `K` and values of +/// type `V2`. This map is modified during the intersection. +/// * `merger` - A closure that takes a key of type `K`, and a value from each +/// map (`V1` and `V2`), and returns a merged result of type `R`. +/// +/// # Returns +/// +/// An iterator that yields merged results of type `R` for each intersecting key +/// from the maps. 
+/// +/// # Example +/// +/// ```rust +/// use benchmark_analyzer::util::btreemap::intersect_map; +/// +/// let first = [(1, 1), (2, 2), (3, 3)]; +/// let second = [(1, 10), (3, 30)]; +/// let expected: Vec<_> = [111, 333].into(); +/// assert_eq!( +/// intersect_map(first.into(), second.into(), |k, v1, v2| 100 * k + v1 + v2) +/// .collect::<Vec<_>>(), +/// expected +/// ) +/// ``` + +pub fn intersect_map<K, V1, V2, R>( + map1: BTreeMap<K, V1>, + mut map2: BTreeMap<K, V2>, + merger: impl Fn(K, V1, V2) -> R + 'static, +) -> impl Iterator<Item = R> +where + K: Ord, +{ + map1.into_iter().filter_map(move |(key, value1)| { + map2.remove(&key).map(|value2| merger(key, value1, value2)) + }) +} + +/// Perform a cross join on two `BTreeMap` instances, applying a +/// selector function to each pair of keys. If the selector function returns an +/// `Option::Some`, it includes the transformed key along with cloned values +/// from both maps into the result vector. +/// +/// # Arguments +/// +/// * `map1` - A reference to the first `BTreeMap` with key type `K` and value +/// type `V1`. +/// * `map2` - A reference to the second `BTreeMap` with key type `K` and value +/// type `V2`. +/// * `selector` - A closure or function that takes two keys (one from each map) +/// and returns an `Option<N>`, where `N` is the type of the transformed key +/// to be included in the result if the option is `Some`. +/// +/// # Returns +/// +/// A vector containing tuples of the transformed key type `N`, and values from +/// both maps (`V1` and `V2`), corresponding to each pair of matched keys for +/// which the selector function has returned `Some`. +/// +/// # Type Parameters +/// +/// * `K` - The type of key used in both input maps, which must implement `Ord`. +/// * `N` - The type for transformed key pairs. +/// * `V1` - The type of value in the first map, which must implement `Clone`. +/// * `V2` - The type of value in the second map, which must implement `Clone`.
+/// +/// # Example +/// +/// ```rust +/// use std::collections::BTreeMap; +/// +/// // Assume we have two BTreeMaps. +/// let map1: BTreeMap<_, _> = [(1, "a"), (2, "b")].iter().cloned().collect(); +/// let map2: BTreeMap<_, _> = [(1, "x"), (2, "y")].iter().cloned().collect(); +/// +/// // Define a selector function that combines the keys. +/// let selector = |k1: &i32, k2: &i32| if k1 == k2 { Some(k1 + k2) } else { None }; +/// +/// // Execute the cross join with filtering using the selector. +/// let result = cross_join_filter_map(&map1, &map2, selector); +/// +/// // Result now contains: [(2, "a", "x"), (4, "b", "y")] +/// assert_eq!(result, vec![(2, "a", "x"), (4, "b", "y")]); +/// ``` + +pub fn cross_join_filter_map<K, N, V1, V2>( + map1: &BTreeMap<K, V1>, + map2: &BTreeMap<K, V2>, + selector: impl Fn(&K, &K) -> Option<N>, +) -> Vec<(N, V1, V2)> +where + K: Ord, + V1: Clone, + V2: Clone, +{ + let mut result: Vec<(N, V1, V2)> = Vec::new(); + + for (key1, value1) in map1 { + for (key2, value2) in map2 { + if let Some(new_key) = selector(key1, key2) { + result.push((new_key, value1.clone(), value2.clone())); + } + } + } + + result +} + +/// Returns an iterator over +/// the elements that are common to both `map1` and `map2`. +/// +/// # Arguments +/// +/// * `map1` - A BTreeMap where the keys are compared. +/// * `map2` - A mutable BTreeMap from which matching keys are removed and their values paired with those from `map1`. +/// +/// # Returns +/// +/// An iterator over tuples `(K, V1, V2)` where: +/// * `K` is the common key. +/// * `V1` is the associated value from `map1`. +/// * `V2` is the associated value from `map2`. +/// +/// The iterator only includes keys that are present in both maps.
+/// +/// # Example +/// +/// ```rust +/// use benchmark_analyzer::util::btreemap::intersect_keys; +/// +/// let first = [(1, "1"), (2, "2"), (3, "3")]; +/// let second = [(1, "11"), (3, "33")]; +/// let expected: Vec<_> = [(1, "1", "11"), (3, "3", "33")].into(); +/// assert_eq!( +/// intersect_keys(first.into(), second.into()).collect::<Vec<_>>(), +/// expected +/// ) +/// ``` +/// +pub fn intersect_keys<K, V1, V2>( + map1: BTreeMap<K, V1>, + mut map2: BTreeMap<K, V2>, +) -> impl Iterator<Item = (K, V1, V2)> +where + K: Ord, +{ + map1.into_iter() + .filter_map(move |(key, value1)| map2.remove(&key).map(|value2| (key, value1, value2))) +} diff --git a/benchmark_analyzer/src/util/mod.rs b/benchmark_analyzer/src/util/mod.rs new file mode 100644 index 00000000..e0e4bfb0 --- /dev/null +++ b/benchmark_analyzer/src/util/mod.rs @@ -0,0 +1,5 @@ +//! +//! Utility functions. +//! + +pub mod btreemap; diff --git a/compiler_tester/Cargo.toml b/compiler_tester/Cargo.toml index 5f573431..092d386e 100644 --- a/compiler_tester/Cargo.toml +++ b/compiler_tester/Cargo.toml @@ -38,6 +38,7 @@ once_cell = "=1.20.2" rayon = "=1.10.0" lazy_static = "=1.5.0" bincode = "=1.3.3" +chrono = "=0.4.38" evm = { git = "https://github.com/rust-ethereum/evm", rev = "f7a23df6c478ca6a151af5f60e62944800529a61" } revm = { git = "https://github.com/bluealloy/revm", rev = "fa5650ee8a4d802f4f3557014dd157adfb074460" } diff --git a/compiler_tester/src/benchmark_format.rs b/compiler_tester/src/compiler_tester/arguments/benchmark_format.rs similarity index 82% rename from compiler_tester/src/benchmark_format.rs rename to compiler_tester/src/compiler_tester/arguments/benchmark_format.rs index 28002bbb..4bbc89e5 100644 --- a/compiler_tester/src/benchmark_format.rs +++ b/compiler_tester/src/compiler_tester/arguments/benchmark_format.rs @@ -8,10 +8,12 @@ #[derive(Debug, Default, Clone, PartialEq, Eq)] pub enum BenchmarkFormat { #[default] - /// JSON format. + /// Unstable JSON format, corresponds to the inner data model of benchmark analyzer. Json, /// CSV format.
Csv, + /// JSON format compatible with LNT. + JsonLNT, +} impl std::str::FromStr for BenchmarkFormat { @@ -20,6 +22,7 @@ impl std::str::FromStr for BenchmarkFormat { fn from_str(string: &str) -> Result<Self, Self::Err> { match string.to_lowercase().as_str() { "json" => Ok(Self::Json), + "json-lnt" => Ok(Self::JsonLNT), "csv" => Ok(Self::Csv), string => anyhow::bail!( "Unknown benchmark format `{string}`. Supported formats: {}", @@ -37,6 +40,7 @@ impl std::fmt::Display for BenchmarkFormat { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let repr = match self { BenchmarkFormat::Json => "json", + BenchmarkFormat::JsonLNT => "json-lnt", BenchmarkFormat::Csv => "csv", }; f.write_str(repr) diff --git a/compiler_tester/src/compiler_tester/arguments.rs b/compiler_tester/src/compiler_tester/arguments/mod.rs similarity index 84% rename from compiler_tester/src/compiler_tester/arguments.rs rename to compiler_tester/src/compiler_tester/arguments/mod.rs index 45e912e7..951b091b 100644 --- a/compiler_tester/src/compiler_tester/arguments.rs +++ b/compiler_tester/src/compiler_tester/arguments/mod.rs @@ -2,10 +2,16 @@ //! The compiler tester arguments. //! +pub mod benchmark_format; +pub mod validation; + use std::path::PathBuf; +use benchmark_format::BenchmarkFormat; use clap::Parser; +pub const ARGUMENT_BENCHMARK_CONTEXT: &str = "benchmark-context"; + /// /// The compiler tester arguments. /// @@ -40,9 +46,15 @@ pub struct Arguments { #[structopt(short, long)] pub benchmark: Option<PathBuf>, - /// The benchmark output format. - #[structopt(long = "benchmark-format", default_value_t = compiler_tester::BenchmarkFormat::Json)] - pub benchmark_format: compiler_tester::BenchmarkFormat, + /// The benchmark output format: `json`, `csv`, or `json-lnt`. + /// Using `json-lnt` requires providing the path to a JSON file describing the + /// benchmarking context via `--benchmark-context`.
+ #[structopt(long = "benchmark-format", default_value_t = BenchmarkFormat::Json)] + pub benchmark_format: BenchmarkFormat, + + /// The benchmark context to pass additional data to backends. + #[structopt(long = ARGUMENT_BENCHMARK_CONTEXT )] + pub benchmark_context: Option<PathBuf>, /// Sets the number of threads, which execute the tests concurrently. #[structopt(short, long)] diff --git a/compiler_tester/src/compiler_tester/arguments/validation.rs b/compiler_tester/src/compiler_tester/arguments/validation.rs new file mode 100644 index 00000000..2c1eb263 --- /dev/null +++ b/compiler_tester/src/compiler_tester/arguments/validation.rs @@ -0,0 +1,28 @@ +//! +//! Validate the arguments passed from user, checking invariants that are not +//! expressed in the type system. +//! + +use super::benchmark_format::BenchmarkFormat; +use super::Arguments; + +use super::ARGUMENT_BENCHMARK_CONTEXT; + +/// +/// Validate the arguments passed from user, checking invariants that are not +/// expressed in the type system.
+/// +pub fn validate_arguments(arguments: Arguments) -> anyhow::Result<Arguments> { + match (&arguments.benchmark_format, &arguments.benchmark_context) { + (BenchmarkFormat::JsonLNT, None) => + anyhow::bail!("Generation of LNT-compatible benchmark results in JSON format requires passing a valid context in the argument `--{ARGUMENT_BENCHMARK_CONTEXT}` to compiler tester.") + , + (BenchmarkFormat::JsonLNT, Some(_)) => (), + (_, Some(_)) => + anyhow::bail!("Only LNT backend in JSON format supports passing a valid context in the argument `--{ARGUMENT_BENCHMARK_CONTEXT}` to compiler tester.") + , + _ => (), + } + + Ok(arguments) +} diff --git a/compiler_tester/src/compiler_tester/main.rs b/compiler_tester/src/compiler_tester/main.rs index 5735712c..36e7faec 100644 --- a/compiler_tester/src/compiler_tester/main.rs +++ b/compiler_tester/src/compiler_tester/main.rs @@ -8,6 +8,8 @@ use std::path::PathBuf; use std::str::FromStr; use std::time::Instant; +use arguments::benchmark_format::BenchmarkFormat; +use arguments::validation::validate_arguments; use clap::Parser; use colored::Colorize; @@ -38,6 +40,7 @@ fn main() { /// The entry point wrapper used for proper error handling.
/// fn main_inner(arguments: Arguments) -> anyhow::Result<()> { + let arguments = validate_arguments(arguments)?; println!( " {} {} v{} (LLVM build {})", "Starting".bright_green().bold(), @@ -93,17 +96,6 @@ fn main_inner(arguments: Arguments) -> anyhow::Result<()> { .build_global() .expect("Thread pool configuration failure"); - let summary = compiler_tester::Summary::new(arguments.verbose, arguments.quiet).wrap(); - - let filters = compiler_tester::Filters::new(arguments.path, arguments.mode, arguments.group); - - let compiler_tester = compiler_tester::CompilerTester::new( - summary.clone(), - filters, - debug_config.clone(), - arguments.workflow, - )?; - let toolchain = match (arguments.target, arguments.toolchain) { (era_compiler_common::Target::EraVM, Some(toolchain)) => toolchain, (era_compiler_common::Target::EraVM, None) => compiler_tester::Toolchain::IrLLVM, @@ -145,6 +137,19 @@ fn main_inner(arguments: Arguments) -> anyhow::Result<()> { ), }; + let summary = compiler_tester::Summary::new(arguments.verbose, arguments.quiet) + .start_timer()? + .wrap(); + + let filters = compiler_tester::Filters::new(arguments.path, arguments.mode, arguments.group); + + let compiler_tester = compiler_tester::CompilerTester::new( + summary.clone(), + filters, + debug_config.clone(), + arguments.workflow, + )?; + let run_time_start = Instant::now(); println!( " {} tests with {} worker threads", @@ -211,7 +216,7 @@ fn main_inner(arguments: Arguments) -> anyhow::Result<()> { } }?; - let summary = compiler_tester::Summary::unwrap_arc(summary); + let summary = compiler_tester::Summary::unwrap_arc(summary).stop_timer()?; print!("{summary}"); println!( " {} running tests in {}m{:02}s", @@ -221,14 +226,28 @@ fn main_inner(arguments: Arguments) -> anyhow::Result<()> { ); if let Some(path) = arguments.benchmark { - let benchmark = summary.benchmark(toolchain)?; + let context = if let Some(context_path) = arguments.benchmark_context { + Some(read_context(context_path)?) 
+ } else { + None + }; + let benchmark = summary.benchmark(toolchain, context)?; match arguments.benchmark_format { - compiler_tester::BenchmarkFormat::Json => { - benchmark.write_to_file(path, benchmark_analyzer::JsonSerializer)? - } - compiler_tester::BenchmarkFormat::Csv => { - benchmark.write_to_file(path, benchmark_analyzer::CsvSerializer)? - } + BenchmarkFormat::Json => benchmark_analyzer::write_to_file( + &benchmark, + path, + benchmark_analyzer::JsonNativeSerializer, + )?, + BenchmarkFormat::Csv => benchmark_analyzer::write_to_file( + &benchmark, + path, + benchmark_analyzer::CsvSerializer, + )?, + BenchmarkFormat::JsonLNT => benchmark_analyzer::write_to_file( + &benchmark, + path, + benchmark_analyzer::JsonLNTSerializer, + )?, } } @@ -239,6 +258,24 @@ fn main_inner(arguments: Arguments) -> anyhow::Result<()> { Ok(()) } +/// +/// Reads the benchmarking context from a JSON file and validates its correctness. +/// Benchmarking context provides additional information about benchmarking that +/// will be used to generate a report. +/// +/// # Errors +/// +/// This function will return an error if +/// - file can't be read, +/// - deserialization from JSON file failed, +/// - the context validation failed. 
+fn read_context(path: PathBuf) -> anyhow::Result { + let contents = std::fs::read_to_string(path)?; + let context: benchmark_analyzer::BenchmarkContext = serde_json::de::from_str(&contents)?; + benchmark_analyzer::validate_context(&context)?; + Ok(context) +} + #[cfg(test)] mod tests { use std::path::PathBuf; @@ -257,7 +294,8 @@ mod tests { path: vec!["tests/solidity/simple/default.sol".to_owned()], group: vec![], benchmark: None, - benchmark_format: compiler_tester::BenchmarkFormat::Json, + benchmark_format: crate::BenchmarkFormat::Json, + benchmark_context: None, threads: Some(1), dump_system: false, disable_deployer: false, diff --git a/compiler_tester/src/compilers/eravm/mode.rs b/compiler_tester/src/compilers/eravm/mode.rs index 158b5e85..98dfbac6 100644 --- a/compiler_tester/src/compilers/eravm/mode.rs +++ b/compiler_tester/src/compilers/eravm/mode.rs @@ -2,6 +2,8 @@ //! The compiler tester EraVM mode. //! +use crate::compilers::mode::imode::IMode; + /// /// The compiler tester EraVM mode. /// @@ -13,3 +15,16 @@ impl std::fmt::Display for Mode { write!(f, "") } } +impl IMode for Mode { + fn optimizations(&self) -> Option { + None + } + + fn codegen(&self) -> Option { + None + } + + fn version(&self) -> Option { + None + } +} diff --git a/compiler_tester/src/compilers/llvm/mode.rs b/compiler_tester/src/compilers/llvm/mode.rs index 419038bc..54ec79a5 100644 --- a/compiler_tester/src/compilers/llvm/mode.rs +++ b/compiler_tester/src/compilers/llvm/mode.rs @@ -2,6 +2,7 @@ //! The compiler tester LLVM mode. //! 
+use crate::compilers::mode::imode::IMode; use crate::compilers::mode::llvm_options::LLVMOptions; use crate::compilers::mode::Mode as ModeWrapper; @@ -45,8 +46,22 @@ impl Mode { } } +impl IMode for Mode { + fn optimizations(&self) -> Option { + Some(format!("{}", self.llvm_optimizer_settings)) + } + + fn codegen(&self) -> Option { + None + } + + fn version(&self) -> Option { + None + } +} + impl std::fmt::Display for Mode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.llvm_optimizer_settings,) + write!(f, "{}", self.optimizations().unwrap_or_default(),) } } diff --git a/compiler_tester/src/compilers/mode/imode.rs b/compiler_tester/src/compilers/mode/imode.rs new file mode 100644 index 00000000..92a72475 --- /dev/null +++ b/compiler_tester/src/compilers/mode/imode.rs @@ -0,0 +1,31 @@ +//! +//! Common interface for different compiler modes. +//! + +/// +/// Common interface for different compiler modes. +/// +pub trait IMode { + /// Optimization level, if applicable. + fn optimizations(&self) -> Option; + + /// Codegen version, if applicable. + fn codegen(&self) -> Option; + + /// Language version, if applicable. + fn version(&self) -> Option; +} + +pub fn mode_to_string_aux(mode: &impl IMode, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + for (i, element) in [mode.optimizations(), mode.codegen(), mode.version()] + .iter() + .flatten() + .enumerate() + { + if i > 0 { + write!(f, " ")?; + } + write!(f, "{}", element)?; + } + Ok(()) +} diff --git a/compiler_tester/src/compilers/mode/mod.rs b/compiler_tester/src/compilers/mode/mod.rs index e2e8b95f..5bcb58b5 100644 --- a/compiler_tester/src/compilers/mode/mod.rs +++ b/compiler_tester/src/compilers/mode/mod.rs @@ -2,9 +2,13 @@ //! The compiler mode. //! 
+pub mod imode; pub mod llvm_options; use std::collections::HashSet; +use std::fmt::Display; + +use imode::{mode_to_string_aux, IMode}; use crate::compilers::eravm::mode::Mode as EraVMMode; use crate::compilers::llvm::mode::Mode as LLVMMode; @@ -253,16 +257,46 @@ impl From for Mode { } } -impl std::fmt::Display for Mode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl IMode for Mode { + fn optimizations(&self) -> Option { + match self { + Mode::Solidity(mode) => mode.optimizations(), + Mode::SolidityUpstream(mode) => mode.optimizations(), + Mode::Yul(mode) => mode.optimizations(), + Mode::YulUpstream(mode) => mode.optimizations(), + Mode::Vyper(mode) => mode.optimizations(), + Mode::LLVM(mode) => mode.optimizations(), + Mode::EraVM(mode) => mode.optimizations(), + } + } + + fn codegen(&self) -> Option { + match self { + Mode::Solidity(mode) => mode.codegen(), + Mode::SolidityUpstream(mode) => mode.codegen(), + Mode::Yul(mode) => mode.codegen(), + Mode::YulUpstream(mode) => mode.codegen(), + Mode::Vyper(mode) => mode.codegen(), + Mode::LLVM(mode) => mode.codegen(), + Mode::EraVM(mode) => mode.codegen(), + } + } + + fn version(&self) -> Option { match self { - Self::Solidity(inner) => write!(f, "{inner}"), - Self::SolidityUpstream(inner) => write!(f, "{inner}"), - Self::Yul(inner) => write!(f, "{inner}"), - Self::YulUpstream(inner) => write!(f, "{inner}"), - Self::Vyper(inner) => write!(f, "{inner}"), - Self::LLVM(inner) => write!(f, "{inner}"), - Self::EraVM(inner) => write!(f, "{inner}"), + Mode::Solidity(mode) => mode.version(), + Mode::SolidityUpstream(mode) => mode.version(), + Mode::Yul(mode) => mode.version(), + Mode::YulUpstream(mode) => mode.version(), + Mode::Vyper(mode) => mode.version(), + Mode::LLVM(mode) => mode.version(), + Mode::EraVM(mode) => mode.version(), } } } + +impl Display for Mode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + mode_to_string_aux(self, f) + } +} diff --git 
a/compiler_tester/src/compilers/solidity/mode.rs b/compiler_tester/src/compilers/solidity/mode.rs index 964fd3d9..f89463c6 100644 --- a/compiler_tester/src/compilers/solidity/mode.rs +++ b/compiler_tester/src/compilers/solidity/mode.rs @@ -6,6 +6,7 @@ use itertools::Itertools; use crate::compilers::mode::llvm_options::LLVMOptions; +use crate::compilers::mode::imode::IMode; use crate::compilers::mode::Mode as ModeWrapper; /// @@ -126,25 +127,29 @@ impl Mode { } } } +} - /// - /// Returns a string representation excluding the solc version. - /// - pub fn repr_without_version(&self) -> String { - format!( - "{}{}{}", - match self.solc_codegen { +impl IMode for Mode { + fn optimizations(&self) -> Option { + Some(format!( + "{}{}", + if self.solc_optimize { '+' } else { '-' }, + self.llvm_optimizer_settings, + )) + } + + fn codegen(&self) -> Option { + Some( + (match self.solc_codegen { era_solc::StandardJsonInputCodegen::Yul => "Y", era_solc::StandardJsonInputCodegen::EVMLA if self.via_ir => "I", era_solc::StandardJsonInputCodegen::EVMLA => "E", - }, - if self.solc_optimize { '+' } else { '-' }, - self.llvm_optimizer_settings, + }) + .to_string(), ) } -} -impl std::fmt::Display for Mode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{} {}", self.repr_without_version(), self.solc_version) + + fn version(&self) -> Option { + Some(self.solc_version.to_string()) } } diff --git a/compiler_tester/src/compilers/solidity/upstream/mode.rs b/compiler_tester/src/compilers/solidity/upstream/mode.rs index 46a84994..4d4d9616 100644 --- a/compiler_tester/src/compilers/solidity/upstream/mode.rs +++ b/compiler_tester/src/compilers/solidity/upstream/mode.rs @@ -4,7 +4,7 @@ use itertools::Itertools; -use crate::compilers::mode::Mode as ModeWrapper; +use crate::compilers::mode::{imode::IMode, Mode as ModeWrapper}; /// /// The compiler tester Solidity mode. 
@@ -111,28 +111,29 @@ impl Mode { } } } +} - /// - /// Returns a string representation excluding the solc version. - /// - pub fn repr_without_version(&self) -> String { - if self.via_mlir { - return "L".to_owned(); - } - format!( - "{}{}", - match self.solc_codegen { - era_solc::StandardJsonInputCodegen::Yul => "Y", - era_solc::StandardJsonInputCodegen::EVMLA if self.via_ir => "I", - era_solc::StandardJsonInputCodegen::EVMLA => "E", - }, - if self.solc_optimize { '+' } else { '-' }, +impl IMode for Mode { + fn optimizations(&self) -> Option { + Some((if self.solc_optimize { "+" } else { "-" }).to_string()) + } + + fn codegen(&self) -> Option { + Some( + (if self.via_mlir { + "L" + } else { + match self.solc_codegen { + era_solc::StandardJsonInputCodegen::Yul => "Y", + era_solc::StandardJsonInputCodegen::EVMLA if self.via_ir => "I", + era_solc::StandardJsonInputCodegen::EVMLA => "E", + } + }) + .to_string(), ) } -} -impl std::fmt::Display for Mode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{} {}", self.repr_without_version(), self.solc_version) + fn version(&self) -> Option { + Some(self.solc_version.to_string()) } } diff --git a/compiler_tester/src/compilers/vyper/mode.rs b/compiler_tester/src/compilers/vyper/mode.rs index fcbd9be2..03a80fbf 100644 --- a/compiler_tester/src/compilers/vyper/mode.rs +++ b/compiler_tester/src/compilers/vyper/mode.rs @@ -2,6 +2,7 @@ //! The compiler tester Vyper mode. //! +use crate::compilers::mode::imode::IMode; use crate::compilers::mode::llvm_options::LLVMOptions; use crate::compilers::mode::Mode as ModeWrapper; @@ -74,21 +75,21 @@ impl Mode { } }) } - - /// - /// Returns a string representation excluding the vyper version. 
- /// - pub fn repr_without_version(&self) -> String { - format!( - "V{}{}", +} +impl IMode for Mode { + fn optimizations(&self) -> Option { + Some(format!( + "{}{}", if self.vyper_optimize { '+' } else { '-' }, self.llvm_optimizer_settings, - ) + )) + } + + fn codegen(&self) -> Option { + Some("V".into()) } -} -impl std::fmt::Display for Mode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{} {}", self.repr_without_version(), self.vyper_version,) + fn version(&self) -> Option { + Some(format!("{}", self.vyper_version)) } } diff --git a/compiler_tester/src/compilers/yul/mode.rs b/compiler_tester/src/compilers/yul/mode.rs index 2f405409..2044eb07 100644 --- a/compiler_tester/src/compilers/yul/mode.rs +++ b/compiler_tester/src/compilers/yul/mode.rs @@ -2,6 +2,7 @@ //! The compiler tester Yul mode. //! +use crate::compilers::mode::imode::IMode; use crate::compilers::mode::llvm_options::LLVMOptions; use crate::compilers::mode::Mode as ModeWrapper; @@ -51,8 +52,16 @@ impl Mode { } } -impl std::fmt::Display for Mode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.llvm_optimizer_settings) +impl IMode for Mode { + fn optimizations(&self) -> Option { + Some(format!("{}", self.llvm_optimizer_settings)) + } + + fn codegen(&self) -> Option { + None + } + + fn version(&self) -> Option { + None } } diff --git a/compiler_tester/src/compilers/yul/mode_upstream.rs b/compiler_tester/src/compilers/yul/mode_upstream.rs index a7a9db7f..9f6faffa 100644 --- a/compiler_tester/src/compilers/yul/mode_upstream.rs +++ b/compiler_tester/src/compilers/yul/mode_upstream.rs @@ -2,7 +2,7 @@ //! The compiler tester upstream Yul mode. //! -use crate::compilers::mode::Mode as ModeWrapper; +use crate::compilers::mode::{imode::IMode, Mode as ModeWrapper}; /// /// The compiler tester upstream Yul mode. 
@@ -42,21 +42,18 @@ impl Mode { _ => panic!("Non-Yul-upstream mode"), } } +} - /// - /// Returns a string representation excluding the solc version. - /// - pub fn repr_without_version(&self) -> String { - if self.via_mlir { - String::from("L") - } else { - format!("Y{}", if self.solc_optimize { '+' } else { '-' },) - } +impl IMode for Mode { + fn optimizations(&self) -> Option { + Some((if self.solc_optimize { "+" } else { "-" }).to_string()) + } + + fn codegen(&self) -> Option { + Some((if self.via_mlir { "L" } else { "Y" }).to_string()) } -} -impl std::fmt::Display for Mode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{} {}", self.repr_without_version(), self.solc_version) + fn version(&self) -> Option { + Some(format!("{}", self.solc_version)) } } diff --git a/compiler_tester/src/directories/matter_labs/test/mod.rs b/compiler_tester/src/directories/matter_labs/test/mod.rs index e79a194d..184d2fca 100644 --- a/compiler_tester/src/directories/matter_labs/test/mod.rs +++ b/compiler_tester/src/directories/matter_labs/test/mod.rs @@ -304,14 +304,18 @@ impl MatterLabsTest { Ok(instances) } + fn is_evm_interpreter_test(&self) -> bool { + matches!( + self.metadata.group.as_deref(), + Some(benchmark_analyzer::TEST_GROUP_EVM_INTERPRETER) + ) + } /// /// Returns cases needed for running benchmarks on the EVM interpreter. 
/// - fn evm_interpreter_benchmark_cases(&self) -> Vec { - if self.metadata.group.as_deref() - != Some(benchmark_analyzer::Benchmark::EVM_INTERPRETER_GROUP_NAME) - { - return vec![]; + fn evm_interpreter_benchmark_cases(&self) -> Option> { + if !self.is_evm_interpreter_test() { + return None; } let mut evm_contracts: Vec = self @@ -398,7 +402,7 @@ impl MatterLabsTest { expected_evm: None, }) } - metadata_cases + Some(metadata_cases) } } @@ -467,8 +471,13 @@ impl Buildable for MatterLabsTest { }; instances.extend(evm_instances); - let mut metadata_cases = self.metadata.cases.to_owned(); - metadata_cases.extend(self.evm_interpreter_benchmark_cases()); + let metadata_cases = { + let mut base_cases = self.metadata.cases.to_owned(); + if let Some(opcode_test_cases) = self.evm_interpreter_benchmark_cases() { + base_cases.extend(opcode_test_cases); + } + base_cases + }; let mut cases = Vec::with_capacity(metadata_cases.len()); for case in metadata_cases.into_iter() { diff --git a/compiler_tester/src/lib.rs b/compiler_tester/src/lib.rs index c177a3e0..ff5c2a57 100644 --- a/compiler_tester/src/lib.rs +++ b/compiler_tester/src/lib.rs @@ -7,7 +7,6 @@ #![allow(clippy::too_many_arguments)] #![allow(clippy::type_complexity)] -pub(crate) mod benchmark_format; pub(crate) mod compilers; pub(crate) mod directories; pub(crate) mod environment; @@ -27,7 +26,6 @@ use itertools::Itertools; use rayon::iter::IntoParallelIterator; use rayon::iter::ParallelIterator; -pub use crate::benchmark_format::BenchmarkFormat; pub use crate::compilers::eravm::EraVMCompiler; pub use crate::compilers::llvm::LLVMCompiler; pub use crate::compilers::mode::llvm_options::LLVMOptions; diff --git a/compiler_tester/src/summary/benchmark_adapters/input.rs b/compiler_tester/src/summary/benchmark_adapters/input.rs index e5ff9ad2..85b59b97 100644 --- a/compiler_tester/src/summary/benchmark_adapters/input.rs +++ b/compiler_tester/src/summary/benchmark_adapters/input.rs @@ -24,6 +24,9 @@ impl From for 
benchmark_analyzer::Input { InputIdentifier::Balance { input_index } => { benchmark_analyzer::Input::Balance { input_index } } + InputIdentifier::Fallback { input_index } => { + benchmark_analyzer::Input::Fallback { input_index } + } } } } diff --git a/compiler_tester/src/summary/benchmark_adapters/metadata.rs b/compiler_tester/src/summary/benchmark_adapters/metadata.rs index d713251d..cef3f580 100644 --- a/compiler_tester/src/summary/benchmark_adapters/metadata.rs +++ b/compiler_tester/src/summary/benchmark_adapters/metadata.rs @@ -3,52 +3,9 @@ //! use crate::test::description::TestDescription; -use crate::Mode; -pub fn convert_description( - description: &TestDescription, - default_group: &str, -) -> benchmark_analyzer::Metadata { - let TestDescription { - group, - mode, - selector, - } = description.clone(); - let selector = selector.into(); - let version = match &mode { - Some(mode) => mode_version(mode.clone()).map(|m| m.to_string()), - None => None, - }; - let mode = mode.map(mode_string).unwrap_or_default(); - let group = group.unwrap_or(default_group.to_string()); - benchmark_analyzer::Metadata { - selector, - mode, - version, - group, - } -} - -fn mode_version(mode: Mode) -> Option { - match mode { - Mode::Solidity(mode) => Some(mode.solc_version), - Mode::SolidityUpstream(mode) => Some(mode.solc_version), - Mode::Yul(_) => None, - Mode::YulUpstream(mode) => Some(mode.solc_version), - Mode::Vyper(mode) => Some(mode.vyper_version), - Mode::LLVM(_) => None, - Mode::EraVM(_) => None, - } -} - -fn mode_string(mode: Mode) -> Option { - match mode { - Mode::Solidity(mode) => Some(mode.repr_without_version()), - Mode::SolidityUpstream(mode) => Some(mode.repr_without_version()), - Mode::Yul(_) => None, - Mode::YulUpstream(mode) => Some(mode.repr_without_version()), - Mode::Vyper(mode) => Some(mode.repr_without_version()), - Mode::LLVM(_) => None, - Mode::EraVM(_) => None, +impl From for benchmark_analyzer::ExecutableMetadata { + fn from(_: TestDescription) -> Self 
{ + benchmark_analyzer::ExecutableMetadata {} } } diff --git a/compiler_tester/src/summary/benchmark_adapters/mod.rs b/compiler_tester/src/summary/benchmark_adapters/mod.rs index 9968d960..24643d12 100644 --- a/compiler_tester/src/summary/benchmark_adapters/mod.rs +++ b/compiler_tester/src/summary/benchmark_adapters/mod.rs @@ -5,4 +5,5 @@ pub mod input; pub mod metadata; +pub mod mode; pub mod selector; diff --git a/compiler_tester/src/summary/benchmark_adapters/mode.rs b/compiler_tester/src/summary/benchmark_adapters/mode.rs new file mode 100644 index 00000000..76b063de --- /dev/null +++ b/compiler_tester/src/summary/benchmark_adapters/mode.rs @@ -0,0 +1,36 @@ +//! +//! Representation of compiler mode stored in the benchmark. +//! + +use crate::compilers::mode::imode::IMode; + +const DEFAULT_CODEGEN: &str = "NoCodegen"; +const DEFAULT_OPTIMIZATIONS: &str = "NoOptimizations"; +const DEFAULT_VERSION: &str = "NoVersion"; + +/// +/// Representation of compiler mode stored in the benchmark. +/// +pub struct ModeInfo { + /// Codegen type if applicable, or a default value [`DEFAULT_CODEGEN`]. + pub codegen: String, + /// Optimization level if applicable, or a default value [`DEFAULT_OPTIMIZATIONS`]. + pub optimizations: String, + /// Language version if applicable, or a default value [`DEFAULT_VERSION`]. 
+ pub version: String, +} + +impl From for ModeInfo +where + T: IMode, +{ + fn from(value: T) -> ModeInfo { + ModeInfo { + codegen: value.codegen().unwrap_or(DEFAULT_CODEGEN.into()), + optimizations: value + .optimizations() + .unwrap_or(DEFAULT_OPTIMIZATIONS.into()), + version: value.version().unwrap_or(DEFAULT_VERSION.into()), + } + } +} diff --git a/compiler_tester/src/summary/mod.rs b/compiler_tester/src/summary/mod.rs index 1f20ff10..fdf5ef0a 100644 --- a/compiler_tester/src/summary/mod.rs +++ b/compiler_tester/src/summary/mod.rs @@ -8,17 +8,21 @@ pub mod element; use std::sync::Arc; use std::sync::Mutex; -use benchmark_adapters::metadata::convert_description; +use benchmark_adapters::mode::ModeInfo; use colored::Colorize; use crate::test::case::input::output::Output; use crate::test::description::TestDescription; use crate::toolchain::Toolchain; +use crate::utils::timer::Timer; use self::element::outcome::passed_variant::PassedVariant; use self::element::outcome::Outcome; use self::element::Element; +const BENCHMARK_FORMAT_VERSION: benchmark_analyzer::BenchmarkVersion = + benchmark_analyzer::BenchmarkVersion::V2; + /// /// The compiler tester summary. /// @@ -38,6 +42,7 @@ pub struct Summary { invalid: usize, /// The ignored tests counter. ignored: usize, + timer: Timer, } impl Summary { @@ -56,9 +61,33 @@ impl Summary { failed: 0, invalid: 0, ignored: 0, + timer: Timer::default(), } } + /// Starts the timer associated with the object. + /// + /// # Returns + /// + /// `anyhow::Result`: If the timer is successfully started, the function + /// returns `Ok(self)`, allowing method chaining. If the timer is in an invalid + /// state (e.g., already started or stopped), it returns an error. + pub fn start_timer(mut self) -> anyhow::Result { + self.timer.start()?; + Ok(self) + } + + /// Stops the timer associated with the object. 
+ /// + /// # Returns + /// + /// `anyhow::Result`: If the timer is successfully stopped, the function + /// returns `Ok(self)`, permitting method chaining. An error is returned if the + /// timer is in an invalid state (e.g., not started or already stopped). + pub fn stop_timer(mut self) -> anyhow::Result { + self.timer.stop()?; + Ok(self) + } /// /// Whether the test run has been successful. /// @@ -78,40 +107,39 @@ impl Summary { /// /// Returns the benchmark structure. /// - pub fn benchmark(&self, toolchain: Toolchain) -> anyhow::Result { + pub fn benchmark( + &self, + toolchain: Toolchain, + context: Option, + ) -> anyhow::Result { + if let Toolchain::SolcLLVM = toolchain { + anyhow::bail!("The benchmarking is not supported for the SolcLLVM toolchain.") + } + let mut benchmark = benchmark_analyzer::Benchmark::default(); - match toolchain { - Toolchain::IrLLVM => { - benchmark.groups.insert( - format!( - "{} {}", - benchmark_analyzer::BENCHMARK_ALL_GROUP_NAME, - era_compiler_llvm_context::OptimizerSettings::cycles(), - ), - benchmark_analyzer::BenchmarkGroup::default(), - ); - benchmark.groups.insert( - format!( - "{} {}", - benchmark_analyzer::BENCHMARK_ALL_GROUP_NAME, - era_compiler_llvm_context::OptimizerSettings::size(), - ), - benchmark_analyzer::BenchmarkGroup::default(), - ); - } - Toolchain::Solc => { - benchmark.groups.insert( - benchmark_analyzer::BENCHMARK_ALL_GROUP_NAME.to_owned(), - benchmark_analyzer::BenchmarkGroup::default(), - ); - } - Toolchain::SolcLLVM => { - anyhow::bail!("The benchmarking is not supported for the SolcLLVM toolchain.") - } + + if let (Some(start), Some(end)) = (self.timer.get_start(), self.timer.get_end()) { + benchmark.metadata = benchmark_analyzer::BenchmarkMetadata { + version: BENCHMARK_FORMAT_VERSION, + start, + end, + context, + }; + } else { + anyhow::bail!("Invalid state: the time of running the benchmark was not measured."); } - for element in self.elements.iter() { - let (size, cycles, ergs, group, gas) = match 
&element.outcome { + for Element { + test_description: + TestDescription { + group, + mode, + selector, + }, + outcome, + } in self.elements.iter() + { + let (size, cycles, ergs, gas) = match outcome { Outcome::Passed { variant: PassedVariant::Deploy { @@ -120,59 +148,51 @@ impl Summary { ergs, gas, }, - group, - } => (Some(*size), *cycles, *ergs, group.clone(), *gas), + .. + } => (Some(*size), *cycles, *ergs, *gas), Outcome::Passed { variant: PassedVariant::Runtime { cycles, ergs, gas }, - group, - } => (None, *cycles, *ergs, group.clone(), *gas), + .. + } => (None, *cycles, *ergs, *gas), _ => continue, }; - let key = format!( - "{:24} {}", - element - .test_description - .mode - .as_ref() - .map(|mode| mode.to_string()) - .unwrap_or_default(), - element.test_description.selector - ); - let mode = element - .test_description - .mode - .as_ref() - .and_then(|mode| mode.llvm_optimizer_settings().cloned()); - - let metadata = { - let default_group = group.clone().unwrap_or_default(); - convert_description(&element.test_description, &default_group) - }; - let benchmark_element = - benchmark_analyzer::BenchmarkElement::new(metadata, size, cycles, ergs, gas); - if let Some(group) = group { - let group_key = match mode { - Some(ref mode) => format!("{group} {mode}"), - None => group, - }; - benchmark - .groups - .entry(group_key) - .or_default() - .elements - .insert(key.clone(), benchmark_element.clone()); - } - - let group_key = match mode { - Some(ref mode) => { - format!("{} {mode}", benchmark_analyzer::BENCHMARK_ALL_GROUP_NAME) - } - None => benchmark_analyzer::BENCHMARK_ALL_GROUP_NAME.to_owned(), - }; - if let Some(group) = benchmark.groups.get_mut(group_key.as_str()) { - group.elements.insert(key, benchmark_element); - } + let test_name = selector.to_string(); + + let tags: Vec = group.iter().cloned().collect(); + + let ModeInfo { + codegen, + optimizations, + version, + } = mode + .clone() + .expect("The compiler mode is missing from description.") + .into(); + + 
benchmark + .tests + .entry(test_name) + .or_insert(benchmark_analyzer::Test::new( + benchmark_analyzer::TestMetadata::new(selector.clone().into(), tags), + )) + .codegen_groups + .entry(codegen) + .or_insert(Default::default()) + .versioned_groups + .entry(version) + .or_insert(Default::default()) + .executables + .entry(optimizations) + .or_insert(benchmark_analyzer::Executable { + metadata: benchmark_analyzer::ExecutableMetadata {}, + run: benchmark_analyzer::Run { + size, + cycles, + ergs, + gas, + }, + }); } Ok(benchmark) } diff --git a/compiler_tester/src/test/case/input/identifier.rs b/compiler_tester/src/test/case/input/identifier.rs index 6210abf5..21bcbd8b 100644 --- a/compiler_tester/src/test/case/input/identifier.rs +++ b/compiler_tester/src/test/case/input/identifier.rs @@ -9,6 +9,8 @@ pub enum InputIdentifier { /// The contract deploy, regardless of target. Deployer { contract_identifier: String }, + /// The fallback method. + Fallback { input_index: usize }, /// The contract call. Runtime { input_index: usize, name: String }, /// The storage empty check. 
@@ -32,6 +34,9 @@ impl std::fmt::Display for InputIdentifier { InputIdentifier::Balance { input_index } => { f.write_fmt(format_args!("#balance_check:{input_index}")) } + InputIdentifier::Fallback { input_index } => { + f.write_fmt(format_args!("#fallback:{input_index}")) + } } } } diff --git a/compiler_tester/src/test/case/input/runtime.rs b/compiler_tester/src/test/case/input/runtime.rs index 7070f5cf..f6f5cbc5 100644 --- a/compiler_tester/src/test/case/input/runtime.rs +++ b/compiler_tester/src/test/case/input/runtime.rs @@ -84,15 +84,12 @@ impl Runtime { let input_index = context.selector; let test = TestDescription::from_context( context, - InputIdentifier::Runtime { - input_index, - name: self.name, - }, + Self::select_input_identifier(self.name, input_index), ); let name = test.selector.to_string(); vm.populate_storage(self.storage.inner); let vm_function = match group.as_deref() { - Some(benchmark_analyzer::Benchmark::EVM_INTERPRETER_GROUP_NAME) => { + Some(benchmark_analyzer::TEST_GROUP_EVM_INTERPRETER) => { EraVM::execute_evm_interpreter:: } _ => EraVM::execute::, @@ -126,6 +123,13 @@ impl Runtime { } } + fn select_input_identifier(name: String, input_index: usize) -> InputIdentifier { + match name.as_str() { + "#fallback" => InputIdentifier::Fallback { input_index }, + _ => InputIdentifier::Runtime { input_index, name }, + } + } + /// /// Runs the call on EVM emulator. /// @@ -138,10 +142,7 @@ impl Runtime { let input_index = context.selector; let test = TestDescription::from_context( context, - InputIdentifier::Runtime { - input_index, - name: self.name, - }, + Self::select_input_identifier(self.name, input_index), ); let name = test.selector.to_string(); vm.populate_storage(self.storage.inner); diff --git a/compiler_tester/src/test/context/mod.rs b/compiler_tester/src/test/context/mod.rs index 1f756d3b..849b7764 100644 --- a/compiler_tester/src/test/context/mod.rs +++ b/compiler_tester/src/test/context/mod.rs @@ -1,2 +1,7 @@ +//! +//! 
Context accumulates information as we traverse the tree of tests, cases and +//! their inputs. +//! + pub mod case; pub mod input. diff --git a/compiler_tester/src/utils.rs b/compiler_tester/src/utils/mod.rs similarity index 99% rename from compiler_tester/src/utils.rs rename to compiler_tester/src/utils/mod.rs index 3d606c98..23b0d796 100644 --- a/compiler_tester/src/utils.rs +++ b/compiler_tester/src/utils/mod.rs @@ -2,6 +2,8 @@ //! The compiler tester utils. //! +pub mod timer; + use sha3::Digest; /// diff --git a/compiler_tester/src/utils/timer.rs b/compiler_tester/src/utils/timer.rs new file mode 100644 index 00000000..a53dbf05 --- /dev/null +++ b/compiler_tester/src/utils/timer.rs @@ -0,0 +1,115 @@ +//! +//! A simple timer capable of measuring time intervals. +//! + +#![allow(dead_code)] + +type TimeStamp = chrono::DateTime; + +/// +/// A simple timer capable of measuring time intervals between invocations of +/// `[start]` and `[stop]` methods. +/// +#[derive(Clone, Default, Debug)] +pub struct Timer { + /// Start time, + start: Option, + /// End time, + end: Option, +} + +impl Timer { + /// Starts the timer. This sets the `start` time to the current time. + /// + /// # Returns + /// + /// An Ok result if the timer was successfully started, or an error if + /// the timer was already started or stopped before. + /// + pub fn start(&mut self) -> anyhow::Result<()> { + match (self.start, self.end) { + (None, None) => { + self.start = Some(chrono::offset::Utc::now()); + Ok(()) + } + _ => anyhow::bail!("Malformed timer state: {self:?}") + } + } + + /// Stops the timer from ticking. Assumes the timer has been started with + /// `[start]`. + /// + /// # Returns + /// + /// An Ok result if the timer was successfully stopped, or an error if + /// the timer has not been started or if it was already stopped.
+ /// + pub fn stop(&mut self) -> anyhow::Result<()> { + match (self.start, self.end) { + (Some(_), None) => { + self.end = Some(chrono::offset::Utc::now()); + Ok(()) + } + _ => anyhow::bail!("Malformed timer state: {self:?}"), + } + } + + /// + /// Checks if timer is ticking now. + /// + /// # Returns + /// + /// `true` if the timer has been started and is currently running, `false` otherwise. + /// + pub fn is_started(&self) -> bool { + self.start.is_some() + } + + /// Returns true if the timer was started and then subsequently stopped. + /// + /// # Returns + /// + /// `true` if the timer has finished (started and stopped), `false` otherwise. + /// + pub fn is_finished(&self) -> bool { + self.end.is_some() + } + + /// Returns the start time of this [`Timer`]. + /// + /// # Returns + /// + /// An `Option` containing the start `TimeStamp` if the timer has been started, + /// or `None` if it has not been started. + /// + pub fn get_start(&self) -> Option { + self.start + } + + /// Returns the end time of this [`Timer`]. + /// + /// # Returns + /// + /// An `Option` containing the end `TimeStamp` if the timer has been stopped, + /// or `None` if it is still running or has not been started. + /// + pub fn get_end(&self) -> Option { + self.end + } + + /// Returns the elapsed time between the start and end of the timer. + /// + /// # Returns + /// + /// An Ok result with the `chrono::TimeDelta` representing the elapsed time + /// if the timer was started (and optionally stopped), or an error if the + /// timer was not started correctly. + /// + pub fn elapsed(&self) -> anyhow::Result { + match (self.start, self.end) { + (Some(start), Some(end)) => Ok(end - start), + (Some(start), None) => Ok(chrono::Utc::now() - start), + _ => anyhow::bail!("Malformed timer state: {self:?}"), + } + } +}