From 23788c276b7115e0e11d2240635613cdae37897c Mon Sep 17 00:00:00 2001
From: sinu <65924192+sinui0@users.noreply.github.com>
Date: Sun, 2 Mar 2025 22:26:35 -0800
Subject: [PATCH] feat: consolidated harness

---
 .github/workflows/ci.yml | 2 +-
 Cargo.toml | 25 +-
 crates/benches/binary/Cargo.toml | 70 ----
 crates/benches/binary/README.md | 53 ---
 crates/benches/binary/bench.sh | 13 -
 crates/benches/binary/bench.toml | 45 ---
 crates/benches/binary/benches.Dockerfile | 55 ---
 .../binary/benches.Dockerfile.dockerignore | 2 -
 crates/benches/binary/bin/bench.rs | 62 ----
 crates/benches/binary/bin/prover.rs | 8 -
 crates/benches/binary/bin/prover_memory.rs | 15 -
 crates/benches/binary/bin/verifier.rs | 8 -
 crates/benches/binary/bin/verifier_memory.rs | 15 -
 crates/benches/binary/docker.md | 13 -
 crates/benches/binary/src/config.rs | 123 -------
 crates/benches/binary/src/lib.rs | 273 --------------
 crates/benches/binary/src/metrics.rs | 31 --
 crates/benches/binary/src/preprocess.rs | 5 -
 crates/benches/binary/src/prover.rs | 57 ---
 crates/benches/binary/src/prover_main.rs | 176 ---------
 crates/benches/binary/src/verifier_main.rs | 131 -------
 crates/benches/browser/core/Cargo.toml | 13 -
 crates/benches/browser/core/src/lib.rs | 68 ----
 crates/benches/browser/core/src/msg.rs | 17 -
 crates/benches/browser/native/Cargo.toml | 22 --
 crates/benches/browser/native/src/lib.rs | 331 -----------------
 .../benches/browser/wasm/.cargo/config.toml | 11 -
 crates/benches/browser/wasm/Cargo.toml | 30 --
 crates/benches/browser/wasm/pkg/comlink.mjs | 346 ------------------
 crates/benches/browser/wasm/pkg/index.html | 7 -
 crates/benches/browser/wasm/pkg/index.js | 7 -
 crates/benches/browser/wasm/pkg/worker.js | 45 ---
 .../benches/browser/wasm/rust-toolchain.toml | 2 -
 crates/benches/browser/wasm/src/lib.rs | 102 ------
 crates/benches/library/Cargo.toml | 19 -
 crates/benches/library/src/lib.rs | 133 -------
 crates/{benches/binary => harness}/.gitignore | 0
 crates/harness/Cargo.toml | 76 ++++
 crates/harness/README.md | 30 ++
 crates/harness/bench.toml | 44 +++
 crates/harness/bin/main.rs | 120 ++++++
 .../{benches/binary => harness}/bin/plot.rs | 2 +-
 .../run.sh => harness/build.sh} | 3 +-
 crates/harness/src/bench.rs | 195 ++++++++++
 crates/harness/src/bench/io.rs | 78 ++++
 crates/harness/src/bench/prover.rs | 113 ++++++
 crates/harness/src/bench/verifier.rs | 86 +++++
 crates/harness/src/cli.rs | 38 ++
 crates/harness/src/config.rs | 2 +
 crates/harness/src/io.rs | 5 +
 crates/harness/src/lib.rs | 38 ++
 crates/harness/src/provider.rs | 171 +++++++++
 crates/harness/src/runner.rs | 62 ++++
 crates/harness/src/runner/browser.rs | 136 +++++++
 .../src/runner/browser}/chrome_driver.rs | 36 +-
 .../src/runner/browser}/wasm_server.rs | 10 +-
 .../src/runner/browser/ws_proxy.rs} | 7 +-
 crates/harness/src/runner/native.rs | 135 +++++++
 .../src/runner}/server_fixture.rs | 20 +-
 .../src/runner}/tlsn_fixture.rs | 48 ++-
 crates/harness/src/spawn.rs | 16 +
 crates/harness/src/test.rs | 84 +++++
 crates/harness/src/tests.rs | 109 ++++++
 crates/harness/src/wasm.rs | 70 ++++
 .../static/favicon.ico | Bin
 .../static/index.html | 0
 crates/harness/static/index.js | 5 +
 crates/harness/static/worker.js | 31 ++
 .../notary-server.Dockerfile.dockerignore | 2 +-
 crates/tests-integration/Cargo.toml | 23 --
 .../tests/defer_decryption.rs | 133 -------
 crates/tests-integration/tests/notarize.rs | 151 --------
 crates/tests-integration/tests/verify.rs | 149 --------
 crates/wasm-test-runner/Cargo.toml | 28 --
 crates/wasm-test-runner/src/lib.rs | 39 --
 crates/wasm-test-runner/src/main.rs | 42 ---
 crates/wasm-test-runner/static/index.js | 5 -
 crates/wasm-test-runner/static/worker.js | 50 ---
 crates/wasm/Cargo.toml | 23 +-
 crates/wasm/src/lib.rs | 7 +-
 crates/wasm/src/tests.rs | 186 ----------
 pre-commit-check.sh | 4 +-
 82 files changed, 1729 insertions(+), 3218 deletions(-)
 delete mode 100644 crates/benches/binary/Cargo.toml
 delete mode 100644 crates/benches/binary/README.md
 delete mode 100755 crates/benches/binary/bench.sh
 delete mode 100644 crates/benches/binary/bench.toml
 delete mode 100644 crates/benches/binary/benches.Dockerfile
 delete mode 100644 crates/benches/binary/benches.Dockerfile.dockerignore
 delete mode 100644 crates/benches/binary/bin/bench.rs
 delete mode 100644 crates/benches/binary/bin/prover.rs
 delete mode 100644 crates/benches/binary/bin/prover_memory.rs
 delete mode 100644 crates/benches/binary/bin/verifier.rs
 delete mode 100644 crates/benches/binary/bin/verifier_memory.rs
 delete mode 100644 crates/benches/binary/docker.md
 delete mode 100644 crates/benches/binary/src/config.rs
 delete mode 100644 crates/benches/binary/src/lib.rs
 delete mode 100644 crates/benches/binary/src/metrics.rs
 delete mode 100644 crates/benches/binary/src/preprocess.rs
 delete mode 100644 crates/benches/binary/src/prover.rs
 delete mode 100644 crates/benches/binary/src/prover_main.rs
 delete mode 100644 crates/benches/binary/src/verifier_main.rs
 delete mode 100644 crates/benches/browser/core/Cargo.toml
 delete mode 100644 crates/benches/browser/core/src/lib.rs
 delete mode 100644 crates/benches/browser/core/src/msg.rs
 delete mode 100644 crates/benches/browser/native/Cargo.toml
 delete mode 100644 crates/benches/browser/native/src/lib.rs
 delete mode 100644 crates/benches/browser/wasm/.cargo/config.toml
 delete mode 100644 crates/benches/browser/wasm/Cargo.toml
 delete mode 100644 crates/benches/browser/wasm/pkg/comlink.mjs
 delete mode 100644 crates/benches/browser/wasm/pkg/index.html
 delete mode 100644 crates/benches/browser/wasm/pkg/index.js
 delete mode 100644 crates/benches/browser/wasm/pkg/worker.js
 delete mode 100644 crates/benches/browser/wasm/rust-toolchain.toml
 delete mode 100644 crates/benches/browser/wasm/src/lib.rs
 delete mode 100644 crates/benches/library/Cargo.toml
 delete mode 100644 crates/benches/library/src/lib.rs
 rename crates/{benches/binary => harness}/.gitignore (100%)
 create mode 100644 crates/harness/Cargo.toml
 create mode 100644 crates/harness/README.md
 create mode 100644 crates/harness/bench.toml
 create mode 100644 crates/harness/bin/main.rs
 rename crates/{benches/binary => harness}/bin/plot.rs (99%)
 rename crates/{wasm-test-runner/run.sh => harness/build.sh} (55%)
 create mode 100644 crates/harness/src/bench.rs
 create mode 100644 crates/harness/src/bench/io.rs
 create mode 100644 crates/harness/src/bench/prover.rs
 create mode 100644 crates/harness/src/bench/verifier.rs
 create mode 100644 crates/harness/src/cli.rs
 create mode 100644 crates/harness/src/config.rs
 create mode 100644 crates/harness/src/io.rs
 create mode 100644 crates/harness/src/lib.rs
 create mode 100644 crates/harness/src/provider.rs
 create mode 100644 crates/harness/src/runner.rs
 create mode 100644 crates/harness/src/runner/browser.rs
 rename crates/{wasm-test-runner/src => harness/src/runner/browser}/chrome_driver.rs (72%)
 rename crates/{wasm-test-runner/src => harness/src/runner/browser}/wasm_server.rs (88%)
 rename crates/{wasm-test-runner/src/ws.rs => harness/src/runner/browser/ws_proxy.rs} (84%)
 create mode 100644 crates/harness/src/runner/native.rs
 rename
crates/{wasm-test-runner/src => harness/src/runner}/server_fixture.rs (60%) rename crates/{wasm-test-runner/src => harness/src/runner}/tlsn_fixture.rs (85%) create mode 100644 crates/harness/src/spawn.rs create mode 100644 crates/harness/src/test.rs create mode 100644 crates/harness/src/tests.rs create mode 100644 crates/harness/src/wasm.rs rename crates/{wasm-test-runner => harness}/static/favicon.ico (100%) rename crates/{wasm-test-runner => harness}/static/index.html (100%) create mode 100644 crates/harness/static/index.js create mode 100644 crates/harness/static/worker.js delete mode 100644 crates/tests-integration/Cargo.toml delete mode 100644 crates/tests-integration/tests/defer_decryption.rs delete mode 100644 crates/tests-integration/tests/notarize.rs delete mode 100644 crates/tests-integration/tests/verify.rs delete mode 100644 crates/wasm-test-runner/Cargo.toml delete mode 100644 crates/wasm-test-runner/src/lib.rs delete mode 100644 crates/wasm-test-runner/src/main.rs delete mode 100644 crates/wasm-test-runner/static/index.js delete mode 100644 crates/wasm-test-runner/static/worker.js delete mode 100644 crates/wasm/src/tests.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 619d2e6dce..02a8537e41 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -95,7 +95,7 @@ jobs: - name: Run tests run: | - cd crates/wasm-test-runner + cd crates/harness ./run.sh - name: Run build diff --git a/Cargo.toml b/Cargo.toml index 226a46f3da..c82c4e5beb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,10 +1,5 @@ [workspace] members = [ - "crates/benches/binary", - "crates/benches/browser/core", - "crates/benches/browser/native", - "crates/benches/browser/wasm", - "crates/benches/library", "crates/common", "crates/components/deap", "crates/components/cipher", @@ -21,7 +16,6 @@ members = [ "crates/prover", "crates/server-fixture/certs", "crates/server-fixture/server", - "crates/tests-integration", "crates/tls/backend", "crates/tls/client", "crates/tls/client-async", @@ -30,22 +24,15 @@ members = [ "crates/tls/server-fixture", "crates/verifier", "crates/wasm", - "crates/wasm-test-runner", + "crates/harness", ] resolver = "2" -[profile.tests-integration] -inherits = "release" -opt-level = 1 - [workspace.dependencies] notary-client = { path = "crates/notary/client" } notary-server = { path = "crates/notary/server" } tls-server-fixture = { path = "crates/tls/server-fixture" } tlsn-cipher = { path = "crates/components/cipher" } -tlsn-benches-browser-core = { path = "crates/benches/browser/core" } -tlsn-benches-browser-native = { path = "crates/benches/browser/native" } -tlsn-benches-library = { path = "crates/benches/library" } tlsn-common = { path = "crates/common" } tlsn-core = { path = "crates/core" } tlsn-data-fixtures = { path = "crates/data-fixtures" } @@ -64,6 +51,8 @@ tlsn-tls-client-async = { path = "crates/tls/client-async" } tlsn-tls-core = { path = "crates/tls/core" } tlsn-utils = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "8555275" } tlsn-utils-aio = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "8555275" } +tlsn-harness = { path = "crates/harness" } +tlsn-wasm = { path = "crates/wasm" } tlsn-verifier = { path = "crates/verifier" } mpz-circuits = { git = "https://github.com/privacy-scaling-explorations/mpz", branch = "alpha.1" } @@ -94,8 +83,10 @@ bcs = { version = "0.1" } bincode = { version = "1.3" } blake3 = { version = "1.5" } bytes = { version = "1.4" } +cfg-if = { version = "1" } chrono = { version = "0.4" } cipher = { 
version = "0.4" } +clap = { version = "4.5" } criterion = { version = "0.5" } ctr = { version = "0.9" } derive_builder = { version = "0.12" } @@ -113,6 +104,7 @@ http = { version = "1.1" } http-body-util = { version = "0.1" } hyper = { version = "1.1" } hyper-util = { version = "0.1" } +inventory = { version = "0.3" } itybity = { version = "0.2" } k256 = { version = "0.13" } log = { version = "0.4" } @@ -121,6 +113,7 @@ opaque-debug = { version = "0.3" } p256 = { version = "0.13" } pkcs8 = { version = "0.10" } pin-project-lite = { version = "0.2" } +pollster = { version = "0.4" } rand = { version = "0.8" } rand_chacha = { version = "0.3" } rand_core = { version = "0.6" } @@ -143,9 +136,13 @@ tokio-util = { version = "0.7" } tracing = { version = "0.1" } tracing-subscriber = { version = "0.3" } uuid = { version = "1.4" } +wasm-bindgen = { version = "0.2" } +wasm-bindgen-futures = { version = "0.4" } web-spawn = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "2d93c56" } web-time = { version = "0.2" } webpki = { version = "0.22" } webpki-roots = { version = "0.26" } ws_stream_tungstenite = { version = "0.14" } +# Use the patched ws_stream_wasm to fix the issue https://github.com/najamelan/ws_stream_wasm/issues/12#issuecomment-1711902958 +ws_stream_wasm = { git = "https://github.com/tlsnotary/ws_stream_wasm", rev = "2ed12aad9f0236e5321f577672f309920b2aef51" } zeroize = { version = "1.8" } diff --git a/crates/benches/binary/Cargo.toml b/crates/benches/binary/Cargo.toml deleted file mode 100644 index b7de8fc24f..0000000000 --- a/crates/benches/binary/Cargo.toml +++ /dev/null @@ -1,70 +0,0 @@ -[package] -edition = "2021" -name = "tlsn-benches" -publish = false -version = "0.0.0" - -[features] -default = [] -# Enables benchmarks in the browser. -browser-bench = ["tlsn-benches-browser-native"] - -[dependencies] -mpz-common = { workspace = true } -mpz-core = { workspace = true } -mpz-garble = { workspace = true } -mpz-ot = { workspace = true, features = ["ideal"] } -tlsn-benches-library = { workspace = true } -tlsn-benches-browser-native = { workspace = true, optional = true} -tlsn-common = { workspace = true } -tlsn-core = { workspace = true } -tlsn-hmac-sha256 = { workspace = true } -tlsn-prover = { workspace = true } -tlsn-server-fixture = { workspace = true } -tlsn-server-fixture-certs = { workspace = true } -tlsn-tls-core = { workspace = true } -tlsn-verifier = { workspace = true } - -anyhow = { workspace = true } -async-trait = { workspace = true } -charming = {version = "0.3.1", features = ["ssr"]} -csv = "1.3.0" -dhat = { version = "0.3.3" } -env_logger = { version = "0.6.0", default-features = false } -futures = { workspace = true } -serde = { workspace = true } -tokio = { workspace = true, features = [ - "rt", - "rt-multi-thread", - "macros", - "net", - "io-std", - "fs", -] } -tokio-util = { workspace = true } -toml = "0.8.11" -tracing-subscriber = {workspace = true, features = ["env-filter"]} - -[[bin]] -name = "bench" -path = "bin/bench.rs" - -[[bin]] -name = "prover" -path = "bin/prover.rs" - -[[bin]] -name = "prover-memory" -path = "bin/prover_memory.rs" - -[[bin]] -name = "verifier" -path = "bin/verifier.rs" - -[[bin]] -name = "verifier-memory" -path = "bin/verifier_memory.rs" - -[[bin]] -name = "plot" -path = "bin/plot.rs" diff --git a/crates/benches/binary/README.md b/crates/benches/binary/README.md deleted file mode 100644 index e45bcadd82..0000000000 --- a/crates/benches/binary/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# TLSNotary bench utilities - -This crate provides 
utilities for benchmarking protocol performance under various network conditions and usage patterns. - -As the protocol is mostly IO bound, it's important to track how it performs in low bandwidth and/or high latency environments. To do this we set up temporary network namespaces and add virtual ethernet interfaces which we can control using the linux `tc` (Traffic Control) utility. - -## Configuration - -See the `bench.toml` file for benchmark configurations. - -## Preliminaries - -To run the benchmarks you will need `iproute2` installed, eg: -```sh -sudo apt-get install iproute2 -y -``` - -## Running benches - -Running the benches requires root privileges because they will set up virtual interfaces. The script is designed to fully clean up when the benches are done, but run them at your own risk. - -#### Native benches - -Make sure you're in the `crates/benches/` directory, build the binaries, and then run the script: - -```sh -cd binary -cargo build --release -sudo ./bench.sh -``` - -#### Browser benches - -(Note, we recommend running browser benches inside a docker container (see docker.md) to avoid -facing incompatibility issues observed in the latest versions of Chrome.) - -With a Chrome browser installed on your system, make sure you're in the `crates/benches/` -directory, build the wasm module, build the binaries, and then run the script: -```sh -cd browser/wasm -rustup run nightly wasm-pack build --release --target web -cd ../../binary -cargo build --release --features browser-bench -sudo ./bench.sh -``` - -## Metrics - -After you run the benches you will see a `metrics.csv` file in the working directory. It will be owned by `root`, so you probably want to run - -```sh -sudo chown $USER metrics.csv -``` \ No newline at end of file diff --git a/crates/benches/binary/bench.sh b/crates/benches/binary/bench.sh deleted file mode 100755 index 1e78a527ca..0000000000 --- a/crates/benches/binary/bench.sh +++ /dev/null @@ -1,13 +0,0 @@ -#! /bin/bash - -# Check if we are running as root. -if [ "$EUID" -ne 0 ]; then - echo "This script must be run as root" - exit -fi - -# Run the benchmark binary. -../../../target/release/bench - -# Plot the results. -../../../target/release/plot metrics.csv diff --git a/crates/benches/binary/bench.toml b/crates/benches/binary/bench.toml deleted file mode 100644 index e265884f50..0000000000 --- a/crates/benches/binary/bench.toml +++ /dev/null @@ -1,45 +0,0 @@ -[[benches]] -name = "latency" -upload = 250 -upload-delay = [10, 25, 50] -download = 250 -download-delay = [10, 25, 50] -upload-size = 1024 -download-size = 4096 -defer-decryption = true -memory-profile = false - -[[benches]] -name = "download_bandwidth" -upload = 250 -upload-delay = 25 -download = [10, 25, 50, 100, 250] -download-delay = 25 -upload-size = 1024 -download-size = 4096 -defer-decryption = true -memory-profile = false - -[[benches]] -name = "upload_bandwidth" -upload = [10, 25, 50, 100, 250] -upload-delay = 25 -download = 250 -download-delay = 25 -upload-size = 1024 -download-size = 4096 -defer-decryption = [false, true] -memory-profile = false - -[[benches]] -name = "download_volume" -upload = 250 -upload-delay = 25 -download = 250 -download-delay = 25 -upload-size = 1024 -# Setting download-size higher than 45000 will cause a `Maximum call stack size exceeded` -# error in the browser. 
-download-size = [1024, 4096, 16384, 45000] -defer-decryption = true -memory-profile = true diff --git a/crates/benches/binary/benches.Dockerfile b/crates/benches/binary/benches.Dockerfile deleted file mode 100644 index 7937e20985..0000000000 --- a/crates/benches/binary/benches.Dockerfile +++ /dev/null @@ -1,55 +0,0 @@ -FROM rust AS builder -WORKDIR /usr/src/tlsn -COPY . . - -ARG BENCH_TYPE=native - -RUN \ - if [ "$BENCH_TYPE" = "browser" ]; then \ - # ring's build script needs clang. - apt update && apt install -y clang; \ - rustup install nightly; \ - rustup component add rust-src --toolchain nightly; \ - cargo install wasm-pack; \ - cd crates/benches/browser/wasm; \ - rustup run nightly wasm-pack build --release --target web; \ - cd ../../binary; \ - cargo build --release --features browser-bench; \ - else \ - cd crates/benches/binary; \ - cargo build --release; \ - fi - -FROM debian:latest - -ARG BENCH_TYPE=native - -RUN apt update && apt upgrade -y && apt install -y --no-install-recommends \ - iproute2 \ - sudo - -RUN \ - if [ "$BENCH_TYPE" = "browser" ]; then \ - # Using Chromium since Chrome for Linux is not available on ARM. - apt install -y chromium; \ - fi - -RUN apt clean && rm -rf /var/lib/apt/lists/* - -COPY --from=builder \ - ["/usr/src/tlsn/target/release/bench", \ - "/usr/src/tlsn/target/release/prover", \ - "/usr/src/tlsn/target/release/prover-memory", \ - "/usr/src/tlsn/target/release/verifier", \ - "/usr/src/tlsn/target/release/verifier-memory", \ - "/usr/src/tlsn/target/release/plot", \ - "/usr/local/bin/"] - -ENV PROVER_PATH="/usr/local/bin/prover" -ENV VERIFIER_PATH="/usr/local/bin/verifier" -ENV PROVER_MEMORY_PATH="/usr/local/bin/prover-memory" -ENV VERIFIER_MEMORY_PATH="/usr/local/bin/verifier-memory" - -VOLUME [ "/benches" ] -WORKDIR "/benches" -CMD ["/bin/bash", "-c", "bench && bench --memory-profiling && plot /benches/metrics.csv && cat /benches/metrics.csv"] diff --git a/crates/benches/binary/benches.Dockerfile.dockerignore b/crates/benches/binary/benches.Dockerfile.dockerignore deleted file mode 100644 index 1a3de888d1..0000000000 --- a/crates/benches/binary/benches.Dockerfile.dockerignore +++ /dev/null @@ -1,2 +0,0 @@ -# exclude any /target folders -**/target* diff --git a/crates/benches/binary/bin/bench.rs b/crates/benches/binary/bin/bench.rs deleted file mode 100644 index 271b5594e8..0000000000 --- a/crates/benches/binary/bin/bench.rs +++ /dev/null @@ -1,62 +0,0 @@ -use std::{env, process::Command, thread, time::Duration}; - -use tlsn_benches::{clean_up, set_up}; - -fn main() { - let args: Vec = env::args().collect(); - let is_memory_profiling = args.contains(&"--memory-profiling".to_string()); - - let (prover_path, verifier_path) = if is_memory_profiling { - ( - std::env::var("PROVER_MEMORY_PATH") - .unwrap_or_else(|_| "../../../target/release/prover-memory".to_string()), - std::env::var("VERIFIER_MEMORY_PATH") - .unwrap_or_else(|_| "../../../target/release/verifier-memory".to_string()), - ) - } else { - ( - std::env::var("PROVER_PATH") - .unwrap_or_else(|_| "../../../target/release/prover".to_string()), - std::env::var("VERIFIER_PATH") - .unwrap_or_else(|_| "../../../target/release/verifier".to_string()), - ) - }; - - if let Err(e) = set_up() { - println!("Error setting up: {}", e); - clean_up(); - } - - // Run prover and verifier binaries in parallel. 
- let Ok(mut verifier) = Command::new("ip") - .arg("netns") - .arg("exec") - .arg("verifier-ns") - .arg(verifier_path) - .spawn() - else { - println!("Failed to start verifier"); - return clean_up(); - }; - - // Allow the verifier some time to start listening before the prover attempts to - // connect. - thread::sleep(Duration::from_secs(1)); - - let Ok(mut prover) = Command::new("ip") - .arg("netns") - .arg("exec") - .arg("prover-ns") - .arg(prover_path) - .spawn() - else { - println!("Failed to start prover"); - return clean_up(); - }; - - // Wait for both to finish. - _ = prover.wait(); - _ = verifier.wait(); - - clean_up(); -} diff --git a/crates/benches/binary/bin/prover.rs b/crates/benches/binary/bin/prover.rs deleted file mode 100644 index b7ca29662a..0000000000 --- a/crates/benches/binary/bin/prover.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! A Prover without memory profiling. - -use tlsn_benches::prover_main::prover_main; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - prover_main(false).await -} diff --git a/crates/benches/binary/bin/prover_memory.rs b/crates/benches/binary/bin/prover_memory.rs deleted file mode 100644 index 14753541fa..0000000000 --- a/crates/benches/binary/bin/prover_memory.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! A Prover with memory profiling. - -use tlsn_benches::prover_main::prover_main; - -#[global_allocator] -static ALLOC: dhat::Alloc = dhat::Alloc; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - if cfg!(feature = "browser-bench") { - // Memory profiling is not compatible with browser benches. - return Ok(()); - } - prover_main(true).await -} diff --git a/crates/benches/binary/bin/verifier.rs b/crates/benches/binary/bin/verifier.rs deleted file mode 100644 index b7675fb30b..0000000000 --- a/crates/benches/binary/bin/verifier.rs +++ /dev/null @@ -1,8 +0,0 @@ -//! A Verifier without memory profiling. - -use tlsn_benches::verifier_main::verifier_main; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - verifier_main(false).await -} diff --git a/crates/benches/binary/bin/verifier_memory.rs b/crates/benches/binary/bin/verifier_memory.rs deleted file mode 100644 index 3ca2f42bc3..0000000000 --- a/crates/benches/binary/bin/verifier_memory.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! A Verifier with memory profiling. - -use tlsn_benches::verifier_main::verifier_main; - -#[global_allocator] -static ALLOC: dhat::Alloc = dhat::Alloc; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - if cfg!(feature = "browser-bench") { - // Memory profiling is not compatible with browser benches. - return Ok(()); - } - verifier_main(true).await -} diff --git a/crates/benches/binary/docker.md b/crates/benches/binary/docker.md deleted file mode 100644 index 8747b9165f..0000000000 --- a/crates/benches/binary/docker.md +++ /dev/null @@ -1,13 +0,0 @@ -# Run the TLSN benches with Docker - -In the root folder of this repository, run: -``` -# Change to BENCH_TYPE=browser if you want benchmarks to run in the browser. -docker build -t tlsn-bench . 
-f ./crates/benches/binary/benches.Dockerfile --build-arg BENCH_TYPE=native -``` - -Next run the benches with: -``` -docker run -it --privileged -v ./crates/benches/binary:/benches tlsn-bench -``` -The `--privileged` parameter is required because this test bench needs permission to create networks with certain parameters \ No newline at end of file diff --git a/crates/benches/binary/src/config.rs b/crates/benches/binary/src/config.rs deleted file mode 100644 index c36cd3e025..0000000000 --- a/crates/benches/binary/src/config.rs +++ /dev/null @@ -1,123 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Deserialize)] -#[serde(untagged)] -pub enum Field { - Single(T), - Multiple(Vec), -} - -#[derive(Deserialize)] -pub struct Config { - pub benches: Vec, -} - -#[derive(Deserialize)] -pub struct Bench { - pub name: String, - pub upload: Field, - #[serde(rename = "upload-delay")] - pub upload_delay: Field, - pub download: Field, - #[serde(rename = "download-delay")] - pub download_delay: Field, - #[serde(rename = "upload-size")] - pub upload_size: Field, - #[serde(rename = "download-size")] - pub download_size: Field, - #[serde(rename = "defer-decryption")] - pub defer_decryption: Field, - #[serde(rename = "memory-profile")] - pub memory_profile: Field, -} - -impl Bench { - /// Flattens the config into a list of instances - pub fn flatten(self) -> Vec { - let mut instances = vec![]; - - let upload = match self.upload { - Field::Single(u) => vec![u], - Field::Multiple(u) => u, - }; - - let upload_delay = match self.upload_delay { - Field::Single(u) => vec![u], - Field::Multiple(u) => u, - }; - - let download = match self.download { - Field::Single(u) => vec![u], - Field::Multiple(u) => u, - }; - - let download_latency = match self.download_delay { - Field::Single(u) => vec![u], - Field::Multiple(u) => u, - }; - - let upload_size = match self.upload_size { - Field::Single(u) => vec![u], - Field::Multiple(u) => u, - }; - - let download_size = match self.download_size { - Field::Single(u) => vec![u], - Field::Multiple(u) => u, - }; - - let defer_decryption = match self.defer_decryption { - Field::Single(u) => vec![u], - Field::Multiple(u) => u, - }; - - let memory_profile = match self.memory_profile { - Field::Single(u) => vec![u], - Field::Multiple(u) => u, - }; - - for u in upload { - for ul in &upload_delay { - for d in &download { - for dl in &download_latency { - for us in &upload_size { - for ds in &download_size { - for dd in &defer_decryption { - for mp in &memory_profile { - instances.push(BenchInstance { - name: self.name.clone(), - upload: u, - upload_delay: *ul, - download: *d, - download_delay: *dl, - upload_size: *us, - download_size: *ds, - defer_decryption: *dd, - memory_profile: *mp, - }); - } - } - } - } - } - } - } - } - - instances - } -} - -#[derive(Debug, Clone, Serialize)] -pub struct BenchInstance { - pub name: String, - pub upload: usize, - pub upload_delay: usize, - pub download: usize, - pub download_delay: usize, - pub upload_size: usize, - pub download_size: usize, - pub defer_decryption: bool, - /// Whether this instance should be used for memory profiling. 
- pub memory_profile: bool, -} diff --git a/crates/benches/binary/src/lib.rs b/crates/benches/binary/src/lib.rs deleted file mode 100644 index ab1c9dce2f..0000000000 --- a/crates/benches/binary/src/lib.rs +++ /dev/null @@ -1,273 +0,0 @@ -pub mod config; -pub mod metrics; -mod preprocess; -pub mod prover; -pub mod prover_main; -pub mod verifier_main; - -use std::{ - io, - process::{Command, Stdio}, -}; - -pub const PROVER_NAMESPACE: &str = "prover-ns"; -pub const PROVER_INTERFACE: &str = "prover-veth"; -pub const PROVER_SUBNET: &str = "10.10.1.0/24"; -pub const VERIFIER_NAMESPACE: &str = "verifier-ns"; -pub const VERIFIER_INTERFACE: &str = "verifier-veth"; -pub const VERIFIER_SUBNET: &str = "10.10.1.1/24"; - -pub fn set_up() -> io::Result<()> { - // Create network namespaces - create_network_namespace(PROVER_NAMESPACE)?; - create_network_namespace(VERIFIER_NAMESPACE)?; - - // Create veth pair and attach to namespaces - create_veth_pair( - PROVER_NAMESPACE, - PROVER_INTERFACE, - VERIFIER_NAMESPACE, - VERIFIER_INTERFACE, - )?; - - // Set devices up - set_device_up(PROVER_NAMESPACE, PROVER_INTERFACE)?; - set_device_up(VERIFIER_NAMESPACE, VERIFIER_INTERFACE)?; - - // Bring up the loopback interface. - set_device_up(PROVER_NAMESPACE, "lo")?; - set_device_up(VERIFIER_NAMESPACE, "lo")?; - - // Assign IPs - assign_ip_to_interface(PROVER_NAMESPACE, PROVER_INTERFACE, PROVER_SUBNET)?; - assign_ip_to_interface(VERIFIER_NAMESPACE, VERIFIER_INTERFACE, VERIFIER_SUBNET)?; - - // Set default routes - set_default_route( - PROVER_NAMESPACE, - PROVER_INTERFACE, - PROVER_SUBNET.split('/').next().unwrap(), - )?; - set_default_route( - VERIFIER_NAMESPACE, - VERIFIER_INTERFACE, - VERIFIER_SUBNET.split('/').next().unwrap(), - )?; - - Ok(()) -} - -pub fn clean_up() { - // Delete interface pair - if let Err(e) = Command::new("ip") - .args([ - "netns", - "exec", - PROVER_NAMESPACE, - "ip", - "link", - "delete", - PROVER_INTERFACE, - ]) - .status() - { - println!("Error deleting interface {}: {}", PROVER_INTERFACE, e); - } - - // Delete namespaces - if let Err(e) = Command::new("ip") - .args(["netns", "del", PROVER_NAMESPACE]) - .status() - { - println!("Error deleting namespace {}: {}", PROVER_NAMESPACE, e); - } - - if let Err(e) = Command::new("ip") - .args(["netns", "del", VERIFIER_NAMESPACE]) - .status() - { - println!("Error deleting namespace {}: {}", VERIFIER_NAMESPACE, e); - } -} - -/// Sets the interface parameters. -/// -/// Must be run in the correct namespace. -/// -/// # Arguments -/// -/// * `egress` - The egress bandwidth in mbps. -/// * `burst` - The burst in mbps. -/// * `delay` - The delay in ms. -pub fn set_interface(interface: &str, egress: usize, burst: usize, delay: usize) -> io::Result<()> { - // Clear rules - let output = Command::new("tc") - .arg("qdisc") - .arg("del") - .arg("dev") - .arg(interface) - .arg("root") - .stdout(Stdio::piped()) - .output()?; - - if output.stderr == "Error: Cannot delete qdisc with handle of zero.\n".as_bytes() { - // This error is informative, do not log it to stderr. 
- } else if !output.status.success() { - return Err(io::Error::other("Failed to clear rules")); - } - - // Egress - Command::new("tc") - .arg("qdisc") - .arg("add") - .arg("dev") - .arg(interface) - .arg("root") - .arg("handle") - .arg("1:") - .arg("tbf") - .arg("rate") - .arg(format!("{}mbit", egress)) - .arg("burst") - .arg(format!("{}mbit", burst)) - .arg("latency") - .arg("60s") - .status()?; - - // Delay - Command::new("tc") - .arg("qdisc") - .arg("add") - .arg("dev") - .arg(interface) - .arg("parent") - .arg("1:1") - .arg("handle") - .arg("10:") - .arg("netem") - .arg("delay") - .arg(format!("{}ms", delay)) - .status()?; - - Ok(()) -} - -/// Create a network namespace with the given name if it does not already exist. -fn create_network_namespace(name: &str) -> io::Result<()> { - // Check if namespace already exists - if Command::new("ip") - .args(["netns", "list"]) - .output()? - .stdout - .windows(name.len()) - .any(|ns| ns == name.as_bytes()) - { - println!("Namespace {} already exists", name); - return Ok(()); - } else { - println!("Creating namespace {}", name); - Command::new("ip").args(["netns", "add", name]).status()?; - } - - Ok(()) -} - -fn create_veth_pair( - left_namespace: &str, - left_interface: &str, - right_namespace: &str, - right_interface: &str, -) -> io::Result<()> { - // Check if interfaces are already present in namespaces - if is_interface_present_in_namespace(left_namespace, left_interface)? - || is_interface_present_in_namespace(right_namespace, right_interface)? - { - println!("Virtual interface already exists."); - return Ok(()); - } - - // Create veth pair - Command::new("ip") - .args([ - "link", - "add", - left_interface, - "type", - "veth", - "peer", - "name", - right_interface, - ]) - .status()?; - - println!( - "Created veth pair {} and {}", - left_interface, right_interface - ); - - // Attach veth pair to namespaces - attach_interface_to_namespace(left_namespace, left_interface)?; - attach_interface_to_namespace(right_namespace, right_interface)?; - - Ok(()) -} - -fn attach_interface_to_namespace(namespace: &str, interface: &str) -> io::Result<()> { - Command::new("ip") - .args(["link", "set", interface, "netns", namespace]) - .status()?; - - println!("Attached {} to namespace {}", interface, namespace); - - Ok(()) -} - -fn set_default_route(namespace: &str, interface: &str, ip: &str) -> io::Result<()> { - Command::new("ip") - .args([ - "netns", "exec", namespace, "ip", "route", "add", "default", "via", ip, "dev", - interface, - ]) - .status()?; - - println!( - "Set default route for namespace {} ip {} to {}", - namespace, ip, interface - ); - - Ok(()) -} - -fn is_interface_present_in_namespace( - namespace: &str, - interface: &str, -) -> Result { - Ok(Command::new("ip") - .args([ - "netns", "exec", namespace, "ip", "link", "list", "dev", interface, - ]) - .output()? 
- .stdout - .windows(interface.len()) - .any(|ns| ns == interface.as_bytes())) -} - -fn set_device_up(namespace: &str, interface: &str) -> io::Result<()> { - Command::new("ip") - .args([ - "netns", "exec", namespace, "ip", "link", "set", interface, "up", - ]) - .status()?; - - Ok(()) -} - -fn assign_ip_to_interface(namespace: &str, interface: &str, ip: &str) -> io::Result<()> { - Command::new("ip") - .args([ - "netns", "exec", namespace, "ip", "addr", "add", ip, "dev", interface, - ]) - .status()?; - - Ok(()) -} diff --git a/crates/benches/binary/src/metrics.rs b/crates/benches/binary/src/metrics.rs deleted file mode 100644 index d55819c7fa..0000000000 --- a/crates/benches/binary/src/metrics.rs +++ /dev/null @@ -1,31 +0,0 @@ -use serde::{Deserialize, Serialize}; -use tlsn_benches_library::ProverKind; - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Metrics { - pub name: String, - /// The kind of the prover, either native or browser. - pub kind: ProverKind, - /// Upload bandwidth in Mbps. - pub upload: usize, - /// Upload latency in ms. - pub upload_delay: usize, - /// Download bandwidth in Mbps. - pub download: usize, - /// Download latency in ms. - pub download_delay: usize, - /// Total bytes sent to the server. - pub upload_size: usize, - /// Total bytes received from the server. - pub download_size: usize, - /// Whether deferred decryption was used. - pub defer_decryption: bool, - /// The total runtime of the benchmark in seconds. - pub runtime: u64, - /// The total amount of data uploaded to the verifier in bytes. - pub uploaded: u64, - /// The total amount of data downloaded from the verifier in bytes. - pub downloaded: u64, - /// The peak heap memory usage in bytes. - pub heap_max_bytes: Option, -} diff --git a/crates/benches/binary/src/preprocess.rs b/crates/benches/binary/src/preprocess.rs deleted file mode 100644 index 8e0e9e080f..0000000000 --- a/crates/benches/binary/src/preprocess.rs +++ /dev/null @@ -1,5 +0,0 @@ -use hmac_sha256::build_circuits; - -pub async fn preprocess_prf_circuits() { - build_circuits().await; -} diff --git a/crates/benches/binary/src/prover.rs b/crates/benches/binary/src/prover.rs deleted file mode 100644 index b17abee417..0000000000 --- a/crates/benches/binary/src/prover.rs +++ /dev/null @@ -1,57 +0,0 @@ -use std::time::Instant; - -use tlsn_benches_library::{run_prover, AsyncIo, ProverKind, ProverTrait}; - -use async_trait::async_trait; - -pub struct NativeProver { - upload_size: usize, - download_size: usize, - defer_decryption: bool, - io: Option>, - client_conn: Option>, -} - -#[async_trait] -impl ProverTrait for NativeProver { - async fn setup( - upload_size: usize, - download_size: usize, - defer_decryption: bool, - io: Box, - client_conn: Box, - ) -> anyhow::Result - where - Self: Sized, - { - Ok(Self { - upload_size, - download_size, - defer_decryption, - io: Some(io), - client_conn: Some(client_conn), - }) - } - - async fn run(&mut self) -> anyhow::Result { - let io = std::mem::take(&mut self.io).unwrap(); - let client_conn = std::mem::take(&mut self.client_conn).unwrap(); - - let start_time = Instant::now(); - - run_prover( - self.upload_size, - self.download_size, - self.defer_decryption, - io, - client_conn, - ) - .await?; - - Ok(Instant::now().duration_since(start_time).as_secs()) - } - - fn kind(&self) -> ProverKind { - ProverKind::Native - } -} diff --git a/crates/benches/binary/src/prover_main.rs b/crates/benches/binary/src/prover_main.rs deleted file mode 100644 index a72330d996..0000000000 --- 
a/crates/benches/binary/src/prover_main.rs +++ /dev/null @@ -1,176 +0,0 @@ -//! Contains the actual main() function of the prover binary. It is moved here -//! in order to enable cargo to build two prover binaries - with and without -//! memory profiling. - -use std::{ - fs::metadata, - io::Write, - sync::{ - atomic::{AtomicU64, Ordering}, - Arc, - }, -}; - -use crate::{ - config::{BenchInstance, Config}, - metrics::Metrics, - preprocess::preprocess_prf_circuits, - set_interface, PROVER_INTERFACE, -}; -use anyhow::Context; -use tlsn_benches_library::{AsyncIo, ProverTrait}; -use tlsn_server_fixture::bind; - -use csv::WriterBuilder; - -use tokio_util::{ - compat::TokioAsyncReadCompatExt, - io::{InspectReader, InspectWriter}, -}; -use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter}; - -#[cfg(not(feature = "browser-bench"))] -use crate::prover::NativeProver as BenchProver; -#[cfg(feature = "browser-bench")] -use tlsn_benches_browser_native::BrowserProver as BenchProver; - -pub async fn prover_main(is_memory_profiling: bool) -> anyhow::Result<()> { - let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string()); - let config: Config = toml::from_str( - &std::fs::read_to_string(config_path).context("failed to read config file")?, - ) - .context("failed to parse config")?; - - tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE) - .init(); - - let ip = std::env::var("VERIFIER_IP").unwrap_or_else(|_| "10.10.1.1".to_string()); - let port: u16 = std::env::var("VERIFIER_PORT") - .map(|port| port.parse().expect("port is valid u16")) - .unwrap_or(8000); - let verifier_host = (ip.as_str(), port); - - let mut file = std::fs::OpenOptions::new() - .create(true) - .append(true) - .open("metrics.csv") - .context("failed to open metrics file")?; - - // Preprocess the PRF circuits as they are allocating a lot of memory, which - // don't need to be accounted for in the benchmarks. - preprocess_prf_circuits().await; - - { - let mut metric_wrt = WriterBuilder::new() - // If file is not empty, assume that the CSV header is already present in the file. 
- .has_headers(metadata("metrics.csv")?.len() == 0) - .from_writer(&mut file); - for bench in config.benches { - let instances = bench.flatten(); - for instance in instances { - if is_memory_profiling && !instance.memory_profile { - continue; - } - - println!("{:?}", &instance); - - let io = tokio::net::TcpStream::connect(verifier_host) - .await - .context("failed to open tcp connection")?; - metric_wrt.serialize( - run_instance(instance, io, is_memory_profiling) - .await - .context("failed to run instance")?, - )?; - metric_wrt.flush()?; - } - } - } - - file.flush()?; - - Ok(()) -} - -async fn run_instance( - instance: BenchInstance, - io: impl AsyncIo, - is_memory_profiling: bool, -) -> anyhow::Result { - let uploaded = Arc::new(AtomicU64::new(0)); - let downloaded = Arc::new(AtomicU64::new(0)); - let io = InspectWriter::new( - InspectReader::new(io, { - let downloaded = downloaded.clone(); - move |data| { - downloaded.fetch_add(data.len() as u64, Ordering::Relaxed); - } - }), - { - let uploaded = uploaded.clone(); - move |data| { - uploaded.fetch_add(data.len() as u64, Ordering::Relaxed); - } - }, - ); - - let BenchInstance { - name, - upload, - upload_delay, - download, - download_delay, - upload_size, - download_size, - defer_decryption, - memory_profile, - } = instance.clone(); - - set_interface(PROVER_INTERFACE, upload, 1, upload_delay)?; - - let _profiler = if is_memory_profiling { - assert!(memory_profile, "Instance doesn't have `memory_profile` set"); - // Build a testing profiler as it won't output to stderr. - Some(dhat::Profiler::builder().testing().build()) - } else { - None - }; - - let (client_conn, server_conn) = tokio::io::duplex(1 << 16); - tokio::spawn(bind(server_conn.compat())); - - let mut prover = BenchProver::setup( - upload_size, - download_size, - defer_decryption, - Box::new(io), - Box::new(client_conn), - ) - .await?; - - let runtime = prover.run().await?; - - let heap_max_bytes = if is_memory_profiling { - Some(dhat::HeapStats::get().max_bytes) - } else { - None - }; - - Ok(Metrics { - name, - kind: prover.kind(), - upload, - upload_delay, - download, - download_delay, - upload_size, - download_size, - defer_decryption, - runtime, - uploaded: uploaded.load(Ordering::SeqCst), - downloaded: downloaded.load(Ordering::SeqCst), - heap_max_bytes, - }) -} diff --git a/crates/benches/binary/src/verifier_main.rs b/crates/benches/binary/src/verifier_main.rs deleted file mode 100644 index d76752975c..0000000000 --- a/crates/benches/binary/src/verifier_main.rs +++ /dev/null @@ -1,131 +0,0 @@ -//! Contains the actual main() function of the verifier binary. It is moved here -//! in order to enable cargo to build two verifier binaries - with and without -//! memory profiling. 
- -use crate::{ - config::{BenchInstance, Config}, - preprocess::preprocess_prf_circuits, - set_interface, VERIFIER_INTERFACE, -}; -use tls_core::verify::WebPkiVerifier; -use tlsn_common::config::ProtocolConfigValidator; -use tlsn_core::CryptoProvider; -use tlsn_server_fixture_certs::CA_CERT_DER; -use tlsn_verifier::{Verifier, VerifierConfig}; - -use anyhow::Context; -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio_util::compat::TokioAsyncReadCompatExt; -use tracing_subscriber::{fmt::format::FmtSpan, EnvFilter}; - -pub async fn verifier_main(is_memory_profiling: bool) -> anyhow::Result<()> { - let config_path = std::env::var("CFG").unwrap_or_else(|_| "bench.toml".to_string()); - let config: Config = toml::from_str( - &std::fs::read_to_string(config_path).context("failed to read config file")?, - ) - .context("failed to parse config")?; - - tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE) - .init(); - - let ip = std::env::var("VERIFIER_IP").unwrap_or_else(|_| "10.10.1.1".to_string()); - let port: u16 = std::env::var("VERIFIER_PORT") - .map(|port| port.parse().expect("port is valid u16")) - .unwrap_or(8000); - let host = (ip.as_str(), port); - - let listener = tokio::net::TcpListener::bind(host) - .await - .context("failed to bind to port")?; - - // Preprocess the PRF circuits as they are allocating a lot of memory, which - // don't need to be accounted for in the benchmarks. - preprocess_prf_circuits().await; - - for bench in config.benches { - for instance in bench.flatten() { - if is_memory_profiling && !instance.memory_profile { - continue; - } - - let (io, _) = listener - .accept() - .await - .context("failed to accept connection")?; - run_instance(instance, io, is_memory_profiling) - .await - .context("failed to run instance")?; - } - } - - Ok(()) -} - -async fn run_instance( - instance: BenchInstance, - io: S, - is_memory_profiling: bool, -) -> anyhow::Result<()> { - let BenchInstance { - download, - download_delay, - upload_size, - download_size, - memory_profile, - .. - } = instance; - - set_interface(VERIFIER_INTERFACE, download, 1, download_delay)?; - - let _profiler = if is_memory_profiling { - assert!(memory_profile, "Instance doesn't have `memory_profile` set"); - // Build a testing profiler as it won't output to stderr. - Some(dhat::Profiler::builder().testing().build()) - } else { - None - }; - - let provider = CryptoProvider { - cert: cert_verifier(), - ..Default::default() - }; - - let config_validator = ProtocolConfigValidator::builder() - .max_sent_data(upload_size + 256) - .max_recv_data(download_size + 256) - .build() - .unwrap(); - - let verifier = Verifier::new( - VerifierConfig::builder() - .protocol_config_validator(config_validator) - .crypto_provider(provider) - .build()?, - ); - - verifier.verify(io.compat()).await?; - - println!("verifier done"); - - if is_memory_profiling { - // XXX: we may want to profile the Verifier's memory usage at a future - // point. 
- // println!( - // "verifier peak heap memory usage: {}", - // dhat::HeapStats::get().max_bytes - // ); - } - - Ok(()) -} - -fn cert_verifier() -> WebPkiVerifier { - let mut root_store = tls_core::anchors::RootCertStore::empty(); - root_store - .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) - .unwrap(); - - WebPkiVerifier::new(root_store, None) -} diff --git a/crates/benches/browser/core/Cargo.toml b/crates/benches/browser/core/Cargo.toml deleted file mode 100644 index e182a9e800..0000000000 --- a/crates/benches/browser/core/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -edition = "2021" -name = "tlsn-benches-browser-core" -publish = false -version = "0.0.0" - -[dependencies] -tlsn-benches-library = { workspace = true } - -serio = { workspace = true } - -serde = { workspace = true } -tokio-util= { workspace = true, features = ["compat", "io-util"] } diff --git a/crates/benches/browser/core/src/lib.rs b/crates/benches/browser/core/src/lib.rs deleted file mode 100644 index 8b41fe08ce..0000000000 --- a/crates/benches/browser/core/src/lib.rs +++ /dev/null @@ -1,68 +0,0 @@ -//! Contains core types shared by the native and the wasm components. - -use std::{ - io::Error, - pin::Pin, - task::{Context, Poll}, -}; - -use tlsn_benches_library::AsyncIo; - -use serio::{ - codec::{Bincode, Framed}, - Sink, Stream, -}; -use tokio_util::codec::LengthDelimitedCodec; - -pub mod msg; - -/// A sink/stream for serializable types with a framed transport. -pub struct FramedIo { - inner: - serio::Framed, LengthDelimitedCodec>, Bincode>, -} - -impl FramedIo { - /// Creates a new `FramedIo` from the given async `io`. - #[allow(clippy::default_constructed_unit_structs)] - pub fn new(io: Box) -> Self { - let io = LengthDelimitedCodec::builder().new_framed(io); - Self { - inner: Framed::new(io, Bincode::default()), - } - } -} - -impl Sink for FramedIo { - type Error = Error; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_ready(cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_close(cx) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.inner).poll_flush(cx) - } - - fn start_send( - mut self: Pin<&mut Self>, - item: Item, - ) -> std::result::Result<(), Self::Error> { - Pin::new(&mut self.inner).start_send(item) - } -} - -impl Stream for FramedIo { - type Error = Error; - - fn poll_next( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll>> { - Pin::new(&mut self.inner).poll_next(cx) - } -} diff --git a/crates/benches/browser/core/src/msg.rs b/crates/benches/browser/core/src/msg.rs deleted file mode 100644 index 753279fcd7..0000000000 --- a/crates/benches/browser/core/src/msg.rs +++ /dev/null @@ -1,17 +0,0 @@ -//! Messages exchanged by the native and the wasm components of the browser -//! prover. - -use serde::{Deserialize, Serialize}; - -#[derive(Serialize, Deserialize, PartialEq)] -/// The config sent to the wasm component. -pub struct Config { - pub upload_size: usize, - pub download_size: usize, - pub defer_decryption: bool, -} - -#[derive(Serialize, Deserialize, PartialEq)] -/// Sent by the wasm component when proving process is finished. Contains total -/// runtime in seconds. 
-pub struct Runtime(pub u64); diff --git a/crates/benches/browser/native/Cargo.toml b/crates/benches/browser/native/Cargo.toml deleted file mode 100644 index e0c22578dc..0000000000 --- a/crates/benches/browser/native/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -edition = "2021" -name = "tlsn-benches-browser-native" -publish = false -version = "0.0.0" - -[dependencies] -tlsn-benches-browser-core = { workspace = true } -tlsn-benches-library = { workspace = true } - -serio = { workspace = true } -websocket-relay = { workspace = true } - -anyhow = { workspace = true } -async-trait = { workspace = true } -chromiumoxide = { version = "0.6.0" , features = ["tokio-runtime"] } -futures = { workspace = true } -rust-embed = "8.5.0" -tokio = { workspace = true, features = ["rt", "io-std"] } -tracing = { workspace = true } -warp = "0.3.7" -warp-embed = "0.5.0" diff --git a/crates/benches/browser/native/src/lib.rs b/crates/benches/browser/native/src/lib.rs deleted file mode 100644 index 10fc3b04f9..0000000000 --- a/crates/benches/browser/native/src/lib.rs +++ /dev/null @@ -1,331 +0,0 @@ -//! Contains the native component of the browser prover. -//! -//! Conceptually the browser prover consists of the native and the wasm -//! components. The native component is responsible for starting the browser, -//! loading the wasm component and driving it. - -use std::{env, net::IpAddr}; - -use serio::{stream::IoStreamExt, SinkExt as _}; -use tlsn_benches_browser_core::{ - msg::{Config, Runtime}, - FramedIo, -}; -use tlsn_benches_library::{AsyncIo, ProverKind, ProverTrait}; - -use anyhow::{anyhow, Context, Result}; -use async_trait::async_trait; -use chromiumoxide::{ - cdp::{ - browser_protocol::log::{EventEntryAdded, LogEntryLevel}, - js_protocol::runtime::EventExceptionThrown, - }, - Browser, BrowserConfig, Page, -}; -use futures::{Future, FutureExt, StreamExt}; -use rust_embed::RustEmbed; -use tokio::{io, io::AsyncWriteExt, net::TcpListener, task::JoinHandle}; -use tracing::{debug, error, info}; -use warp::Filter; - -/// The IP on which the wasm component is served. -pub static DEFAULT_WASM_IP: &str = "127.0.0.1"; -/// The IP of the websocket relay. -pub static DEFAULT_WS_IP: &str = "127.0.0.1"; - -/// The port on which the wasm component is served. -pub static DEFAULT_WASM_PORT: u16 = 9001; -/// The port of the websocket relay. -pub static DEFAULT_WS_PORT: u16 = 9002; -/// The port for the wasm component to communicate with the TLS server. -pub static DEFAULT_WASM_TO_SERVER_PORT: u16 = 9003; -/// The port for the wasm component to communicate with the verifier. -pub static DEFAULT_WASM_TO_VERIFIER_PORT: u16 = 9004; -/// The port for the wasm component to communicate with the native component. -pub static DEFAULT_WASM_TO_NATIVE_PORT: u16 = 9005; - -// The `pkg` dir will be embedded into the binary at compile-time. -#[derive(RustEmbed)] -#[folder = "../wasm/pkg"] -struct Data; - -/// The native component of the prover which runs in the browser. -pub struct BrowserProver { - /// Io for communication with the wasm component. - wasm_io: FramedIo, - /// The browser spawned by the prover. - browser: Browser, - /// A handle to the http server. - http_server: JoinHandle<()>, - /// Handles to the relays. 
- relays: Vec>>, -} - -#[async_trait] -impl ProverTrait for BrowserProver { - async fn setup( - upload_size: usize, - download_size: usize, - defer_decryption: bool, - verifier_io: Box, - server_io: Box, - ) -> anyhow::Result - where - Self: Sized, - { - let wasm_port: u16 = env::var("WASM_PORT") - .map(|port| port.parse().expect("port should be valid integer")) - .unwrap_or(DEFAULT_WASM_PORT); - let ws_port: u16 = env::var("WS_PORT") - .map(|port| port.parse().expect("port should be valid integer")) - .unwrap_or(DEFAULT_WS_PORT); - let wasm_to_server_port: u16 = env::var("WASM_TO_SERVER_PORT") - .map(|port| port.parse().expect("port should be valid integer")) - .unwrap_or(DEFAULT_WASM_TO_SERVER_PORT); - let wasm_to_verifier_port: u16 = env::var("WASM_TO_VERIFIER_PORT") - .map(|port| port.parse().expect("port should be valid integer")) - .unwrap_or(DEFAULT_WASM_TO_VERIFIER_PORT); - let wasm_to_native_port: u16 = env::var("WASM_TO_NATIVE_PORT") - .map(|port| port.parse().expect("port should be valid integer")) - .unwrap_or(DEFAULT_WASM_TO_NATIVE_PORT); - - let wasm_ip: IpAddr = env::var("WASM_IP") - .map(|addr| addr.parse().expect("should be valid IP address")) - .unwrap_or(IpAddr::V4(DEFAULT_WASM_IP.parse().unwrap())); - let ws_ip: IpAddr = env::var("WS_IP") - .map(|addr| addr.parse().expect("should be valid IP address")) - .unwrap_or(IpAddr::V4(DEFAULT_WS_IP.parse().unwrap())); - - let mut relays = Vec::with_capacity(4); - - relays.push(spawn_websocket_relay(ws_ip, ws_port).await?); - - let http_server = spawn_http_server(wasm_ip, wasm_port)?; - - // Relay data from the wasm component to the server. - relays.push(spawn_port_relay(wasm_to_server_port, server_io).await?); - - // Relay data from the wasm component to the verifier. - relays.push(spawn_port_relay(wasm_to_verifier_port, verifier_io).await?); - - // Create a framed connection to the wasm component. - let (wasm_left, wasm_right) = tokio::io::duplex(1 << 16); - - relays.push(spawn_port_relay(wasm_to_native_port, Box::new(wasm_right)).await?); - let mut wasm_io = FramedIo::new(Box::new(wasm_left)); - - info!("spawning browser"); - - // Note that the browser must be spawned only when the WebSocket relay is - // running. - let browser = spawn_browser( - wasm_ip, - ws_ip, - wasm_port, - ws_port, - wasm_to_server_port, - wasm_to_verifier_port, - wasm_to_native_port, - ) - .await?; - - info!("sending config to the browser component"); - - wasm_io - .send(Config { - upload_size, - download_size, - defer_decryption, - }) - .await?; - - Ok(Self { - wasm_io, - browser, - http_server, - relays, - }) - } - - async fn run(&mut self) -> anyhow::Result { - let runtime: Runtime = self.wasm_io.expect_next().await.unwrap(); - - _ = self.clean_up().await?; - - Ok(runtime.0) - } - - fn kind(&self) -> ProverKind { - ProverKind::Browser - } -} - -impl BrowserProver { - async fn clean_up(&mut self) -> anyhow::Result<()> { - // Kill the http server. - self.http_server.abort(); - - // Kill all relays. - let _ = self - .relays - .iter_mut() - .map(|task| task.abort()) - .collect::>(); - - // Close the browser. - self.browser.close().await?; - self.browser.wait().await?; - - Ok(()) - } -} - -pub async fn spawn_websocket_relay( - ip: IpAddr, - port: u16, -) -> anyhow::Result>> { - let listener = TcpListener::bind((ip, port)).await?; - Ok(tokio::spawn(websocket_relay::run(listener))) -} - -/// Binds to the given localhost `port`, accepts a connection and relays data -/// between the connection and the `channel`. 
-pub async fn spawn_port_relay( - port: u16, - channel: Box, -) -> anyhow::Result>> { - let listener = tokio::net::TcpListener::bind(("127.0.0.1", port)) - .await - .context("failed to bind to port")?; - - let handle = tokio::spawn(async move { - let (tcp, _) = listener - .accept() - .await - .context("failed to accept a connection") - .unwrap(); - - relay_data(Box::new(tcp), channel).await - }); - - Ok(handle) -} - -/// Relays data between two sources. -pub async fn relay_data(left: Box, right: Box) -> Result<()> { - let (mut left_read, mut left_write) = io::split(left); - let (mut right_read, mut right_write) = io::split(right); - - let left_to_right = async { - io::copy(&mut left_read, &mut right_write).await?; - right_write.shutdown().await - }; - - let right_to_left = async { - io::copy(&mut right_read, &mut left_write).await?; - left_write.shutdown().await - }; - - tokio::try_join!(left_to_right, right_to_left)?; - - Ok(()) -} - -/// Spawns the browser and starts the wasm component. -async fn spawn_browser( - wasm_ip: IpAddr, - ws_ip: IpAddr, - wasm_port: u16, - ws_port: u16, - wasm_to_server_port: u16, - wasm_to_verifier_port: u16, - wasm_to_native_port: u16, -) -> anyhow::Result { - // Chrome requires --no-sandbox when running as root. - let config = BrowserConfig::builder() - .no_sandbox() - .incognito() - .build() - .map_err(|s| anyhow!(s))?; - - debug!("launching chromedriver"); - - let (browser, mut handler) = Browser::launch(config).await?; - - debug!("chromedriver started"); - - tokio::spawn(async move { - while let Some(res) = handler.next().await { - res.unwrap(); - } - }); - - let page = browser - .new_page(&format!("http://{}:{}/index.html", wasm_ip, wasm_port)) - .await?; - - tokio::spawn(register_listeners(&page).await?); - - page.wait_for_navigation().await?; - // Note that `format!` needs double {{ }} in order to escape them. - let _ = page - .evaluate_function(&format!( - r#" - async function() {{ - await window.worker.init(); - // Do not `await` run() or else it will block the browser. - window.worker.run("{}", {}, {}, {}, {}); - }} - "#, - ws_ip, ws_port, wasm_to_server_port, wasm_to_verifier_port, wasm_to_native_port - )) - .await?; - - Ok(browser) -} - -pub fn spawn_http_server(ip: IpAddr, port: u16) -> anyhow::Result> { - let handle = tokio::spawn(async move { - // Serve embedded files with additional headers. 
- let data_serve = warp_embed::embed(&Data); - - let data_serve_with_headers = data_serve - .map(|reply| { - warp::reply::with_header(reply, "Cross-Origin-Opener-Policy", "same-origin") - }) - .map(|reply| { - warp::reply::with_header(reply, "Cross-Origin-Embedder-Policy", "require-corp") - }); - - warp::serve(data_serve_with_headers).run((ip, port)).await; - }); - - Ok(handle) -} - -async fn register_listeners(page: &Page) -> Result> { - let mut logs = page.event_listener::().await?.fuse(); - let mut exceptions = page.event_listener::().await?.fuse(); - - Ok(futures::future::join( - async move { - while let Some(event) = logs.next().await { - let entry = &event.entry; - match entry.level { - LogEntryLevel::Error => { - error!("{:?}", entry); - } - _ => { - debug!("{:?}: {}", entry.timestamp, entry.text); - } - } - } - }, - async move { - while let Some(event) = exceptions.next().await { - error!("{:?}", event); - } - }, - ) - .map(|_| ())) -} diff --git a/crates/benches/browser/wasm/.cargo/config.toml b/crates/benches/browser/wasm/.cargo/config.toml deleted file mode 100644 index 587d6d2325..0000000000 --- a/crates/benches/browser/wasm/.cargo/config.toml +++ /dev/null @@ -1,11 +0,0 @@ -[build] -target = "wasm32-unknown-unknown" - -[target.wasm32-unknown-unknown] -rustflags = [ - "-C", - "target-feature=+atomics,+bulk-memory,+mutable-globals", -] - -[unstable] -build-std = ["panic_abort", "std"] \ No newline at end of file diff --git a/crates/benches/browser/wasm/Cargo.toml b/crates/benches/browser/wasm/Cargo.toml deleted file mode 100644 index ecd436d41a..0000000000 --- a/crates/benches/browser/wasm/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -edition = "2021" -name = "tlsn-benches-browser-wasm" -publish = false -version = "0.0.0" - -[lib] -crate-type = ["cdylib", "rlib"] - -[dependencies] -tlsn-benches-browser-core = { workspace = true } -tlsn-benches-library = { workspace = true } -tlsn-wasm = { path = "../../../wasm" } - -serio = { workspace = true } - -anyhow = { workspace = true } -tracing = { workspace = true } -wasm-bindgen = { version = "0.2.87" } -wasm-bindgen-futures = { version = "0.4.37" } -web-time = { workspace = true } -# Use the patched ws_stream_wasm to fix the issue https://github.com/najamelan/ws_stream_wasm/issues/12#issuecomment-1711902958 -ws_stream_wasm = { version = "0.7.4", git = "https://github.com/tlsnotary/ws_stream_wasm", rev = "2ed12aad9f0236e5321f577672f309920b2aef51", features = [ - "tokio_io", -] } - -[package.metadata.wasm-pack.profile.release] -# Note: these wasm-pack options should match those in crates/wasm/Cargo.toml -opt-level = "z" -wasm-opt = true diff --git a/crates/benches/browser/wasm/pkg/comlink.mjs b/crates/benches/browser/wasm/pkg/comlink.mjs deleted file mode 100644 index 68951d278e..0000000000 --- a/crates/benches/browser/wasm/pkg/comlink.mjs +++ /dev/null @@ -1,346 +0,0 @@ -/** - * @license - * Copyright 2019 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ -const proxyMarker = Symbol("Comlink.proxy"); -const createEndpoint = Symbol("Comlink.endpoint"); -const releaseProxy = Symbol("Comlink.releaseProxy"); -const finalizer = Symbol("Comlink.finalizer"); -const throwMarker = Symbol("Comlink.thrown"); -const isObject = (val) => (typeof val === "object" && val !== null) || typeof val === "function"; -/** - * Internal transfer handle to handle objects marked to proxy. 
- */ -const proxyTransferHandler = { - canHandle: (val) => isObject(val) && val[proxyMarker], - serialize(obj) { - const { port1, port2 } = new MessageChannel(); - expose(obj, port1); - return [port2, [port2]]; - }, - deserialize(port) { - port.start(); - return wrap(port); - }, -}; -/** - * Internal transfer handler to handle thrown exceptions. - */ -const throwTransferHandler = { - canHandle: (value) => isObject(value) && throwMarker in value, - serialize({ value }) { - let serialized; - if (value instanceof Error) { - serialized = { - isError: true, - value: { - message: value.message, - name: value.name, - stack: value.stack, - }, - }; - } - else { - serialized = { isError: false, value }; - } - return [serialized, []]; - }, - deserialize(serialized) { - if (serialized.isError) { - throw Object.assign(new Error(serialized.value.message), serialized.value); - } - throw serialized.value; - }, -}; -/** - * Allows customizing the serialization of certain values. - */ -const transferHandlers = new Map([ - ["proxy", proxyTransferHandler], - ["throw", throwTransferHandler], -]); -function isAllowedOrigin(allowedOrigins, origin) { - for (const allowedOrigin of allowedOrigins) { - if (origin === allowedOrigin || allowedOrigin === "*") { - return true; - } - if (allowedOrigin instanceof RegExp && allowedOrigin.test(origin)) { - return true; - } - } - return false; -} -function expose(obj, ep = globalThis, allowedOrigins = ["*"]) { - ep.addEventListener("message", function callback(ev) { - if (!ev || !ev.data) { - return; - } - if (!isAllowedOrigin(allowedOrigins, ev.origin)) { - console.warn(`Invalid origin '${ev.origin}' for comlink proxy`); - return; - } - const { id, type, path } = Object.assign({ path: [] }, ev.data); - const argumentList = (ev.data.argumentList || []).map(fromWireValue); - let returnValue; - try { - const parent = path.slice(0, -1).reduce((obj, prop) => obj[prop], obj); - const rawValue = path.reduce((obj, prop) => obj[prop], obj); - switch (type) { - case "GET" /* MessageType.GET */: - { - returnValue = rawValue; - } - break; - case "SET" /* MessageType.SET */: - { - parent[path.slice(-1)[0]] = fromWireValue(ev.data.value); - returnValue = true; - } - break; - case "APPLY" /* MessageType.APPLY */: - { - returnValue = rawValue.apply(parent, argumentList); - } - break; - case "CONSTRUCT" /* MessageType.CONSTRUCT */: - { - const value = new rawValue(...argumentList); - returnValue = proxy(value); - } - break; - case "ENDPOINT" /* MessageType.ENDPOINT */: - { - const { port1, port2 } = new MessageChannel(); - expose(obj, port2); - returnValue = transfer(port1, [port1]); - } - break; - case "RELEASE" /* MessageType.RELEASE */: - { - returnValue = undefined; - } - break; - default: - return; - } - } - catch (value) { - returnValue = { value, [throwMarker]: 0 }; - } - Promise.resolve(returnValue) - .catch((value) => { - return { value, [throwMarker]: 0 }; - }) - .then((returnValue) => { - const [wireValue, transferables] = toWireValue(returnValue); - ep.postMessage(Object.assign(Object.assign({}, wireValue), { id }), transferables); - if (type === "RELEASE" /* MessageType.RELEASE */) { - // detach and deactive after sending release response above. 
- ep.removeEventListener("message", callback); - closeEndPoint(ep); - if (finalizer in obj && typeof obj[finalizer] === "function") { - obj[finalizer](); - } - } - }) - .catch((error) => { - // Send Serialization Error To Caller - const [wireValue, transferables] = toWireValue({ - value: new TypeError("Unserializable return value"), - [throwMarker]: 0, - }); - ep.postMessage(Object.assign(Object.assign({}, wireValue), { id }), transferables); - }); - }); - if (ep.start) { - ep.start(); - } -} -function isMessagePort(endpoint) { - return endpoint.constructor.name === "MessagePort"; -} -function closeEndPoint(endpoint) { - if (isMessagePort(endpoint)) - endpoint.close(); -} -function wrap(ep, target) { - return createProxy(ep, [], target); -} -function throwIfProxyReleased(isReleased) { - if (isReleased) { - throw new Error("Proxy has been released and is not useable"); - } -} -function releaseEndpoint(ep) { - return requestResponseMessage(ep, { - type: "RELEASE" /* MessageType.RELEASE */, - }).then(() => { - closeEndPoint(ep); - }); -} -const proxyCounter = new WeakMap(); -const proxyFinalizers = "FinalizationRegistry" in globalThis && - new FinalizationRegistry((ep) => { - const newCount = (proxyCounter.get(ep) || 0) - 1; - proxyCounter.set(ep, newCount); - if (newCount === 0) { - releaseEndpoint(ep); - } - }); -function registerProxy(proxy, ep) { - const newCount = (proxyCounter.get(ep) || 0) + 1; - proxyCounter.set(ep, newCount); - if (proxyFinalizers) { - proxyFinalizers.register(proxy, ep, proxy); - } -} -function unregisterProxy(proxy) { - if (proxyFinalizers) { - proxyFinalizers.unregister(proxy); - } -} -function createProxy(ep, path = [], target = function () { }) { - let isProxyReleased = false; - const proxy = new Proxy(target, { - get(_target, prop) { - throwIfProxyReleased(isProxyReleased); - if (prop === releaseProxy) { - return () => { - unregisterProxy(proxy); - releaseEndpoint(ep); - isProxyReleased = true; - }; - } - if (prop === "then") { - if (path.length === 0) { - return { then: () => proxy }; - } - const r = requestResponseMessage(ep, { - type: "GET" /* MessageType.GET */, - path: path.map((p) => p.toString()), - }).then(fromWireValue); - return r.then.bind(r); - } - return createProxy(ep, [...path, prop]); - }, - set(_target, prop, rawValue) { - throwIfProxyReleased(isProxyReleased); - // FIXME: ES6 Proxy Handler `set` methods are supposed to return a - // boolean. To show good will, we return true asynchronously ¯\_(ツ)_/¯ - const [value, transferables] = toWireValue(rawValue); - return requestResponseMessage(ep, { - type: "SET" /* MessageType.SET */, - path: [...path, prop].map((p) => p.toString()), - value, - }, transferables).then(fromWireValue); - }, - apply(_target, _thisArg, rawArgumentList) { - throwIfProxyReleased(isProxyReleased); - const last = path[path.length - 1]; - if (last === createEndpoint) { - return requestResponseMessage(ep, { - type: "ENDPOINT" /* MessageType.ENDPOINT */, - }).then(fromWireValue); - } - // We just pretend that `bind()` didn’t happen. 
- if (last === "bind") { - return createProxy(ep, path.slice(0, -1)); - } - const [argumentList, transferables] = processArguments(rawArgumentList); - return requestResponseMessage(ep, { - type: "APPLY" /* MessageType.APPLY */, - path: path.map((p) => p.toString()), - argumentList, - }, transferables).then(fromWireValue); - }, - construct(_target, rawArgumentList) { - throwIfProxyReleased(isProxyReleased); - const [argumentList, transferables] = processArguments(rawArgumentList); - return requestResponseMessage(ep, { - type: "CONSTRUCT" /* MessageType.CONSTRUCT */, - path: path.map((p) => p.toString()), - argumentList, - }, transferables).then(fromWireValue); - }, - }); - registerProxy(proxy, ep); - return proxy; -} -function myFlat(arr) { - return Array.prototype.concat.apply([], arr); -} -function processArguments(argumentList) { - const processed = argumentList.map(toWireValue); - return [processed.map((v) => v[0]), myFlat(processed.map((v) => v[1]))]; -} -const transferCache = new WeakMap(); -function transfer(obj, transfers) { - transferCache.set(obj, transfers); - return obj; -} -function proxy(obj) { - return Object.assign(obj, { [proxyMarker]: true }); -} -function windowEndpoint(w, context = globalThis, targetOrigin = "*") { - return { - postMessage: (msg, transferables) => w.postMessage(msg, targetOrigin, transferables), - addEventListener: context.addEventListener.bind(context), - removeEventListener: context.removeEventListener.bind(context), - }; -} -function toWireValue(value) { - for (const [name, handler] of transferHandlers) { - if (handler.canHandle(value)) { - const [serializedValue, transferables] = handler.serialize(value); - return [ - { - type: "HANDLER" /* WireValueType.HANDLER */, - name, - value: serializedValue, - }, - transferables, - ]; - } - } - return [ - { - type: "RAW" /* WireValueType.RAW */, - value, - }, - transferCache.get(value) || [], - ]; -} -function fromWireValue(value) { - switch (value.type) { - case "HANDLER" /* WireValueType.HANDLER */: - return transferHandlers.get(value.name).deserialize(value.value); - case "RAW" /* WireValueType.RAW */: - return value.value; - } -} -function requestResponseMessage(ep, msg, transfers) { - return new Promise((resolve) => { - const id = generateUUID(); - ep.addEventListener("message", function l(ev) { - if (!ev.data || !ev.data.id || ev.data.id !== id) { - return; - } - ep.removeEventListener("message", l); - resolve(ev.data); - }); - if (ep.start) { - ep.start(); - } - ep.postMessage(Object.assign({ id }, msg), transfers); - }); -} -function generateUUID() { - return new Array(4) - .fill(0) - .map(() => Math.floor(Math.random() * Number.MAX_SAFE_INTEGER).toString(16)) - .join("-"); -} - -export { createEndpoint, expose, finalizer, proxy, proxyMarker, releaseProxy, transfer, transferHandlers, windowEndpoint, wrap }; -//# sourceMappingURL=comlink.mjs.map diff --git a/crates/benches/browser/wasm/pkg/index.html b/crates/benches/browser/wasm/pkg/index.html deleted file mode 100644 index 8b2fa8cbc4..0000000000 --- a/crates/benches/browser/wasm/pkg/index.html +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/crates/benches/browser/wasm/pkg/index.js b/crates/benches/browser/wasm/pkg/index.js deleted file mode 100644 index 7c987a0f11..0000000000 --- a/crates/benches/browser/wasm/pkg/index.js +++ /dev/null @@ -1,7 +0,0 @@ -import * as Comlink from "./comlink.mjs"; - -async function init() { - const worker = Comlink.wrap(new Worker("worker.js", { type: "module" })); - window.worker = 
worker; -} -init(); diff --git a/crates/benches/browser/wasm/pkg/worker.js b/crates/benches/browser/wasm/pkg/worker.js deleted file mode 100644 index cc231b9fc9..0000000000 --- a/crates/benches/browser/wasm/pkg/worker.js +++ /dev/null @@ -1,45 +0,0 @@ -import * as Comlink from "./comlink.mjs"; - -import init, { wasm_main, initialize } from './tlsn_benches_browser_wasm.js'; - -class Worker { - async init() { - try { - await init(); - // Tracing may interfere with the benchmark results. We should enable it only for debugging. - // init_logging({ - // level: 'Debug', - // crate_filters: undefined, - // span_events: undefined, - // }); - await initialize({ thread_count: navigator.hardwareConcurrency }); - } catch (e) { - console.error(e); - throw e; - } - } - - async run( - ws_ip, - ws_port, - wasm_to_server_port, - wasm_to_verifier_port, - wasm_to_native_port - ) { - try { - await wasm_main( - ws_ip, - ws_port, - wasm_to_server_port, - wasm_to_verifier_port, - wasm_to_native_port); - } catch (e) { - console.error(e); - throw e; - } - } -} - -const worker = new Worker(); - -Comlink.expose(worker); \ No newline at end of file diff --git a/crates/benches/browser/wasm/rust-toolchain.toml b/crates/benches/browser/wasm/rust-toolchain.toml deleted file mode 100644 index 271800cb2f..0000000000 --- a/crates/benches/browser/wasm/rust-toolchain.toml +++ /dev/null @@ -1,2 +0,0 @@ -[toolchain] -channel = "nightly" \ No newline at end of file diff --git a/crates/benches/browser/wasm/src/lib.rs b/crates/benches/browser/wasm/src/lib.rs deleted file mode 100644 index 61f026d4dc..0000000000 --- a/crates/benches/browser/wasm/src/lib.rs +++ /dev/null @@ -1,102 +0,0 @@ -#![cfg(target_arch = "wasm32")] - -//! Contains the wasm component of the browser prover. -//! -//! Conceptually the browser prover consists of the native and the wasm -//! components. - -use serio::{stream::IoStreamExt, SinkExt as _}; -use tlsn_benches_browser_core::{ - msg::{Config, Runtime}, - FramedIo, -}; -use tlsn_benches_library::run_prover; - -use anyhow::Result; -use tracing::info; -use wasm_bindgen::prelude::*; -use web_time::Instant; -use ws_stream_wasm::WsMeta; - -#[wasm_bindgen] -pub async fn wasm_main( - ws_ip: String, - ws_port: u16, - wasm_to_server_port: u16, - wasm_to_verifier_port: u16, - wasm_to_native_port: u16, -) -> Result<(), JsError> { - // Wrapping main() since wasm_bindgen doesn't support anyhow. - main( - ws_ip, - ws_port, - wasm_to_server_port, - wasm_to_verifier_port, - wasm_to_native_port, - ) - .await - .map_err(|err| JsError::new(&err.to_string())) -} - -pub async fn main( - ws_ip: String, - ws_port: u16, - wasm_to_server_port: u16, - wasm_to_verifier_port: u16, - wasm_to_native_port: u16, -) -> Result<()> { - info!("starting main"); - - // Connect to the server. - let (_, server_io_ws) = WsMeta::connect( - &format!( - "ws://{}:{}/tcp?addr=localhost%3A{}", - ws_ip, ws_port, wasm_to_server_port - ), - None, - ) - .await?; - let server_io = server_io_ws.into_io(); - - // Connect to the verifier. - let (_, verifier_io_ws) = WsMeta::connect( - &format!( - "ws://{}:{}/tcp?addr=localhost%3A{}", - ws_ip, ws_port, wasm_to_verifier_port - ), - None, - ) - .await?; - let verifier_io = verifier_io_ws.into_io(); - - // Connect to the native component of the browser prover. 
- let (_, native_io_ws) = WsMeta::connect( - &format!( - "ws://{}:{}/tcp?addr=localhost%3A{}", - ws_ip, ws_port, wasm_to_native_port - ), - None, - ) - .await?; - let mut native_io = FramedIo::new(Box::new(native_io_ws.into_io())); - - info!("expecting config from the native component"); - - let cfg: Config = native_io.expect_next().await?; - - let start_time = Instant::now(); - run_prover( - cfg.upload_size, - cfg.download_size, - cfg.defer_decryption, - Box::new(verifier_io), - Box::new(server_io), - ) - .await?; - - native_io - .send(Runtime(start_time.elapsed().as_secs())) - .await?; - - Ok(()) -} diff --git a/crates/benches/library/Cargo.toml b/crates/benches/library/Cargo.toml deleted file mode 100644 index 53a3b5af3e..0000000000 --- a/crates/benches/library/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -edition = "2021" -name = "tlsn-benches-library" -publish = false -version = "0.0.0" - -[dependencies] -tlsn-common = { workspace = true } -tlsn-core = { workspace = true } -tlsn-prover = { workspace = true } -tlsn-server-fixture-certs = { workspace = true } -tlsn-tls-core = { workspace = true } - -anyhow = "1.0" -async-trait = "0.1.81" -futures = { version = "0.3", features = ["compat"] } -serde = { workspace = true } -tokio = {version = "1", default-features = false, features = ["rt", "macros"]} -tokio-util= {version = "0.7", features = ["compat", "io"]} diff --git a/crates/benches/library/src/lib.rs b/crates/benches/library/src/lib.rs deleted file mode 100644 index 6c7f3b3509..0000000000 --- a/crates/benches/library/src/lib.rs +++ /dev/null @@ -1,133 +0,0 @@ -use tls_core::{anchors::RootCertStore, verify::WebPkiVerifier}; -use tlsn_common::config::ProtocolConfig; -use tlsn_core::{transcript::Idx, CryptoProvider}; -use tlsn_prover::{Prover, ProverConfig}; -use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; - -use anyhow::Context; -use async_trait::async_trait; -use futures::{future::try_join, AsyncReadExt as _, AsyncWriteExt as _, TryFutureExt}; -use serde::{Deserialize, Serialize}; -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio_util::compat::TokioAsyncReadCompatExt; - -pub trait AsyncIo: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static {} -impl AsyncIo for T where T: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static {} - -#[async_trait] -pub trait ProverTrait { - /// Sets up the prover preparing it to be run. Returns a prover ready to be - /// run. - async fn setup( - upload_size: usize, - download_size: usize, - defer_decryption: bool, - verifier_io: Box, - server_io: Box, - ) -> anyhow::Result - where - Self: Sized; - - /// Runs the prover. Returns the total run time in seconds. - async fn run(&mut self) -> anyhow::Result; - - /// Returns the kind of the prover. - fn kind(&self) -> ProverKind; -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -/// The kind of a prover. -pub enum ProverKind { - /// The prover compiled into a native binary. - Native, - /// The prover compiled into a wasm binary. 
- Browser, -} - -impl From for String { - fn from(value: ProverKind) -> Self { - match value { - ProverKind::Native => "Native".to_string(), - ProverKind::Browser => "Browser".to_string(), - } - } -} - -pub async fn run_prover( - upload_size: usize, - download_size: usize, - defer_decryption: bool, - io: Box, - client_conn: Box, -) -> anyhow::Result<()> { - let provider = CryptoProvider { - cert: WebPkiVerifier::new(root_store(), None), - ..Default::default() - }; - - let protocol_config = if defer_decryption { - ProtocolConfig::builder() - .max_sent_data(upload_size + 256) - .max_recv_data(download_size + 256) - .build() - .unwrap() - } else { - ProtocolConfig::builder() - .max_sent_data(upload_size + 256) - .max_recv_data(download_size + 256) - .max_recv_data_online(download_size + 256) - .build() - .unwrap() - }; - - let prover = Prover::new( - ProverConfig::builder() - .server_name(SERVER_DOMAIN) - .protocol_config(protocol_config) - .defer_decryption_from_start(defer_decryption) - .crypto_provider(provider) - .build() - .context("invalid prover config")?, - ) - .setup(io.compat()) - .await?; - - let (mut mpc_tls_connection, prover_fut) = prover.connect(client_conn.compat()).await?; - let tls_fut = async move { - let request = format!( - "GET /bytes?size={} HTTP/1.1\r\nConnection: close\r\nData: {}\r\n\r\n", - download_size, - String::from_utf8(vec![0x42u8; upload_size]).unwrap(), - ); - - mpc_tls_connection.write_all(request.as_bytes()).await?; - mpc_tls_connection.close().await?; - - let mut response = vec![]; - mpc_tls_connection.read_to_end(&mut response).await?; - - dbg!(response.len()); - - Ok::<(), anyhow::Error>(()) - }; - - let (prover_task, _) = try_join(prover_fut.map_err(anyhow::Error::from), tls_fut).await?; - - let mut prover = prover_task.start_prove(); - - let (sent_len, recv_len) = prover.transcript().len(); - prover - .prove_transcript(Idx::new(0..sent_len), Idx::new(0..recv_len)) - .await?; - prover.finalize().await?; - - Ok(()) -} - -fn root_store() -> RootCertStore { - let mut root_store = RootCertStore::empty(); - root_store - .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) - .unwrap(); - root_store -} diff --git a/crates/benches/binary/.gitignore b/crates/harness/.gitignore similarity index 100% rename from crates/benches/binary/.gitignore rename to crates/harness/.gitignore diff --git a/crates/harness/Cargo.toml b/crates/harness/Cargo.toml new file mode 100644 index 0000000000..ae92789eb5 --- /dev/null +++ b/crates/harness/Cargo.toml @@ -0,0 +1,76 @@ +[package] +name = "tlsn-harness" +version = "0.0.0" +edition = "2021" +publish = false + +[lib] +crate-type = ["cdylib", "rlib"] + +[features] +default = ["runner"] +runner = [ + "dep:axum", + "dep:chromiumoxide", + "dep:clap", + "dep:csv", + "dep:tlsn-server-fixture", + "dep:tokio", + "dep:tokio-util", + "dep:toml", + "dep:tower", + "dep:tower-http", + "dep:websocket-relay", +] + +[dependencies] +tlsn-common = { workspace = true } +tlsn-core = { workspace = true } +tlsn-hmac-sha256 = { workspace = true } +tlsn-prover = { workspace = true } +tlsn-server-fixture = { workspace = true, optional = true } +tlsn-server-fixture-certs = { workspace = true } +tlsn-tls-core = { workspace = true } +tlsn-wasm = { workspace = true } +tlsn-verifier = { workspace = true } +futures-limit = { git = "https://github.com/tlsnotary/tlsn-utils", rev = "1a511ab" } + +websocket-relay = { workspace = true, optional = true } + +anyhow = { workspace = true } +axum = { workspace = true, optional = true } +cfg-if = { workspace = true } 
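+# Headless Chrome automation used by the browser runner; pulled in only when
+# the `runner` feature is enabled.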
+chromiumoxide = { version = "0.6", features = [ + "tokio-runtime", +], optional = true } +clap = { workspace = true, features = ["derive"], optional = true } +csv = { version = "1.3", optional = true } +futures = { workspace = true } +inventory = { workspace = true } +pin-project-lite = { workspace = true } +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +tokio = { workspace = true, features = ["full"], optional = true } +tokio-util = { workspace = true, features = ["compat"], optional = true } +toml = { version = "0.8", optional = true } +tower = { version = "0.4", optional = true } +tower-http = { version = "0.5", features = [ + "fs", + "set-header", +], optional = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = ["env-filter"] } +web-time = { workspace = true } + +[target.'cfg(target_arch = "wasm32")'.dependencies] +wasm-bindgen = { workspace = true } +tlsn-wasm = { workspace = true } +wasm-bindgen-futures = { workspace = true } +web-spawn = { workspace = true } +ws_stream_wasm = { workspace = true } +gloo-utils = { version = "0.2", features = ["serde"] } + +[[bin]] +name = "main" +path = "bin/main.rs" +required-features = ["runner"] diff --git a/crates/harness/README.md b/crates/harness/README.md new file mode 100644 index 0000000000..f290ce3e81 --- /dev/null +++ b/crates/harness/README.md @@ -0,0 +1,30 @@ +# TLSNotary Harness + +This package provides a harness for testing and benchmarking the TLSNotary protocol with both native and browser support. + +## Getting started + +Run the following to see the available commands and options in the harness CLI. + +```sh +cargo run --release -- --help +``` + +## Tests + +See the CLI manual for available testing options. + +To add new tests, one can simply register a test anywhere in the harness source code (preferably within the tests module). + +See [existing tests](src/tests.rs) as an example. + +```rust +test!("test_basic", test_prover, test_verifier); +``` + +## Benches + +See the CLI manual for available benching options. + +To add or modify benchmarks, see the [`bench.toml`](bench.toml) file. + diff --git a/crates/harness/bench.toml b/crates/harness/bench.toml new file mode 100644 index 0000000000..11cd23db5f --- /dev/null +++ b/crates/harness/bench.toml @@ -0,0 +1,44 @@ +[[benches]] +name = "latency" +latency = [20, 50, 100] +upload = 1000 +download = 1000 +upload-size = 1024 +download-size = 4096 +defer-decryption = true +memory-profile = false + +# [[benches]] +# name = "download_bandwidth" +# upload = 250 +# upload-delay = 25 +# download = [10, 25, 50, 100, 250] +# download-delay = 25 +# upload-size = 1024 +# download-size = 4096 +# defer-decryption = true +# memory-profile = false + +# [[benches]] +# name = "upload_bandwidth" +# upload = [10, 25, 50, 100, 250] +# upload-delay = 25 +# download = 250 +# download-delay = 25 +# upload-size = 1024 +# download-size = 4096 +# defer-decryption = [false, true] +# memory-profile = false + +# [[benches]] +# name = "download_volume" +# upload = 250 +# upload-delay = 25 +# download = 250 +# download-delay = 25 +# upload-size = 1024 +# # Setting download-size higher than 45000 will cause a `Maximum call stack size exceeded` +# # error in the browser. 
+# download-size = [1024, 4096, 16384, 45000] +# defer-decryption = true +# memory-profile = true diff --git a/crates/harness/bin/main.rs b/crates/harness/bin/main.rs new file mode 100644 index 0000000000..683af2ca87 --- /dev/null +++ b/crates/harness/bin/main.rs @@ -0,0 +1,120 @@ +use std::{fs::metadata, io::Write}; + +use anyhow::{Context, Result}; +use clap::Parser; +use csv::WriterBuilder; +use tlsn_harness::{ + bench::{Config, Measurement}, + cli::{Cli, Command}, + runner::Runner, + test::{collect_tests, TestConfig, DEFAULT_TEST_TIMEOUT}, + Target, +}; +use tracing::debug; +use tracing_subscriber::EnvFilter; + +#[tokio::main(flavor = "multi_thread")] +async fn main() -> Result<()> { + tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .init(); + + let Cli { command } = tlsn_harness::cli::Cli::parse(); + + let Some(command) = command else { + return Err(anyhow::anyhow!("No command provided")); + }; + + match command { + Command::Test { + target, + name, + timeout, + } => { + let mut runner = match target.unwrap_or_default() { + Target::Native => Runner::new_native(), + Target::Browser => Runner::new_browser(), + }; + + runner.start().await?; + debug!("runner started"); + + let mut passed = 0; + let mut failed = 0; + // TODO: Run in parallel. + for name in collect_tests(name.as_deref()) { + println!("running test: '{}'", name); + let output = runner + .run_test(&TestConfig { + name: name.to_string(), + timeout: timeout.unwrap_or(DEFAULT_TEST_TIMEOUT), + }) + .await?; + + if !output.passed { + eprintln!( + "test failed: '{}' time={}, time_out={}", + name, output.time, output.timed_out + ); + failed += 1; + } else { + passed += 1; + } + } + + println!("passed: {}, failed: {}", passed, failed); + + runner.stop().await?; + + if failed > 0 { + std::process::exit(1); + } + } + Command::Bench { + target, + config, + output, + } => { + let config_path = config.unwrap_or_else(|| "bench.toml".to_string()); + let config: Config = toml::from_str( + &std::fs::read_to_string(config_path).context("failed to read config file")?, + ) + .context("failed to parse config")?; + + let output_path = output.unwrap_or_else(|| "metrics.csv".to_string()); + let mut file = std::fs::OpenOptions::new() + .create(true) + .append(true) + .open(&output_path) + .context("failed to open metrics file")?; + let mut writer = WriterBuilder::new() + // If file is not empty, assume that the CSV header is already present in the file. 
+ .has_headers(metadata(output_path)?.len() == 0) + .from_writer(&mut file); + + let mut runner = match target.unwrap_or_default() { + Target::Native => Runner::new_native(), + Target::Browser => Runner::new_browser(), + }; + + runner.start().await?; + debug!("runner started"); + + for group in config.benches { + let instances = group.flatten(); + for config in instances { + let metrics = runner.run_bench(&config).await?; + + writer.serialize(Measurement::new(config, metrics))?; + writer.flush()?; + } + } + drop(writer); + file.flush()?; + + runner.stop().await?; + } + } + + Ok(()) +} diff --git a/crates/benches/binary/bin/plot.rs b/crates/harness/bin/plot.rs similarity index 99% rename from crates/benches/binary/bin/plot.rs rename to crates/harness/bin/plot.rs index dbc41abaec..aa7f7378d3 100644 --- a/crates/benches/binary/bin/plot.rs +++ b/crates/harness/bin/plot.rs @@ -1,4 +1,4 @@ -use tlsn_benches::metrics::Metrics; +use tlsn_benches::Metrics; use charming::{ component::{ diff --git a/crates/wasm-test-runner/run.sh b/crates/harness/build.sh similarity index 55% rename from crates/wasm-test-runner/run.sh rename to crates/harness/build.sh index f12cc3962a..b265b9aa8f 100755 --- a/crates/wasm-test-runner/run.sh +++ b/crates/harness/build.sh @@ -5,5 +5,4 @@ cd "$(dirname "$0")" RUSTFLAGS='-C target-feature=+atomics,+bulk-memory,+mutable-globals -C link-arg=--max-memory=4294967296' \ rustup run nightly \ - wasm-pack build ../wasm --target web --no-pack --out-dir=../wasm-test-runner/static/generated -- -Zbuild-std=panic_abort,std --features test && - RUST_LOG=info cargo run --release + wasm-pack build . --target web --no-pack --out-dir=./static/generated -- -Zbuild-std=panic_abort,std --no-default-features \ No newline at end of file diff --git a/crates/harness/src/bench.rs b/crates/harness/src/bench.rs new file mode 100644 index 0000000000..eceac9a358 --- /dev/null +++ b/crates/harness/src/bench.rs @@ -0,0 +1,195 @@ +mod io; +mod prover; +mod verifier; + +pub(crate) use io::Meter; +pub use prover::bench_prover; +pub use verifier::bench_verifier; + +use serde::{Deserialize, Serialize}; + +/// Transcript size padding to account for HTTP framing. 
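+/// Added on top of the configured upload/download sizes when building the
+/// protocol configuration for both the prover and the verifier.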
+pub(crate) const PADDING: usize = 256; + +#[derive(Deserialize)] +#[serde(untagged)] +pub enum Field { + Single(T), + Multiple(Vec), +} + +#[derive(Deserialize)] +pub struct Config { + pub benches: Vec, +} + +#[derive(Deserialize)] +pub struct BenchItem { + pub name: String, + pub latency: Field, + pub upload: Field, + pub download: Field, + #[serde(rename = "upload-size")] + pub upload_size: Field, + #[serde(rename = "download-size")] + pub download_size: Field, + #[serde(rename = "defer-decryption")] + pub defer_decryption: Field, + #[serde(rename = "memory-profile")] + pub memory_profile: Field, +} + +impl BenchItem { + /// Flattens the config into a list of instances + pub fn flatten(self) -> Vec { + let mut instances = vec![]; + + let latency = match self.latency { + Field::Single(u) => vec![u], + Field::Multiple(u) => u, + }; + + let upload = match self.upload { + Field::Single(u) => vec![u], + Field::Multiple(u) => u, + }; + + let download = match self.download { + Field::Single(u) => vec![u], + Field::Multiple(u) => u, + }; + + let upload_size = match self.upload_size { + Field::Single(u) => vec![u], + Field::Multiple(u) => u, + }; + + let download_size = match self.download_size { + Field::Single(u) => vec![u], + Field::Multiple(u) => u, + }; + + let defer_decryption = match self.defer_decryption { + Field::Single(u) => vec![u], + Field::Multiple(u) => u, + }; + + let memory_profile = match self.memory_profile { + Field::Single(u) => vec![u], + Field::Multiple(u) => u, + }; + + for latency in latency { + for u in &upload { + for d in &download { + for us in &upload_size { + for ds in &download_size { + for dd in &defer_decryption { + for mp in &memory_profile { + instances.push(BenchConfig { + name: self.name.clone(), + latency, + upload: *u, + download: *d, + upload_size: *us, + download_size: *ds, + defer_decryption: *dd, + memory_profile: *mp, + }); + } + } + } + } + } + } + } + + instances + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BrowserBenchConfig { + pub proxy_addr: (String, u16), + pub verifier_addr: (String, u16), + pub server_addr: (String, u16), + pub bench: BenchConfig, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BenchConfig { + pub name: String, + pub latency: usize, + pub upload: usize, + pub download: usize, + pub upload_size: usize, + pub download_size: usize, + pub defer_decryption: bool, + /// Whether this instance should be used for memory profiling. + pub memory_profile: bool, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Metrics { + /// Time taken to preprocess the connection in seconds. + pub time_preprocess: u64, + /// TLS connection online time in seconds. + pub time_online: u64, + /// Total runtime of the benchmark in seconds. + pub time_total: u64, + /// Total amount of data uploaded to the verifier in bytes. + pub uploaded: u64, + /// Total amount of data downloaded from the verifier in bytes. + pub downloaded: u64, + /// Peak heap memory usage in bytes. + pub heap_max_bytes: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Measurement { + pub name: String, + pub latency: usize, + pub upload: usize, + pub download: usize, + pub upload_size: usize, + pub download_size: usize, + pub defer_decryption: bool, + /// Time taken to preprocess the connection in seconds. + pub time_preprocess: u64, + /// TLS connection online time in seconds. + pub time_online: u64, + /// Total runtime of the benchmark in seconds. 
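The `flatten` method above expands a single `[[benches]]` entry into the cross product of all its list-valued fields. A minimal sketch of that behavior, assuming the harness is built with its default `runner` feature (which provides the `toml` dependency) and using an illustrative entry rather than one taken from `bench.toml`:

```rust
use anyhow::Result;
use tlsn_harness::bench::BenchItem;

fn main() -> Result<()> {
    // One [[benches]] entry with two list-valued fields.
    let item: BenchItem = toml::from_str(
        r#"
        name = "example"
        latency = [20, 50, 100]
        upload = 1000
        download = 1000
        upload-size = 1024
        download-size = 4096
        defer-decryption = [false, true]
        memory-profile = false
        "#,
    )?;

    // Every combination of the list values becomes its own instance:
    // 3 latencies x 2 defer-decryption settings = 6 bench configs.
    let instances = item.flatten();
    assert_eq!(instances.len(), 6);

    Ok(())
}
```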
+ pub time_total: u64, + /// Total amount of data uploaded to the verifier in bytes. + pub uploaded: u64, + /// Total amount of data downloaded from the verifier in bytes. + pub downloaded: u64, + /// Peak heap memory usage in bytes. + pub heap_max_bytes: Option, +} + +impl Measurement { + pub fn new(config: BenchConfig, metrics: Metrics) -> Self { + Self { + name: config.name, + latency: config.latency, + upload: config.upload, + download: config.download, + upload_size: config.upload_size, + download_size: config.download_size, + defer_decryption: config.defer_decryption, + time_preprocess: metrics.time_preprocess, + time_online: metrics.time_online, + time_total: metrics.time_total, + uploaded: metrics.uploaded, + downloaded: metrics.downloaded, + heap_max_bytes: metrics.heap_max_bytes, + } + } +} + +/// Calculates burst rate in bps. +pub(crate) fn burst(rate: usize) -> usize { + // 2ms burst. + rate * 2 / 1000 +} diff --git a/crates/harness/src/bench/io.rs b/crates/harness/src/bench/io.rs new file mode 100644 index 0000000000..73c3a74172 --- /dev/null +++ b/crates/harness/src/bench/io.rs @@ -0,0 +1,78 @@ +use std::{ + io::Result, + pin::Pin, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + task::{Context, Poll}, +}; + +use futures::{AsyncRead, AsyncWrite}; +use pin_project_lite::pin_project; + +pin_project! { + pub(crate) struct Meter { + sent: Arc, + recv: Arc, + #[pin] io: Io, + } +} + +impl Meter { + pub(crate) fn new(io: Io) -> Self { + Self { + sent: Arc::new(AtomicU64::new(0)), + recv: Arc::new(AtomicU64::new(0)), + io, + } + } + + pub(crate) fn sent(&self) -> Arc { + self.sent.clone() + } + + pub(crate) fn recv(&self) -> Arc { + self.recv.clone() + } +} + +impl AsyncWrite for Meter +where + Io: AsyncWrite, +{ + fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { + let this = self.project(); + this.io.poll_write(cx, buf).map(|res| { + res.inspect(|n| { + this.sent.fetch_add(*n as u64, Ordering::Relaxed); + }) + }) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().io.poll_flush(cx) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().io.poll_close(cx) + } +} + +impl AsyncRead for Meter +where + Io: AsyncRead, +{ + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + let this = self.project(); + this.io.poll_read(cx, buf).map(|res| { + res.inspect(|n| { + this.recv.fetch_add(*n as u64, Ordering::Relaxed); + }) + }) + } +} diff --git a/crates/harness/src/bench/prover.rs b/crates/harness/src/bench/prover.rs new file mode 100644 index 0000000000..d88a9ae4cb --- /dev/null +++ b/crates/harness/src/bench/prover.rs @@ -0,0 +1,113 @@ +use std::sync::atomic::Ordering; + +use anyhow::Result; +use futures::{AsyncReadExt, AsyncWriteExt, FutureExt, TryFutureExt}; +use futures_limit::*; +use tls_core::verify::WebPkiVerifier; +use tlsn_common::config::ProtocolConfig; +use tlsn_core::{transcript::Idx, CryptoProvider}; +use tlsn_prover::{Prover, ProverConfig}; +use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; + +use crate::{ + bench::{burst, BenchConfig, Meter, Metrics, PADDING}, + spawn::spawn, + ProverProvider, +}; + +pub async fn bench_prover(provider: &mut ProverProvider, config: &BenchConfig) -> Result { + let write_rate = config.upload * 1_000_000; + let write_burst = burst(write_rate); + + let (verifier_io, delay_fut) = provider + .provide_verifier() + .await? 
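+        // Simulate the link on the prover's side: writes are limited to the
+        // rate derived from the configured `upload` bandwidth (with a ~2 ms
+        // burst allowance, see `burst`), and half of the configured `latency`
+        // is added here; the verifier applies the other half.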
+ .limit_rate(write_burst, write_rate) + .delay(config.latency.div_ceil(2)); + + let verifier_io = Meter::new(verifier_io); + + let sent = verifier_io.sent(); + let recv = verifier_io.recv(); + + spawn(delay_fut.map(|_| ())); + + let mut builder = ProtocolConfig::builder(); + builder.max_sent_data(config.upload_size + PADDING); + + if !config.defer_decryption { + builder.max_recv_data_online(config.download_size + PADDING); + } + builder.max_recv_data(config.download_size + PADDING); + + let protocol_config = builder.build()?; + + let mut root_store = tls_core::anchors::RootCertStore::empty(); + root_store + .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) + .unwrap(); + + let crypto_provider = CryptoProvider { + cert: WebPkiVerifier::new(root_store, None), + ..Default::default() + }; + + let prover = Prover::new( + ProverConfig::builder() + .defer_decryption_from_start(config.defer_decryption) + .protocol_config(protocol_config) + .server_name(SERVER_DOMAIN) + .crypto_provider(crypto_provider) + .build()?, + ); + + let time_start = web_time::Instant::now(); + + let prover = prover.setup(verifier_io).await?; + + let time_preprocess = time_start.elapsed().as_millis(); + let time_start_online = web_time::Instant::now(); + + let (mut conn, prover_fut) = prover.connect(provider.provide_server().await?).await?; + + let (time_online, prover) = futures::try_join!( + async { + let request = format!( + "GET /bytes?size={} HTTP/1.1\r\nConnection: close\r\nData: {}\r\n\r\n", + config.download_size, + String::from_utf8(vec![0x42u8; config.upload_size])?, + ); + + conn.write_all(request.as_bytes()).await?; + conn.close().await?; + + let mut response = Vec::new(); + conn.read_to_end(&mut response).await?; + + let time_online = time_start_online.elapsed().as_millis(); + + Ok(time_online) + }, + prover_fut.map_err(anyhow::Error::from) + )?; + + let mut prover = prover.start_prove(); + + let (sent_len, recv_len) = prover.transcript().len(); + + prover + .prove_transcript(Idx::new(0..sent_len), Idx::new(0..recv_len)) + .await?; + prover.finalize().await?; + + let time_total = time_start.elapsed().as_millis(); + + Ok(Metrics { + time_preprocess: time_preprocess as u64, + time_online: time_online as u64, + time_total: time_total as u64, + uploaded: sent.load(Ordering::Relaxed), + downloaded: recv.load(Ordering::Relaxed), + heap_max_bytes: None, + }) +} diff --git a/crates/harness/src/bench/verifier.rs b/crates/harness/src/bench/verifier.rs new file mode 100644 index 0000000000..915f8d3a06 --- /dev/null +++ b/crates/harness/src/bench/verifier.rs @@ -0,0 +1,86 @@ +use std::sync::atomic::Ordering; + +use anyhow::Result; +use futures::FutureExt; +use futures_limit::*; +use tls_core::verify::WebPkiVerifier; +use tlsn_common::config::ProtocolConfigValidator; +use tlsn_core::CryptoProvider; +use tlsn_server_fixture_certs::CA_CERT_DER; +use tlsn_verifier::{Verifier, VerifierConfig}; + +use crate::{ + bench::{burst, BenchConfig, Meter, Metrics, PADDING}, + spawn::spawn, + VerifierProvider, +}; + +pub async fn bench_verifier( + provider: &mut VerifierProvider, + config: &BenchConfig, +) -> Result { + let write_rate = config.download * 1_000_000; + let write_burst = burst(write_rate); + + let (io, delay_fut) = provider + .provide_prover() + .await? 
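+        // Mirror of the prover side: the write rate here is derived from the
+        // configured `download` bandwidth, and the remaining half of the
+        // configured `latency` is applied.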
+ .limit_rate(write_burst, write_rate) + .delay(config.latency.div_ceil(2)); + + let io = Meter::new(io); + + let sent = io.sent(); + let recv = io.recv(); + + spawn(delay_fut.map(|_| ())); + + let mut builder = ProtocolConfigValidator::builder(); + builder + .max_sent_data(config.upload_size + PADDING) + .max_recv_data(config.download_size + PADDING); + + let protocol_config = builder.build()?; + + let mut root_store = tls_core::anchors::RootCertStore::empty(); + root_store + .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) + .unwrap(); + + let crypto_provider = CryptoProvider { + cert: WebPkiVerifier::new(root_store, None), + ..Default::default() + }; + + let verifier = Verifier::new( + VerifierConfig::builder() + .protocol_config_validator(protocol_config) + .crypto_provider(crypto_provider) + .build()?, + ); + + let time_start = web_time::Instant::now(); + + let verifier = verifier.setup(io).await?; + + let time_preprocess = time_start.elapsed().as_secs(); + let time_start_online = web_time::Instant::now(); + + let mut verifier = verifier.run().await?.start_verify(); + + let time_online = time_start_online.elapsed().as_secs(); + + verifier.receive().await?; + verifier.finalize().await?; + + let time_total = time_start.elapsed().as_secs(); + + Ok(Metrics { + time_preprocess, + time_online, + time_total, + uploaded: sent.load(Ordering::Relaxed), + downloaded: recv.load(Ordering::Relaxed), + heap_max_bytes: None, + }) +} diff --git a/crates/harness/src/cli.rs b/crates/harness/src/cli.rs new file mode 100644 index 0000000000..b2dba25c13 --- /dev/null +++ b/crates/harness/src/cli.rs @@ -0,0 +1,38 @@ +use clap::{Parser, Subcommand}; + +use crate::Target; + +#[derive(Parser)] +#[command(version, about, long_about = None)] +pub struct Cli { + #[command(subcommand)] + pub command: Option, +} + +#[derive(Subcommand)] +pub enum Command { + /// runs tests. + Test { + /// Target platform. + #[arg(long, default_value = "native")] + target: Option, + /// Name prefix filter. + #[arg(long)] + name: Option, + /// Timeout in seconds. + #[arg(long, default_value = "300")] + timeout: Option, + }, + /// runs benchmarks. + Bench { + /// Target platform. + #[arg(short, long, default_value = "native")] + target: Option, + /// Configuration path. + #[arg(short, long, default_value = "bench.toml")] + config: Option, + /// Output file path. + #[arg(short, long, default_value = "metrics.csv")] + output: Option, + }, +} diff --git a/crates/harness/src/config.rs b/crates/harness/src/config.rs new file mode 100644 index 0000000000..2f88f8b27f --- /dev/null +++ b/crates/harness/src/config.rs @@ -0,0 +1,2 @@ +/// Latency between the server and the prover. 
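+/// Half of this value is applied on each end of the connection (see the
+/// `delay` calls in `provider.rs` and `runner/server_fixture.rs`).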
+pub const SERVER_LATENCY: usize = 50; diff --git a/crates/harness/src/io.rs b/crates/harness/src/io.rs new file mode 100644 index 0000000000..334b5d7b86 --- /dev/null +++ b/crates/harness/src/io.rs @@ -0,0 +1,5 @@ +use futures::{AsyncRead, AsyncWrite}; + +pub trait Io: AsyncRead + AsyncWrite + Send + Unpin + 'static {} + +impl Io for T where T: AsyncRead + AsyncWrite + Send + Unpin + 'static {} diff --git a/crates/harness/src/lib.rs b/crates/harness/src/lib.rs new file mode 100644 index 0000000000..4e48a15b92 --- /dev/null +++ b/crates/harness/src/lib.rs @@ -0,0 +1,38 @@ +pub mod bench; +#[cfg(feature = "runner")] +pub mod cli; +pub(crate) mod config; +pub mod io; +mod provider; +#[cfg(feature = "runner")] +pub mod runner; +pub(crate) mod spawn; +pub mod test; +mod tests; +#[cfg(target_arch = "wasm32")] +mod wasm; + +pub use provider::{ProverProvider, VerifierProvider}; +#[cfg(target_arch = "wasm32")] +pub use wasm::*; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[cfg_attr(feature = "runner", derive(clap::ValueEnum))] +pub enum Target { + Native, + Browser, +} + +impl Default for Target { + fn default() -> Self { + Self::Native + } +} + +pub static DEFAULT_SERVER_IP: &str = "127.0.0.1"; +pub static DEFAULT_WASM_PORT: u16 = 8013; +pub static DEFAULT_WS_PORT: u16 = 8080; +pub static DEFAULT_SERVER_PORT: u16 = 8083; +pub static DEFAULT_VERIFIER_PORT: u16 = 8010; +pub static DEFAULT_NOTARY_PORT: u16 = 8011; +pub static DEFAULT_PROVER_PORT: u16 = 8012; diff --git a/crates/harness/src/provider.rs b/crates/harness/src/provider.rs new file mode 100644 index 0000000000..227fdaaef1 --- /dev/null +++ b/crates/harness/src/provider.rs @@ -0,0 +1,171 @@ +#[cfg(not(target_arch = "wasm32"))] +mod native { + use crate::{config::SERVER_LATENCY, io::Io}; + use anyhow::Result; + use futures_limit::AsyncReadDelayExt; + use tokio::net::TcpListener; + use tokio_util::compat::TokioAsyncReadCompatExt; + + /// Provides IO for the prover. + pub struct ProverProvider { + server_addr: (String, u16), + verifier_addr: (String, u16), + } + + impl ProverProvider { + /// Creates a new provider. + pub(crate) fn new(server_addr: (String, u16), verifier_addr: (String, u16)) -> Self { + Self { + server_addr, + verifier_addr, + } + } + + /// Provides a connection to the server. + pub async fn provide_server(&self) -> Result { + let io = tokio::net::TcpStream::connect(self.server_addr.clone()).await?; + + let (io, delay) = io.compat().delay(SERVER_LATENCY / 2); + tokio::spawn(delay); + + Ok(io) + } + + /// Provides a connection to the verifier. + pub async fn provide_verifier(&self) -> Result { + let io = tokio::net::TcpStream::connect(self.verifier_addr.clone()).await?; + Ok(io.compat()) + } + } + + /// Provides IO for the verifier. + pub struct VerifierProvider { + addr: (String, u16), + listener: TcpListener, + } + + impl VerifierProvider { + pub(crate) async fn new(host: &str) -> Result { + let listener = TcpListener::bind((host, 0)).await?; + let port = listener.local_addr()?.port(); + + Ok(Self { + addr: (host.to_string(), port), + listener, + }) + } + + pub(crate) fn addr(&self) -> (String, u16) { + self.addr.clone() + } + + /// Provides a connection to the prover. 
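+        ///
+        /// Accepts the next incoming TCP connection on the listener bound in
+        /// `new`.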
+ pub async fn provide_prover(&self) -> Result { + let (io, _) = self.listener.accept().await?; + Ok(io.compat()) + } + } +} + +#[cfg(not(target_arch = "wasm32"))] +pub use native::{ProverProvider, VerifierProvider}; + +#[cfg(target_arch = "wasm32")] +mod wasm { + use crate::{config::SERVER_LATENCY, io::Io}; + use anyhow::Result; + use futures::future::FutureExt; + use futures_limit::AsyncReadDelayExt; + + pub enum VerifierAddr { + Tcp { addr: (String, u16) }, + Ws { id: String }, + } + + pub struct ProverProvider { + proxy_addr: (String, u16), + server_addr: (String, u16), + verifier_addr: VerifierAddr, + } + + impl ProverProvider { + /// Creates a new provider. + pub(crate) fn new( + proxy_addr: (String, u16), + server_addr: (String, u16), + verifier_addr: VerifierAddr, + ) -> Self { + Self { + proxy_addr, + server_addr, + verifier_addr, + } + } + + /// Provides a connection to the server. + pub async fn provide_server(&self) -> Result { + let server_url = format!( + "ws://{}:{}/tcp?addr={}%3A{}", + &self.proxy_addr.0, self.proxy_addr.1, &self.server_addr.0, self.server_addr.1, + ); + let (_, io) = ws_stream_wasm::WsMeta::connect(server_url, None).await?; + + let (io, delay) = io.into_io().delay(SERVER_LATENCY / 2); + + wasm_bindgen_futures::spawn_local(delay.map(|_| ())); + + Ok(io) + } + + /// Provides a connection to the verifier. + pub async fn provide_verifier(&self) -> Result { + let url = match &self.verifier_addr { + VerifierAddr::Tcp { addr } => { + format!( + "ws://{}:{}/tcp?addr={}%3A{}", + &self.proxy_addr.0, self.proxy_addr.1, &addr.0, addr.1, + ) + } + VerifierAddr::Ws { id } => { + format!( + "ws://{}:{}/ws?id={}", + &self.proxy_addr.0, self.proxy_addr.1, &id, + ) + } + }; + + let (_, io) = ws_stream_wasm::WsMeta::connect(url, None).await?; + + Ok(io.into_io()) + } + } + + pub struct VerifierProvider { + proxy_addr: (String, u16), + addr: String, + } + + impl VerifierProvider { + pub(crate) fn new(proxy_addr: (String, u16), addr: &str) -> Self { + Self { + proxy_addr, + addr: addr.to_string(), + } + } + + /// Provides a connection to the prover. 
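+        ///
+        /// Connects through the websocket proxy using the id supplied to
+        /// `new`.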
+ pub async fn provide_prover(&self) -> Result { + let prover_url = format!( + "ws://{}:{}/ws?id={}", + &self.proxy_addr.0, self.proxy_addr.1, &self.addr, + ); + + let (_, io) = ws_stream_wasm::WsMeta::connect(prover_url, None).await?; + + Ok(io.into_io()) + } + } +} + +#[cfg(target_arch = "wasm32")] +pub use wasm::{ProverProvider, VerifierAddr, VerifierProvider}; diff --git a/crates/harness/src/runner.rs b/crates/harness/src/runner.rs new file mode 100644 index 0000000000..56ec30b84e --- /dev/null +++ b/crates/harness/src/runner.rs @@ -0,0 +1,62 @@ +mod browser; +mod native; +pub(crate) mod server_fixture; +pub(crate) mod tlsn_fixture; + +use anyhow::Result; + +use crate::{ + bench::{BenchConfig, Metrics}, + test::{TestConfig, TestOutput}, +}; + +pub struct Runner { + inner: Inner, +} + +impl Runner { + pub fn new_native() -> Self { + Self { + inner: Inner::Native(native::NativeRunner::new()), + } + } + + pub fn new_browser() -> Self { + Self { + inner: Inner::Browser(browser::BrowserRunner::new()), + } + } + + pub async fn start(&mut self) -> Result<()> { + match &mut self.inner { + Inner::Native(runner) => runner.start().await, + Inner::Browser(runner) => runner.start().await, + } + } + + pub async fn run_test(&self, test: &TestConfig) -> Result { + match &self.inner { + Inner::Native(runner) => runner.run_test(test).await, + Inner::Browser(runner) => runner.run_test(test).await, + } + } + + pub async fn run_bench(&self, bench: &BenchConfig) -> Result { + match &self.inner { + Inner::Native(runner) => runner.run_bench(bench).await, + Inner::Browser(runner) => runner.run_bench(bench).await, + } + } + + pub async fn stop(&mut self) -> Result<()> { + match &mut self.inner { + Inner::Native(runner) => runner.stop().await, + Inner::Browser(runner) => runner.stop().await, + } + } +} + +enum Inner { + Native(native::NativeRunner), + Browser(browser::BrowserRunner), +} diff --git a/crates/harness/src/runner/browser.rs b/crates/harness/src/runner/browser.rs new file mode 100644 index 0000000000..4aab820d8e --- /dev/null +++ b/crates/harness/src/runner/browser.rs @@ -0,0 +1,136 @@ +mod chrome_driver; +mod wasm_server; +mod ws_proxy; + +use crate::{ + runner::server_fixture, + test::{BrowserTestConfig, TestConfig, TestOutput}, + VerifierProvider, DEFAULT_SERVER_IP, DEFAULT_SERVER_PORT, DEFAULT_WS_PORT, +}; +use anyhow::{anyhow, Result}; +use chromiumoxide::{Browser, Page}; +use futures::TryFutureExt; + +use crate::bench::{bench_verifier, BenchConfig, BrowserBenchConfig, Metrics}; + +pub struct BrowserRunner { + state: State, +} + +impl BrowserRunner { + pub fn new() -> Self { + Self { state: State::Init } + } + + pub async fn start(&mut self) -> Result<()> { + tokio::spawn(wasm_server::start()); + tokio::spawn(ws_proxy::start()); + tokio::spawn(server_fixture::start()); + + let (browser, page) = chrome_driver::start_browser().await?; + + page.evaluate( + r#" + (async () => {{ + return await window.worker.init(); + }})(); + "#, + ) + .await?; + + self.state = State::Running { browser, page }; + + Ok(()) + } + + pub async fn run_test(&self, config: &TestConfig) -> Result { + let State::Running { page, .. 
} = &self.state else { + return Err(anyhow!("runner not started")); + }; + + let config = BrowserTestConfig { + test: config.clone(), + proxy_addr: (DEFAULT_SERVER_IP.to_string(), DEFAULT_WS_PORT), + server_addr: (DEFAULT_SERVER_IP.to_string(), DEFAULT_SERVER_PORT), + }; + + let config = serde_json::to_string(&config)?; + page.evaluate(format!( + r#" + (async () => {{ + const config = JSON.parse('{config}'); + const prover = window.worker.runTestProver(config); + const verifier = window.worker.runTestVerifier(config); + await Promise.all([prover, verifier]); + }})(); + "# + )) + .await?; + + Ok(TestOutput { + passed: true, + time: 0, + timed_out: false, + }) + } + + pub async fn run_bench(&self, bench: &BenchConfig) -> Result { + let State::Running { page, .. } = &self.state else { + return Err(anyhow!("browser not started")); + }; + + let mut verifier_provider = VerifierProvider::new(DEFAULT_SERVER_IP).await?; + + let config = BrowserBenchConfig { + proxy_addr: (DEFAULT_SERVER_IP.to_string(), DEFAULT_WS_PORT), + verifier_addr: verifier_provider.addr(), + server_addr: (DEFAULT_SERVER_IP.to_string(), DEFAULT_SERVER_PORT), + bench: bench.clone(), + }; + + let prover_task = async { + let config = serde_json::to_string(&config)?; + page.evaluate(format!( + r#" + (async () => {{ + const config = JSON.parse('{config}'); + return await window.worker.runBench(config); + }})(); + "# + )) + .await? + .into_value() + .map_err(anyhow::Error::from) + }; + + let verifier_task = { + let bench = bench.clone(); + tokio::spawn(async move { bench_verifier(&mut verifier_provider, &bench).await }) + .map_err(anyhow::Error::from) + }; + + let (metrics, _) = tokio::try_join!(prover_task, verifier_task)?; + + Ok(metrics) + } + + pub async fn stop(&mut self) -> Result<()> { + match &mut self.state { + State::Running { browser, .. } => { + browser.close().await?; + browser.wait().await?; + } + _ => {} + } + + self.state = State::Done; + + Ok(()) + } +} + +enum State { + Init, + Running { browser: Browser, page: Page }, + Done, +} diff --git a/crates/wasm-test-runner/src/chrome_driver.rs b/crates/harness/src/runner/browser/chrome_driver.rs similarity index 72% rename from crates/wasm-test-runner/src/chrome_driver.rs rename to crates/harness/src/runner/browser/chrome_driver.rs index 8ed271f0f8..f2eba0334e 100644 --- a/crates/wasm-test-runner/src/chrome_driver.rs +++ b/crates/harness/src/runner/browser/chrome_driver.rs @@ -1,7 +1,10 @@ use anyhow::{anyhow, Result}; use chromiumoxide::{ cdp::{ - browser_protocol::log::{EventEntryAdded, LogEntryLevel}, + browser_protocol::{ + log::{EventEntryAdded, LogEntryLevel}, + page::ReloadParams, + }, js_protocol::runtime::EventExceptionThrown, }, Browser, BrowserConfig, Page, @@ -10,26 +13,29 @@ use futures::{Future, FutureExt, StreamExt}; use std::{env, time::Duration}; use tracing::{debug, error, instrument}; -use crate::{TestResult, DEFAULT_SERVER_IP, DEFAULT_WASM_PORT}; +use crate::{DEFAULT_SERVER_IP, DEFAULT_WASM_PORT}; #[instrument] -pub async fn run() -> Result> { +pub async fn start_browser() -> Result<(Browser, Page)> { let config = BrowserConfig::builder() .request_timeout(Duration::from_secs(60)) + //.with_head() .disable_cache() - .incognito() // Run in incognito mode to avoid unexplained WS connection errors in chromiumoxide. + //.incognito() // Run in incognito mode to avoid unexplained WS connection errors in chromiumoxide. 
.build() .map_err(|s| anyhow!(s))?; debug!("launching chromedriver"); - let (mut browser, mut handler) = Browser::launch(config).await?; + let (browser, mut handler) = Browser::launch(config).await?; debug!("chromedriver started"); tokio::spawn(async move { while let Some(res) = handler.next().await { - res.unwrap(); + if let Err(e) = res { + error!("error: {:?}", e); + } } }); @@ -44,23 +50,11 @@ pub async fn run() -> Result> { tokio::spawn(register_listeners(&page).await?); + page.execute(ReloadParams::builder().ignore_cache(true).build()) + .await?; page.wait_for_navigation().await?; - let results: Vec = page - .evaluate( - r#" - (async () => { - await window.testWorker.init(); - return await window.testWorker.run(); - })(); - "#, - ) - .await? - .into_value()?; - - browser.close().await?; - browser.wait().await?; - Ok(results) + Ok((browser, page)) } async fn register_listeners(page: &Page) -> Result> { diff --git a/crates/wasm-test-runner/src/wasm_server.rs b/crates/harness/src/runner/browser/wasm_server.rs similarity index 88% rename from crates/wasm-test-runner/src/wasm_server.rs rename to crates/harness/src/runner/browser/wasm_server.rs index 9e01d903e6..4a05750e35 100644 --- a/crates/wasm-test-runner/src/wasm_server.rs +++ b/crates/harness/src/runner/browser/wasm_server.rs @@ -5,7 +5,6 @@ use axum::{ http::{HeaderName, HeaderValue}, Router, }; -use futures::Future; use tokio::net::TcpListener; use tower::ServiceBuilder; use tower_http::{services::ServeDir, set_header::SetResponseHeaderLayer}; @@ -14,7 +13,7 @@ use tracing::{info, instrument}; use crate::{DEFAULT_SERVER_IP, DEFAULT_WASM_PORT}; #[instrument] -pub async fn start() -> Result>> { +pub async fn start() -> Result<()> { let port: u16 = env::var("WASM_PORT") .map(|port| port.parse().expect("port should be valid integer")) .unwrap_or(DEFAULT_WASM_PORT); @@ -42,8 +41,7 @@ pub async fn start() -> Result>> { info!("listening on {}", listener.local_addr()?); - Ok(async move { - axum::serve(listener, app).await?; - Ok(()) - }) + axum::serve(listener, app).await?; + + Ok(()) } diff --git a/crates/wasm-test-runner/src/ws.rs b/crates/harness/src/runner/browser/ws_proxy.rs similarity index 84% rename from crates/wasm-test-runner/src/ws.rs rename to crates/harness/src/runner/browser/ws_proxy.rs index 0cc294ec54..5d98396caa 100644 --- a/crates/wasm-test-runner/src/ws.rs +++ b/crates/harness/src/runner/browser/ws_proxy.rs @@ -1,14 +1,13 @@ use std::{env, net::IpAddr}; use anyhow::{Context, Result}; -use futures::Future; use tokio::net::TcpListener; use tracing::{info, instrument}; use crate::{DEFAULT_SERVER_IP, DEFAULT_WS_PORT}; #[instrument] -pub async fn start() -> Result>> { +pub async fn start() -> Result<()> { let port: u16 = env::var("PROXY_PORT") .map(|port| port.parse().expect("port should be valid integer")) .unwrap_or(DEFAULT_WS_PORT); @@ -22,5 +21,7 @@ pub async fn start() -> Result>> { info!("listening on: {}", listener.local_addr()?); - Ok(websocket_relay::run(listener)) + websocket_relay::run(listener).await?; + + Ok(()) } diff --git a/crates/harness/src/runner/native.rs b/crates/harness/src/runner/native.rs new file mode 100644 index 0000000000..54ab1f7dcf --- /dev/null +++ b/crates/harness/src/runner/native.rs @@ -0,0 +1,135 @@ +use std::{ + panic::AssertUnwindSafe, + time::{Duration, Instant}, +}; + +use anyhow::{anyhow, Result}; +use futures::FutureExt; + +use crate::{ + runner::server_fixture, + test::{get_test, TestConfig, TestOutput}, + ProverProvider, VerifierProvider, DEFAULT_SERVER_IP, DEFAULT_SERVER_PORT, +}; + 
+use crate::bench::{bench_prover, bench_verifier, BenchConfig, Metrics}; + +pub struct NativeRunner { + state: State, +} + +impl NativeRunner { + pub fn new() -> Self { + Self { state: State::Init } + } + + pub async fn start(&mut self) -> Result<()> { + // Prebuild the PRF circuits, they're heavy. + hmac_sha256::build_circuits().await; + + tokio::spawn(server_fixture::start()); + + self.state = State::Running; + + Ok(()) + } + + pub async fn run_test(&self, config: &TestConfig) -> Result { + let State::Running = &self.state else { + return Err(anyhow!("runner not started")); + }; + + let test = get_test(&config.name).unwrap(); + + let mut verifier_provider = VerifierProvider::new(DEFAULT_SERVER_IP).await?; + let mut prover_provider = ProverProvider::new( + (DEFAULT_SERVER_IP.to_string(), DEFAULT_SERVER_PORT), + verifier_provider.addr(), + ); + + let start = Instant::now(); + let timeout = config.timeout; + + let prover_task = tokio::spawn(async move { + tokio::time::timeout( + Duration::from_secs(timeout), + AssertUnwindSafe((test.prover)(&mut prover_provider)).catch_unwind(), + ) + .await + }); + let verifier_task = tokio::spawn(async move { + tokio::time::timeout( + Duration::from_secs(timeout), + AssertUnwindSafe((test.verifier)(&mut verifier_provider)).catch_unwind(), + ) + .await + }); + + let (prover_result, verifier_result) = tokio::try_join!(prover_task, verifier_task)?; + + let Ok(prover_result) = prover_result else { + return Ok(TestOutput { + passed: false, + time: start.elapsed().as_secs(), + timed_out: true, + }); + }; + + let Ok(verifier_result) = verifier_result else { + return Ok(TestOutput { + passed: false, + time: start.elapsed().as_secs(), + timed_out: true, + }); + }; + + Ok(TestOutput { + passed: prover_result.is_ok() && verifier_result.is_ok(), + time: start.elapsed().as_secs(), + timed_out: false, + }) + } + + pub async fn run_bench(&self, bench: &BenchConfig) -> Result { + let State::Running = &self.state else { + return Err(anyhow!("runner not started")); + }; + + let mut verifier_provider = VerifierProvider::new(DEFAULT_SERVER_IP).await?; + let mut prover_provider = ProverProvider::new( + (DEFAULT_SERVER_IP.to_string(), DEFAULT_SERVER_PORT), + verifier_provider.addr(), + ); + + let prover_task = { + let bench = bench.clone(); + tokio::spawn(async move { bench_prover(&mut prover_provider, &bench).await }) + }; + + let verifier_task = { + let bench = bench.clone(); + tokio::spawn(async move { bench_verifier(&mut verifier_provider, &bench).await }) + }; + + let (metrics, _) = tokio::try_join!(prover_task, verifier_task)?; + + Ok(metrics?) 
+ } + + pub async fn stop(&mut self) -> Result<()> { + match &mut self.state { + State::Running => {} + _ => {} + } + + self.state = State::Done; + + Ok(()) + } +} + +enum State { + Init, + Running, + Done, +} diff --git a/crates/wasm-test-runner/src/server_fixture.rs b/crates/harness/src/runner/server_fixture.rs similarity index 60% rename from crates/wasm-test-runner/src/server_fixture.rs rename to crates/harness/src/runner/server_fixture.rs index 6a56fb6c08..9a6bfde3a6 100644 --- a/crates/wasm-test-runner/src/server_fixture.rs +++ b/crates/harness/src/runner/server_fixture.rs @@ -1,17 +1,17 @@ use std::{env, net::IpAddr}; +use futures_limit::AsyncReadDelayExt; use tlsn_server_fixture; use anyhow::Result; -use futures::Future; use tokio::net::TcpListener; use tokio_util::compat::TokioAsyncReadCompatExt; use tracing::{info, instrument}; -use crate::{DEFAULT_SERVER_IP, DEFAULT_SERVER_PORT}; +use crate::{config::SERVER_LATENCY, DEFAULT_SERVER_IP, DEFAULT_SERVER_PORT}; #[instrument] -pub async fn start() -> Result>> { +pub async fn start() -> Result<()> { let port: u16 = env::var("SERVER_PORT") .map(|port| port.parse().expect("port should be valid integer")) .unwrap_or(DEFAULT_SERVER_PORT); @@ -23,12 +23,12 @@ pub async fn start() -> Result>> { info!("listening on: {}", listener.local_addr()?); - Ok(async move { - loop { - let (socket, addr) = listener.accept().await?; - info!("accepted connection from: {}", addr); + loop { + let (socket, addr) = listener.accept().await?; + info!("accepted connection from: {}", addr); - tokio::spawn(tlsn_server_fixture::bind(socket.compat())); - } - }) + let (io, delay) = socket.compat().delay(SERVER_LATENCY / 2); + tokio::spawn(delay); + tokio::spawn(tlsn_server_fixture::bind(io)); + } } diff --git a/crates/wasm-test-runner/src/tlsn_fixture.rs b/crates/harness/src/runner/tlsn_fixture.rs similarity index 85% rename from crates/wasm-test-runner/src/tlsn_fixture.rs rename to crates/harness/src/runner/tlsn_fixture.rs index d2a1366e8d..6d50cf1ad0 100644 --- a/crates/wasm-test-runner/src/tlsn_fixture.rs +++ b/crates/harness/src/runner/tlsn_fixture.rs @@ -1,7 +1,7 @@ use std::{env, net::IpAddr}; use anyhow::Result; -use futures::{AsyncReadExt, AsyncWriteExt, Future}; +use futures::{AsyncReadExt, AsyncWriteExt}; use tls_core::{anchors::RootCertStore, verify::WebPkiVerifier}; use tlsn_common::config::{ProtocolConfig, ProtocolConfigValidator}; use tlsn_core::{ @@ -20,7 +20,7 @@ use crate::{ }; #[instrument] -pub async fn start() -> Result>> { +pub async fn start() -> Result<()> { let verifier_port: u16 = env::var("VERIFIER_PORT") .map(|port| port.parse().expect("port should be valid integer")) .unwrap_or(DEFAULT_VERIFIER_PORT); @@ -38,30 +38,28 @@ pub async fn start() -> Result>> { let notary_listener = TcpListener::bind((addr, notary_port)).await?; let prover_listener = TcpListener::bind((addr, prover_port)).await?; - Ok(async move { - loop { - tokio::select! { - res = verifier_listener.accept() => { - let (socket, addr) = res?; - info!("verifier accepted connection from: {}", addr); - - tokio::spawn(handle_verifier(socket)); - }, - res = notary_listener.accept() => { - let (socket, addr) = res?; - info!("notary accepted connection from: {}", addr); - - tokio::spawn(handle_notary(socket)); - }, - res = prover_listener.accept() => { - let (socket, addr) = res?; - info!("prover accepted connection from: {}", addr); - - tokio::spawn(handle_prover(socket)); - }, - } + loop { + tokio::select! 
{ + res = verifier_listener.accept() => { + let (socket, addr) = res?; + info!("verifier accepted connection from: {}", addr); + + tokio::spawn(handle_verifier(socket)); + }, + res = notary_listener.accept() => { + let (socket, addr) = res?; + info!("notary accepted connection from: {}", addr); + + tokio::spawn(handle_notary(socket)); + }, + res = prover_listener.accept() => { + let (socket, addr) = res?; + info!("prover accepted connection from: {}", addr); + + tokio::spawn(handle_prover(socket)); + }, } - }) + } } #[instrument(level = "debug", skip_all, err)] diff --git a/crates/harness/src/spawn.rs b/crates/harness/src/spawn.rs new file mode 100644 index 0000000000..72b8514179 --- /dev/null +++ b/crates/harness/src/spawn.rs @@ -0,0 +1,16 @@ +use cfg_if::cfg_if; +use std::future::Future; + +/// Spawns a future. +pub(crate) fn spawn(f: F) +where + F: Future + Send + 'static, +{ + cfg_if! { + if #[cfg(target_arch = "wasm32")] { + wasm_bindgen_futures::spawn_local(f); + } else { + tokio::spawn(f); + } + } +} diff --git a/crates/harness/src/test.rs b/crates/harness/src/test.rs new file mode 100644 index 0000000000..8b6bc9a945 --- /dev/null +++ b/crates/harness/src/test.rs @@ -0,0 +1,84 @@ +use std::{future::Future, pin::Pin}; + +use cfg_if::cfg_if; +use serde::{Deserialize, Serialize}; + +use crate::{ProverProvider, VerifierProvider}; + +pub const DEFAULT_TEST_TIMEOUT: u64 = 300; + +cfg_if! { + if #[cfg(target_arch = "wasm32")] { + pub type ProverTest = + for<'a> fn(&'a mut ProverProvider) -> Pin + 'a>>; + pub type VerifierTest = + for<'a> fn(&'a mut VerifierProvider) -> Pin + 'a>>; + } else { + pub type ProverTest = + for<'a> fn(&'a mut ProverProvider) -> Pin + Send + 'a>>; + pub type VerifierTest = + for<'a> fn(&'a mut VerifierProvider) -> Pin + Send + 'a>>; + } +} + +pub(crate) fn get_test(name: &str) -> Option<&'static Test> { + inventory::iter:: + .into_iter() + .find(|test| test.name == name) +} + +pub fn collect_tests(name: Option<&str>) -> Vec { + inventory::iter:: + .into_iter() + .filter_map(|test| { + if let Some(name) = name { + if test.name == name { + Some(test.name.to_string()) + } else { + None + } + } else { + Some(test.name.to_string()) + } + }) + .collect() +} + +pub struct Test { + pub name: &'static str, + pub prover: ProverTest, + pub verifier: VerifierTest, +} + +inventory::collect!(Test); + +macro_rules! 
test { + ($name:literal, $prover:ident, $verifier:ident) => { + inventory::submit!(crate::test::Test { + name: $name, + prover: move |p| Box::pin($prover(p)) as _, + verifier: move |v| Box::pin($verifier(v)) as _, + }); + }; +} +pub(crate) use test; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TestConfig { + pub name: String, + pub timeout: u64, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BrowserTestConfig { + pub test: TestConfig, + pub proxy_addr: (String, u16), + pub server_addr: (String, u16), +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TestOutput { + pub passed: bool, + pub time: u64, + pub timed_out: bool, +} diff --git a/crates/harness/src/tests.rs b/crates/harness/src/tests.rs new file mode 100644 index 0000000000..bb75a32390 --- /dev/null +++ b/crates/harness/src/tests.rs @@ -0,0 +1,109 @@ +use futures::{AsyncReadExt, AsyncWriteExt}; +use tls_core::verify::WebPkiVerifier; +use tlsn_common::config::{ProtocolConfig, ProtocolConfigValidator}; +use tlsn_core::{transcript::Idx, CryptoProvider}; +use tlsn_prover::{Prover, ProverConfig}; +use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; +use tlsn_verifier::{Verifier, VerifierConfig}; + +use crate::{test::test, ProverProvider, VerifierProvider}; + +async fn test_prover(provider: &mut ProverProvider) { + let mut root_store = tls_core::anchors::RootCertStore::empty(); + root_store + .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) + .unwrap(); + + let crypto_provider = CryptoProvider { + cert: WebPkiVerifier::new(root_store, None), + ..Default::default() + }; + + let mut builder = ProtocolConfig::builder(); + builder + .max_sent_data(4096) + .max_recv_data_online(4096) + .max_recv_data(4096); + + let prover = Prover::new( + ProverConfig::builder() + .server_name(SERVER_DOMAIN) + .protocol_config( + ProtocolConfig::builder() + .max_sent_data(4096) + .max_recv_data(4096) + .build() + .unwrap(), + ) + .crypto_provider(crypto_provider) + .build() + .unwrap(), + ); + + let prover = prover + .setup(provider.provide_verifier().await.unwrap()) + .await + .unwrap(); + + let (mut conn, fut) = prover + .connect(provider.provide_server().await.unwrap()) + .await + .unwrap(); + + let (_, prover) = futures::join!( + async { + conn.write_all(b"GET / HTTP/1.1\r\nConnection: close\r\n\r\n") + .await + .unwrap(); + conn.close().await.unwrap(); + + let mut response = vec![0u8; 1024]; + conn.read_to_end(&mut response).await.unwrap(); + }, + async { fut.await.unwrap() } + ); + + let mut prover = prover.start_prove(); + + let (sent_len, recv_len) = prover.transcript().len(); + + prover + .prove_transcript(Idx::new(0..sent_len), Idx::new(0..recv_len)) + .await + .unwrap(); + + prover.finalize().await.unwrap(); +} + +async fn test_verifier(provider: &mut VerifierProvider) { + let mut root_store = tls_core::anchors::RootCertStore::empty(); + root_store + .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) + .unwrap(); + + let crypto_provider = CryptoProvider { + cert: WebPkiVerifier::new(root_store, None), + ..Default::default() + }; + + let config_validator = ProtocolConfigValidator::builder() + .max_sent_data(4096) + .max_recv_data(4096) + .build() + .unwrap(); + + let verifier = Verifier::new( + VerifierConfig::builder() + .protocol_config_validator(config_validator) + .crypto_provider(crypto_provider) + .build() + .unwrap(), + ); + + let _ = verifier + .verify(provider.provide_prover().await.unwrap()) + .await + .unwrap(); +} + +test!("test_basic", test_prover, test_verifier); 
diff --git a/crates/harness/src/wasm.rs b/crates/harness/src/wasm.rs new file mode 100644 index 0000000000..4d4dffcd8f --- /dev/null +++ b/crates/harness/src/wasm.rs @@ -0,0 +1,70 @@ +pub use tlsn_wasm::*; + +use gloo_utils::format::JsValueSerdeExt; +use wasm_bindgen::prelude::*; + +use crate::{ + bench::{bench_prover, BrowserBenchConfig}, + provider::{ProverProvider, VerifierAddr, VerifierProvider}, + test::{get_test, BrowserTestConfig}, +}; + +extern "C" { + fn __wasm_call_ctors(); +} + +#[wasm_bindgen(start)] +pub fn main() { + unsafe { __wasm_call_ctors() }; +} + +#[wasm_bindgen(js_name = "runTestProver")] +pub async fn run_test_prover(config: JsValue) -> Result<(), JsError> { + let config: BrowserTestConfig = config.into_serde()?; + + let test = get_test(&config.test.name).unwrap(); + + let mut provider = ProverProvider::new( + config.proxy_addr.clone(), + config.server_addr.clone(), + VerifierAddr::Ws { + id: config.test.name, + }, + ); + + (test.prover)(&mut provider).await; + + Ok(()) +} + +#[wasm_bindgen(js_name = "runTestVerifier")] +pub async fn run_test_verifier(config: JsValue) -> Result<(), JsError> { + let config: BrowserTestConfig = config.into_serde()?; + + let test = get_test(&config.test.name).unwrap(); + + let mut provider = VerifierProvider::new(config.proxy_addr.clone(), &config.test.name); + + (test.verifier)(&mut provider).await; + + Ok(()) +} + +#[wasm_bindgen(js_name = "runBench")] +pub async fn run_bench(config: JsValue) -> Result { + let config: BrowserBenchConfig = config.into_serde()?; + + let mut provider = ProverProvider::new( + config.proxy_addr.clone(), + config.server_addr.clone(), + VerifierAddr::Tcp { + addr: config.verifier_addr, + }, + ); + + let metrics = bench_prover(&mut provider, &config.bench) + .await + .map_err(|e| JsError::new(&e.to_string()))?; + + Ok(JsValue::from_serde(&metrics)?) 
+} diff --git a/crates/wasm-test-runner/static/favicon.ico b/crates/harness/static/favicon.ico similarity index 100% rename from crates/wasm-test-runner/static/favicon.ico rename to crates/harness/static/favicon.ico diff --git a/crates/wasm-test-runner/static/index.html b/crates/harness/static/index.html similarity index 100% rename from crates/wasm-test-runner/static/index.html rename to crates/harness/static/index.html diff --git a/crates/harness/static/index.js b/crates/harness/static/index.js new file mode 100644 index 0000000000..20609f9447 --- /dev/null +++ b/crates/harness/static/index.js @@ -0,0 +1,5 @@ +import * as Comlink from "https://unpkg.com/comlink/dist/esm/comlink.mjs"; + +const worker = Comlink.wrap(new Worker("worker.js", { type: "module" })); + +window.worker = worker; diff --git a/crates/harness/static/worker.js b/crates/harness/static/worker.js new file mode 100644 index 0000000000..94a1159414 --- /dev/null +++ b/crates/harness/static/worker.js @@ -0,0 +1,31 @@ +import * as Comlink from "https://unpkg.com/comlink/dist/esm/comlink.mjs"; +import initWasm, * as wasm from "./generated/tlsn_harness.js"; + +class WasmWorker { + async init() { + try { + console.log("initializing wasm"); + await initWasm(); + await wasm.initialize({ thread_count: navigator.hardwareConcurrency }); + } catch (e) { + console.error(e); + throw e; + } + } + + async runTestProver(config) { + return await wasm.runTestProver(config); + } + + async runTestVerifier(config) { + return await wasm.runTestVerifier(config); + } + + async runBench(bench) { + return await wasm.runBench(bench); + } +} + +const worker = new WasmWorker(); + +Comlink.expose(worker); diff --git a/crates/notary/server/notary-server.Dockerfile.dockerignore b/crates/notary/server/notary-server.Dockerfile.dockerignore index 07fcd15262..9d37928df8 100644 --- a/crates/notary/server/notary-server.Dockerfile.dockerignore +++ b/crates/notary/server/notary-server.Dockerfile.dockerignore @@ -1,4 +1,4 @@ # exclude Rust build artifacts ./target ./crates/wasm/pkg/ -./crates/wasm-test-runner/static/generated/ +./crates/harness/static/generated/ diff --git a/crates/tests-integration/Cargo.toml b/crates/tests-integration/Cargo.toml deleted file mode 100644 index a22e7cfb20..0000000000 --- a/crates/tests-integration/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "tests-integration" -version = "0.0.0" -edition = "2021" -publish = false - -[dev-dependencies] -tlsn-core = { workspace = true } -tlsn-common = { workspace = true } -tlsn-prover = { workspace = true } -tlsn-server-fixture = { workspace = true } -tlsn-server-fixture-certs = { workspace = true } -tlsn-tls-core = { workspace = true } -tlsn-verifier = { workspace = true } - -futures = { workspace = true } -http-body-util = { workspace = true } -hyper = { workspace = true, features = ["client", "http1"] } -hyper-util = { workspace = true, features = ["full"] } -tokio = { workspace = true, features = ["rt", "rt-multi-thread", "macros"] } -tokio-util = { workspace = true } -tracing = { workspace = true } -tracing-subscriber = { workspace = true } diff --git a/crates/tests-integration/tests/defer_decryption.rs b/crates/tests-integration/tests/defer_decryption.rs deleted file mode 100644 index 88eda4e385..0000000000 --- a/crates/tests-integration/tests/defer_decryption.rs +++ /dev/null @@ -1,133 +0,0 @@ -use futures::{AsyncReadExt, AsyncWriteExt}; -use tls_core::verify::WebPkiVerifier; -use tlsn_common::config::{ProtocolConfig, ProtocolConfigValidator}; -use tlsn_core::{ - 
attestation::AttestationConfig, request::RequestConfig, signing::SignatureAlgId, - transcript::TranscriptCommitConfig, CryptoProvider, -}; -use tlsn_prover::{Prover, ProverConfig}; -use tlsn_server_fixture::bind; -use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; -use tlsn_verifier::{Verifier, VerifierConfig}; - -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio_util::compat::TokioAsyncReadCompatExt; -use tracing::instrument; - -// Maximum number of bytes that can be sent from prover to server -const MAX_SENT_DATA: usize = 1 << 12; -// Maximum number of bytes that can be received by prover from server -const MAX_RECV_DATA: usize = 1 << 14; - -#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -#[ignore] -async fn test_defer_decryption() { - tracing_subscriber::fmt::init(); - - let (socket_0, socket_1) = tokio::io::duplex(2 << 23); - - tokio::join!(prover(socket_0), notary(socket_1)); -} - -#[instrument(skip(notary_socket))] -async fn prover(notary_socket: T) { - let (client_socket, server_socket) = tokio::io::duplex(2 << 16); - - let server_task = tokio::spawn(bind(server_socket.compat())); - - let mut root_store = tls_core::anchors::RootCertStore::empty(); - root_store - .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) - .unwrap(); - - let provider = CryptoProvider { - cert: WebPkiVerifier::new(root_store, None), - ..Default::default() - }; - - let prover = Prover::new( - ProverConfig::builder() - .server_name(SERVER_DOMAIN) - .protocol_config( - ProtocolConfig::builder() - .max_sent_data(MAX_SENT_DATA) - .max_recv_data(MAX_RECV_DATA) - .build() - .unwrap(), - ) - .crypto_provider(provider) - .build() - .unwrap(), - ) - .setup(notary_socket.compat()) - .await - .unwrap(); - - let (mut tls_connection, prover_fut) = prover.connect(client_socket.compat()).await.unwrap(); - let prover_task = tokio::spawn(prover_fut); - - tls_connection - .write_all(b"GET / HTTP/1.1\r\nConnection: close\r\n\r\n") - .await - .unwrap(); - tls_connection.close().await.unwrap(); - - let mut response = vec![0u8; 1024]; - tls_connection.read_to_end(&mut response).await.unwrap(); - - let _ = server_task.await.unwrap(); - - let mut prover = prover_task.await.unwrap().unwrap().start_notarize(); - let sent_tx_len = prover.transcript().sent().len(); - let recv_tx_len = prover.transcript().received().len(); - - let mut builder = TranscriptCommitConfig::builder(prover.transcript()); - - // Commit to everything - builder.commit_sent(&(0..sent_tx_len)).unwrap(); - builder.commit_recv(&(0..recv_tx_len)).unwrap(); - - let config = builder.build().unwrap(); - - prover.transcript_commit(config); - - let config = RequestConfig::default(); - - prover.finalize(&config).await.unwrap(); -} - -#[instrument(skip(socket))] -async fn notary(socket: T) { - let mut root_store = tls_core::anchors::RootCertStore::empty(); - root_store - .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) - .unwrap(); - - let mut provider = CryptoProvider { - cert: WebPkiVerifier::new(root_store, None), - ..Default::default() - }; - - provider.signer.set_secp256k1(&[1u8; 32]).unwrap(); - - let config_validator = ProtocolConfigValidator::builder() - .max_sent_data(MAX_SENT_DATA) - .max_recv_data(MAX_RECV_DATA) - .build() - .unwrap(); - - let verifier = Verifier::new( - VerifierConfig::builder() - .protocol_config_validator(config_validator) - .crypto_provider(provider) - .build() - .unwrap(), - ); - - let config = AttestationConfig::builder() - .supported_signature_algs(vec![SignatureAlgId::SECP256K1]) - .build() - .unwrap(); - 
- _ = verifier.notarize(socket.compat(), &config).await.unwrap(); -} diff --git a/crates/tests-integration/tests/notarize.rs b/crates/tests-integration/tests/notarize.rs deleted file mode 100644 index 366a4d92f7..0000000000 --- a/crates/tests-integration/tests/notarize.rs +++ /dev/null @@ -1,151 +0,0 @@ -use tls_core::verify::WebPkiVerifier; -use tlsn_common::config::{ProtocolConfig, ProtocolConfigValidator}; -use tlsn_core::{ - attestation::AttestationConfig, request::RequestConfig, signing::SignatureAlgId, - transcript::TranscriptCommitConfig, CryptoProvider, -}; -use tlsn_prover::{Prover, ProverConfig}; -use tlsn_server_fixture::bind; -use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; -use tlsn_verifier::{Verifier, VerifierConfig}; - -use http_body_util::{BodyExt as _, Empty}; -use hyper::{body::Bytes, Request, StatusCode}; -use hyper_util::rt::TokioIo; -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt}; -use tracing::instrument; - -// Maximum number of bytes that can be sent from prover to server -const MAX_SENT_DATA: usize = 1 << 12; -// Maximum number of bytes that can be received by prover from server -const MAX_RECV_DATA: usize = 1 << 14; - -#[tokio::test] -#[ignore] -async fn notarize() { - tracing_subscriber::fmt::init(); - - let (socket_0, socket_1) = tokio::io::duplex(2 << 23); - - tokio::join!(prover(socket_0), notary(socket_1)); -} - -#[instrument(skip(notary_socket))] -async fn prover(notary_socket: T) { - let (client_socket, server_socket) = tokio::io::duplex(2 << 16); - - let server_task = tokio::spawn(bind(server_socket.compat())); - - let mut root_store = tls_core::anchors::RootCertStore::empty(); - root_store - .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) - .unwrap(); - - let provider = CryptoProvider { - cert: WebPkiVerifier::new(root_store, None), - ..Default::default() - }; - - let protocol_config = ProtocolConfig::builder() - .max_sent_data(MAX_SENT_DATA) - .max_recv_data(MAX_RECV_DATA) - .max_recv_data_online(MAX_RECV_DATA) - .build() - .unwrap(); - - let prover = Prover::new( - ProverConfig::builder() - .server_name(SERVER_DOMAIN) - .defer_decryption_from_start(false) - .protocol_config(protocol_config) - .crypto_provider(provider) - .build() - .unwrap(), - ) - .setup(notary_socket.compat()) - .await - .unwrap(); - - let (tls_connection, prover_fut) = prover.connect(client_socket.compat()).await.unwrap(); - - let prover_task = tokio::spawn(prover_fut); - - let (mut request_sender, connection) = - hyper::client::conn::http1::handshake(TokioIo::new(tls_connection.compat())) - .await - .unwrap(); - - tokio::spawn(connection); - - let request = Request::builder() - .uri(format!("https://{}/bytes?size=16000", SERVER_DOMAIN)) - .header("Host", SERVER_DOMAIN) - .header("Connection", "close") - .method("GET") - .body(Empty::::new()) - .unwrap(); - - let response = request_sender.send_request(request).await.unwrap(); - - assert!(response.status() == StatusCode::OK); - - let payload = response.into_body().collect().await.unwrap().to_bytes(); - println!("{:?}", &String::from_utf8_lossy(&payload)); - - let _ = server_task.await.unwrap(); - - let mut prover = prover_task.await.unwrap().unwrap().start_notarize(); - let sent_tx_len = prover.transcript().sent().len(); - let recv_tx_len = prover.transcript().received().len(); - - let mut builder = TranscriptCommitConfig::builder(prover.transcript()); - - // Commit to everything - builder.commit_sent(&(0..sent_tx_len)).unwrap(); - 
builder.commit_recv(&(0..recv_tx_len)).unwrap(); - - let config = builder.build().unwrap(); - - prover.transcript_commit(config); - - let config = RequestConfig::default(); - - prover.finalize(&config).await.unwrap(); -} - -#[instrument(skip(socket))] -async fn notary(socket: T) { - let mut root_store = tls_core::anchors::RootCertStore::empty(); - root_store - .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) - .unwrap(); - - let mut provider = CryptoProvider { - cert: WebPkiVerifier::new(root_store, None), - ..Default::default() - }; - - provider.signer.set_secp256k1(&[1u8; 32]).unwrap(); - - let config_validator = ProtocolConfigValidator::builder() - .max_sent_data(MAX_SENT_DATA) - .max_recv_data(MAX_RECV_DATA) - .build() - .unwrap(); - - let verifier = Verifier::new( - VerifierConfig::builder() - .protocol_config_validator(config_validator) - .crypto_provider(provider) - .build() - .unwrap(), - ); - - let config = AttestationConfig::builder() - .supported_signature_algs(vec![SignatureAlgId::SECP256K1]) - .build() - .unwrap(); - - _ = verifier.notarize(socket.compat(), &config).await.unwrap(); -} diff --git a/crates/tests-integration/tests/verify.rs b/crates/tests-integration/tests/verify.rs deleted file mode 100644 index 8f5befc72c..0000000000 --- a/crates/tests-integration/tests/verify.rs +++ /dev/null @@ -1,149 +0,0 @@ -use tls_core::{anchors::RootCertStore, verify::WebPkiVerifier}; -use tlsn_common::config::{ProtocolConfig, ProtocolConfigValidator}; -use tlsn_core::{ - transcript::{Idx, PartialTranscript}, - CryptoProvider, -}; -use tlsn_prover::{Prover, ProverConfig}; -use tlsn_server_fixture::bind; -use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; -use tlsn_verifier::{SessionInfo, Verifier, VerifierConfig}; - -use http_body_util::{BodyExt as _, Empty}; -use hyper::{body::Bytes, Request, StatusCode}; -use hyper_util::rt::TokioIo; - -use tokio::io::{AsyncRead, AsyncWrite}; -use tokio_util::compat::{FuturesAsyncReadCompatExt, TokioAsyncReadCompatExt}; -use tracing::instrument; - -// Maximum number of bytes that can be sent from prover to server -const MAX_SENT_DATA: usize = 1 << 11; -// Maximum number of bytes that can be received by prover from server -const MAX_RECV_DATA: usize = 1 << 11; - -#[tokio::test] -#[ignore] -async fn verify() { - tracing_subscriber::fmt::init(); - - let (socket_0, socket_1) = tokio::io::duplex(1 << 23); - - let (_, (partial_transcript, info)) = tokio::join!(prover(socket_0), verifier(socket_1)); - - assert_eq!( - partial_transcript.sent_authed(), - &Idx::new(0..partial_transcript.len_sent() - 1) - ); - assert_eq!( - partial_transcript.received_authed(), - &Idx::new(2..partial_transcript.len_received()) - ); - assert_eq!(info.server_name.as_str(), SERVER_DOMAIN); -} - -#[instrument(skip(notary_socket))] -async fn prover(notary_socket: T) { - let (client_socket, server_socket) = tokio::io::duplex(1 << 16); - - let server_task = tokio::spawn(bind(server_socket.compat())); - - let mut root_store = RootCertStore::empty(); - root_store - .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) - .unwrap(); - - let provider = CryptoProvider { - cert: WebPkiVerifier::new(root_store, None), - ..Default::default() - }; - - let prover = Prover::new( - ProverConfig::builder() - .server_name(SERVER_DOMAIN) - .defer_decryption_from_start(true) - .protocol_config( - ProtocolConfig::builder() - .max_sent_data(MAX_SENT_DATA) - .max_recv_data(MAX_RECV_DATA) - .build() - .unwrap(), - ) - .crypto_provider(provider) - .build() - .unwrap(), - ) - 
.setup(notary_socket.compat()) - .await - .unwrap(); - - let (tls_connection, prover_fut) = prover.connect(client_socket.compat()).await.unwrap(); - - let prover_task = tokio::spawn(prover_fut); - - let (mut request_sender, connection) = - hyper::client::conn::http1::handshake(TokioIo::new(tls_connection.compat())) - .await - .unwrap(); - - tokio::spawn(connection); - - let request = Request::builder() - .uri(format!("https://{}", SERVER_DOMAIN)) - .header("Host", SERVER_DOMAIN) - .header("Connection", "close") - .method("GET") - .body(Empty::::new()) - .unwrap(); - - let response = request_sender.send_request(request).await.unwrap(); - - assert!(response.status() == StatusCode::OK); - - let payload = response.into_body().collect().await.unwrap().to_bytes(); - println!("{:?}", &String::from_utf8_lossy(&payload)); - - let _ = server_task.await.unwrap(); - - let mut prover = prover_task.await.unwrap().unwrap().start_prove(); - - let (sent_len, recv_len) = prover.transcript().len(); - - let idx_sent = Idx::new(0..sent_len - 1); - let idx_recv = Idx::new(2..recv_len); - - // Reveal parts of the transcript - prover.prove_transcript(idx_sent, idx_recv).await.unwrap(); - prover.finalize().await.unwrap(); -} - -#[instrument(skip(socket))] -async fn verifier( - socket: T, -) -> (PartialTranscript, SessionInfo) { - let mut root_store = RootCertStore::empty(); - root_store - .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) - .unwrap(); - - let provider = CryptoProvider { - cert: WebPkiVerifier::new(root_store, None), - ..Default::default() - }; - - let config = VerifierConfig::builder() - .protocol_config_validator( - ProtocolConfigValidator::builder() - .max_sent_data(MAX_SENT_DATA) - .max_recv_data(MAX_RECV_DATA) - .build() - .unwrap(), - ) - .crypto_provider(provider) - .build() - .unwrap(); - - let verifier = Verifier::new(config); - - verifier.verify(socket.compat()).await.unwrap() -} diff --git a/crates/wasm-test-runner/Cargo.toml b/crates/wasm-test-runner/Cargo.toml deleted file mode 100644 index 5f2eedff7a..0000000000 --- a/crates/wasm-test-runner/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "tlsn-wasm-test-runner" -version = "0.0.0" -edition = "2021" -publish = false - -[dependencies] -tlsn-common = { workspace = true } -tlsn-core = { workspace = true } -tlsn-prover = { workspace = true } -tlsn-server-fixture = { workspace = true } -tlsn-server-fixture-certs = { workspace = true } -tlsn-tls-core = { workspace = true } -tlsn-verifier = { workspace = true } - -websocket-relay = { workspace = true } - -anyhow = { workspace = true } -axum = { workspace = true } -chromiumoxide = { version = "0.6", features = ["tokio-runtime"] } -futures = { workspace = true } -serde = { workspace = true, features = ["derive"] } -tokio = { workspace = true, features = ["full"] } -tokio-util = { workspace = true, features = ["compat"] } -tower = { version = "0.4" } -tower-http = { version = "0.5", features = ["fs", "set-header"] } -tracing = { workspace = true } -tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/crates/wasm-test-runner/src/lib.rs b/crates/wasm-test-runner/src/lib.rs deleted file mode 100644 index 7791d26e94..0000000000 --- a/crates/wasm-test-runner/src/lib.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::fmt::Display; - -pub mod chrome_driver; -pub mod server_fixture; -pub mod tlsn_fixture; -pub mod wasm_server; -pub mod ws; - -pub static DEFAULT_SERVER_IP: &str = "127.0.0.1"; -pub static DEFAULT_WASM_PORT: u16 = 8013; -pub static DEFAULT_WS_PORT: 
u16 = 8080; -pub static DEFAULT_SERVER_PORT: u16 = 8083; -pub static DEFAULT_VERIFIER_PORT: u16 = 8010; -pub static DEFAULT_NOTARY_PORT: u16 = 8011; -pub static DEFAULT_PROVER_PORT: u16 = 8012; - -#[derive(Debug, serde::Deserialize)] -pub struct TestResult { - pub name: String, - pub passed: bool, - #[serde(default)] - pub duration_secs: f64, - pub error: Option, -} - -impl Display for TestResult { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if self.passed { - write!(f, "{}: passed in {} seconds", self.name, self.duration_secs)?; - } else { - write!(f, "{}: failed", self.name)?; - if let Some(error) = &self.error { - write!(f, "\ncaused by: {}", error)?; - } - } - - Ok(()) - } -} diff --git a/crates/wasm-test-runner/src/main.rs b/crates/wasm-test-runner/src/main.rs deleted file mode 100644 index 7b8fb5fa73..0000000000 --- a/crates/wasm-test-runner/src/main.rs +++ /dev/null @@ -1,42 +0,0 @@ -use anyhow::Result; - -fn init_tracing() { - use tracing_subscriber::EnvFilter; - - tracing_subscriber::fmt() - .with_env_filter(EnvFilter::from_default_env()) - .init(); -} - -#[tokio::main] -async fn main() -> Result<()> { - init_tracing(); - - let fut_wasm = tlsn_wasm_test_runner::wasm_server::start().await?; - let fut_proxy = tlsn_wasm_test_runner::ws::start().await?; - let fut_tlsn = tlsn_wasm_test_runner::tlsn_fixture::start().await?; - let fut_server = tlsn_wasm_test_runner::server_fixture::start().await?; - - tokio::spawn(async move { - futures::future::try_join4(fut_wasm, fut_proxy, fut_tlsn, fut_server) - .await - .unwrap() - }); - - let results = tlsn_wasm_test_runner::chrome_driver::run().await?; - - for result in &results { - println!("{}", result); - } - - let passed = results.iter().filter(|r| r.passed).count(); - let failed = results.iter().filter(|r| !r.passed).count(); - - println!("{} passed, {} failed", passed, failed); - - if results.iter().any(|r| !r.passed) { - std::process::exit(1); - } - - Ok(()) -} diff --git a/crates/wasm-test-runner/static/index.js b/crates/wasm-test-runner/static/index.js deleted file mode 100644 index b8fe8e2fc0..0000000000 --- a/crates/wasm-test-runner/static/index.js +++ /dev/null @@ -1,5 +0,0 @@ -import * as Comlink from "https://unpkg.com/comlink/dist/esm/comlink.mjs"; - -const testWorker = Comlink.wrap(new Worker("worker.js", { type: "module" })); - -window.testWorker = testWorker; diff --git a/crates/wasm-test-runner/static/worker.js b/crates/wasm-test-runner/static/worker.js deleted file mode 100644 index 396046cd91..0000000000 --- a/crates/wasm-test-runner/static/worker.js +++ /dev/null @@ -1,50 +0,0 @@ -import * as Comlink from "https://unpkg.com/comlink/dist/esm/comlink.mjs"; -import init_wasm, { initialize } from "./generated/tlsn_wasm.js"; - -const module = await import("./generated/tlsn_wasm.js"); - -class TestWorker { - async init() { - try { - console.log("initializing wasm"); - await init_wasm(); - await initialize({ thread_count: navigator.hardwareConcurrency }); - } catch (e) { - console.error(e); - throw e; - } - } - - async run() { - let promises = []; - for (const [name, func] of Object.entries(module)) { - if (name.startsWith("test_") && (typeof func === 'function')) { - promises.push((async () => { - const start = performance.now(); - try { - await func(); - } catch (error) { - return { - name: name, - passed: false, - error: error.toString(), - } - } - - const duration_secs = (performance.now() - start) / 1000; - console.log(`Test ${name} passed in ${duration_secs} seconds`); - return { - name: name, - 
passed: true, - duration_secs, - } - })()); - } - } - return Promise.all(promises); - } -} - -const worker = new TestWorker(); - -Comlink.expose(worker); diff --git a/crates/wasm/Cargo.toml b/crates/wasm/Cargo.toml index a490fda2dd..ca462f0a1c 100644 --- a/crates/wasm/Cargo.toml +++ b/crates/wasm/Cargo.toml @@ -18,18 +18,15 @@ debug = false opt-level = "z" wasm-opt = true -[features] -default = [] -test = [] - [dependencies] -tlsn-common = { path = "../common" } -tlsn-core = { path = "../core" } -tlsn-prover = { path = "../prover" } +tlsn-common = { workspace = true } +tlsn-core = { workspace = true } +tlsn-hmac-sha256 = { workspace = true } +tlsn-prover = { workspace = true } tlsn-server-fixture-certs = { workspace = true } -tlsn-tls-client-async = { path = "../tls/client-async" } -tlsn-tls-core = { path = "../tls/core" } -tlsn-verifier = { path = "../verifier" } +tlsn-tls-client-async = { workspace = true } +tlsn-tls-core = { workspace = true } +tlsn-verifier = { workspace = true } bincode = { workspace = true } console_error_panic_hook = { version = "0.1" } @@ -49,10 +46,10 @@ tracing-subscriber = { workspace = true, features = ["time"] } tracing-web = { version = "0.1" } tsify-next = { version = "0.5", default-features = false, features = ["js"] } wasm-bindgen = { version = "0.2" } -wasm-bindgen-futures = { version = "0.4" } +wasm-bindgen-futures = { workspace = true } web-spawn = { workspace = true } -# Use the patched ws_stream_wasm to fix the issue https://github.com/najamelan/ws_stream_wasm/issues/12#issuecomment-1711902958 -ws_stream_wasm = { git = "https://github.com/tlsnotary/ws_stream_wasm", rev = "2ed12aad9f0236e5321f577672f309920b2aef51" } +web-time = { workspace = true } +ws_stream_wasm = { workspace = true } [target.'cfg(target_arch = "wasm32")'.dependencies] getrandom = { version = "0.2", features = ["js"] } diff --git a/crates/wasm/src/lib.rs b/crates/wasm/src/lib.rs index 35ad91668f..e90f351677 100644 --- a/crates/wasm/src/lib.rs +++ b/crates/wasm/src/lib.rs @@ -7,8 +7,6 @@ pub(crate) mod io; mod log; pub mod prover; -#[cfg(feature = "test")] -pub mod tests; pub mod types; pub mod verifier; @@ -20,9 +18,6 @@ use wasm_bindgen_futures::JsFuture; use crate::types::{Attestation, Presentation, Reveal, Secrets}; -#[cfg(feature = "test")] -pub use tests::*; - /// Initializes the module. 
#[wasm_bindgen] pub async fn initialize( @@ -44,6 +39,8 @@ pub async fn initialize( .build_global() .unwrap_throw(); + hmac_sha256::build_circuits().await; + Ok(()) } diff --git a/crates/wasm/src/tests.rs b/crates/wasm/src/tests.rs deleted file mode 100644 index 80689ed6c4..0000000000 --- a/crates/wasm/src/tests.rs +++ /dev/null @@ -1,186 +0,0 @@ -#![allow(clippy::single_range_in_vec_init)] - -use std::collections::HashMap; - -use tls_core::verify::WebPkiVerifier; -use tlsn_common::config::{ProtocolConfig, ProtocolConfigValidator}; -use tlsn_core::CryptoProvider; -use tlsn_prover::{Prover, ProverConfig}; -use tlsn_server_fixture_certs::{CA_CERT_DER, SERVER_DOMAIN}; -use tlsn_verifier::{Verifier, VerifierConfig}; -use wasm_bindgen::prelude::*; - -use crate::{ - build_presentation, - prover::JsProver, - types::{ - Attestation, Commit, HttpRequest, Method, NotarizationOutput, Presentation, Reveal, Secrets, - }, - verifier::JsVerifier, -}; - -#[wasm_bindgen] -pub async fn test_prove() -> Result<(), JsValue> { - let mut root_store = tls_core::anchors::RootCertStore::empty(); - root_store - .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) - .unwrap(); - - let provider = CryptoProvider { - cert: WebPkiVerifier::new(root_store, None), - ..Default::default() - }; - - let prover = Prover::new( - ProverConfig::builder() - .server_name(SERVER_DOMAIN) - .protocol_config( - ProtocolConfig::builder() - .max_sent_data(1024) - .max_recv_data(1024) - .build() - .unwrap(), - ) - .crypto_provider(provider) - .build() - .unwrap(), - ); - - let mut prover = JsProver::from(prover); - - let uri = format!("https://{}/bytes?size=512", SERVER_DOMAIN); - - prover - .setup("ws://localhost:8080/tcp?addr=localhost%3A8010") - .await?; - - prover - .send_request( - "ws://localhost:8080/tcp?addr=localhost%3A8083", - HttpRequest { - method: Method::GET, - uri, - headers: HashMap::from([("Accept".to_string(), b"*".to_vec())]), - body: None, - }, - ) - .await?; - - prover - .reveal(Reveal { - sent: vec![0..10], - recv: vec![0..10], - }) - .await?; - - Ok(()) -} - -#[wasm_bindgen] -pub async fn test_notarize() -> Result<(), JsValue> { - let mut root_store = tls_core::anchors::RootCertStore::empty(); - root_store - .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) - .unwrap(); - - let provider = CryptoProvider { - cert: WebPkiVerifier::new(root_store, None), - ..Default::default() - }; - - let prover = Prover::new( - ProverConfig::builder() - .server_name(SERVER_DOMAIN) - .protocol_config( - ProtocolConfig::builder() - .max_sent_data(1024) - .max_recv_data(1024) - .build() - .unwrap(), - ) - .crypto_provider(provider) - .build() - .unwrap(), - ); - - let mut prover = JsProver::from(prover); - - let uri = format!("https://{SERVER_DOMAIN}/bytes?size=512"); - - prover - .setup("ws://localhost:8080/tcp?addr=localhost%3A8011") - .await?; - - prover - .send_request( - "ws://localhost:8080/tcp?addr=localhost%3A8083", - HttpRequest { - method: Method::GET, - uri, - headers: HashMap::from([("Accept".to_string(), b"*".to_vec())]), - body: None, - }, - ) - .await?; - - let _ = prover.transcript()?; - - let NotarizationOutput { - attestation, - secrets, - } = prover - .notarize(Commit { - sent: vec![0..10], - recv: vec![0..10], - }) - .await?; - - let attestation = Attestation::deserialize(attestation.serialize())?; - let secrets = Secrets::deserialize(secrets.serialize())?; - - let presentation = build_presentation( - &attestation, - &secrets, - Reveal { - sent: vec![(0..10)], - recv: vec![(0..10)], - }, - )?; - - let _ = 
Presentation::deserialize(presentation.serialize())?; - - Ok(()) -} - -#[wasm_bindgen] -pub async fn test_verifier() -> Result<(), JsValue> { - let mut root_store = tls_core::anchors::RootCertStore::empty(); - root_store - .add(&tls_core::key::Certificate(CA_CERT_DER.to_vec())) - .unwrap(); - - let provider = CryptoProvider { - cert: WebPkiVerifier::new(root_store, None), - ..Default::default() - }; - - let config = VerifierConfig::builder() - .protocol_config_validator( - ProtocolConfigValidator::builder() - .max_sent_data(1024) - .max_recv_data(1024) - .build() - .unwrap(), - ) - .crypto_provider(provider) - .build() - .unwrap(); - - let mut verifier = JsVerifier::from(Verifier::new(config)); - verifier - .connect("ws://localhost:8080/tcp?addr=localhost%3A8012") - .await?; - verifier.verify().await?; - - Ok(()) -} diff --git a/pre-commit-check.sh b/pre-commit-check.sh index 132ec8009a..15ba728c6a 100755 --- a/pre-commit-check.sh +++ b/pre-commit-check.sh @@ -18,5 +18,5 @@ cargo clippy --all-features --all-targets -- -D warnings # Run tests # cargo test -# Run wasm tests -# ./crates/wasm-test-runner/run.sh +# Run integration tests +# cargo run -p tlsn-harness --release -- test
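With the wasm test runner and the `tests-integration` crate removed, the consolidated harness becomes the single entry point, as the updated pre-commit hook shows. For reference, here is a minimal sketch of driving the native runner programmatically. It assumes `NativeRunner`, `TestConfig`, `DEFAULT_TEST_TIMEOUT`, and `TestOutput` are reachable from the harness crate root, which may not match the actual exports; the real wiring lives in crates/harness/bin/main.rs and src/cli.rs.

use anyhow::Result;
// Assumed re-exports from the harness crate root; concrete paths may differ.
use tlsn_harness::{NativeRunner, TestConfig, DEFAULT_TEST_TIMEOUT};

#[tokio::main]
async fn main() -> Result<()> {
    let mut runner = NativeRunner::new();
    // Starts the server fixture and prebuilds the PRF circuits.
    runner.start().await?;

    // Run one registered test case by name, with the default timeout.
    let output = runner
        .run_test(&TestConfig {
            name: "test_basic".to_string(),
            timeout: DEFAULT_TEST_TIMEOUT,
        })
        .await?;

    println!(
        "passed: {}, time: {}s, timed out: {}",
        output.passed, output.time, output.timed_out
    );

    runner.stop().await
}

In normal use the CLI invocation shown in pre-commit-check.sh (`cargo run -p tlsn-harness --release -- test`) is all that is needed.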