diff --git a/.cargo/config.toml b/.cargo/config.toml index 10762f94ca..9232b527ff 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,2 +1,3 @@ [target.wasm32-unknown-unknown] runner = "wasm-bindgen-test-runner" +rustflags = ["--cfg", 'getrandom_backend="wasm_js"'] diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml index 83da040fd2..bf3cf62d4d 100644 --- a/.github/workflows/codecov.yml +++ b/.github/workflows/codecov.yml @@ -14,7 +14,21 @@ jobs: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/install-action@cargo-llvm-cov - - run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info + - shell: bash + # Run llvm-cov _without_ the "aws-lc-rs-fips" or "rustls-aws-lc-rs-fips" features, since + # they have complex build requirements: + # https://github.com/aws/aws-lc/blob/3263ce2a553e4e917217fb487f8c6f488fcb1866/BUILDING.md#build-prerequisites + # + # This list of features was determined using: + # cargo metadata --format-version 1 --no-deps \ + # | jq -r ' .packages[].features | keys[]' \ + # | sort -u \ + # | grep -vFx -e 'default' -e 'aws-lc-rs-fips' -e 'rustls-aws-lc-rs-fips' \ + # | paste -sd ',' - + run: | + cargo llvm-cov \ + --features="arbitrary,async-io,async-std,aws-lc-rs,bloom,direct-log,fast-apple-datapath,futures-io,json-output,lock_tracking,log,platform-verifier,ring,runtime-async-std,runtime-smol,runtime-tokio,rustls,rustls-aws-lc-rs,rustls-log,rustls-ring,serde,serde_json,smol,tracing" \ + --workspace --lcov --output-path lcov.info - name: Upload coverage to Codecov uses: codecov/codecov-action@v5 with: diff --git a/.github/workflows/project.yaml b/.github/workflows/project.yaml deleted file mode 100644 index 863440d784..0000000000 --- a/.github/workflows/project.yaml +++ /dev/null @@ -1,19 +0,0 @@ -name: Add PRs and Issues to Project - -on: - issues: - types: - - opened - pull_request: - types: - - opened - -jobs: - add-to-project: - name: Add to project - runs-on: 
ubuntu-latest - steps: - - uses: actions/add-to-project@v1.0.2 - with: - project-url: https://github.com/orgs/n0-computer/projects/1 - github-token: ${{ secrets.PROJECT_PAT }} \ No newline at end of file diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index a46fb48885..a9277b4828 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -6,97 +6,99 @@ on: pull_request: jobs: - test-freebsd: - # see https://github.com/actions/runner/issues/385 - # use https://github.com/vmactions/freebsd-vm for now - name: test on freebsd - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: test on freebsd - uses: vmactions/freebsd-vm@v1 - with: - usesh: true - mem: 4096 - copyback: false - prepare: | - pkg install -y curl - curl https://sh.rustup.rs -sSf --output rustup.sh - sh rustup.sh -y --profile minimal --default-toolchain stable - run: | - export PATH="$HOME/.cargo/bin:$PATH" - echo "===== rustc --version =====" - rustc --version - echo "===== freebsd-version =====" - freebsd-version - - cargo build --all-targets && cargo test && cargo test -- --ignored stress && cargo test --manifest-path fuzz/Cargo.toml && cargo test -p iroh-quinn-udp --benches - - test-netbsd: - name: test on netbsd - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: test on netbsd - uses: vmactions/netbsd-vm@v1 - with: - usesh: true - mem: 4096 - copyback: false - prepare: | - export PATH="/usr/sbin:/sbin:$PATH" - pkg_add curl - curl https://sh.rustup.rs -sSf --output rustup.sh - sh rustup.sh -y --profile minimal --default-toolchain stable - run: | - export PATH="$HOME/.cargo/bin:$PATH" - echo "===== rustc --version =====" - rustc --version - echo "===== uname -a =====" - uname -a - - cargo build --all-targets && cargo test && cargo test -- --ignored stress && cargo test --manifest-path fuzz/Cargo.toml && cargo test -p iroh-quinn-udp --benches - - test-solaris: - name: test on solaris - runs-on: ubuntu-latest - steps: - - 
uses: actions/checkout@v4 - - name: test on Solaris - uses: vmactions/solaris-vm@v1 - with: - release: "11.4-gcc" - usesh: true - mem: 4096 - copyback: false - prepare: | - source <(curl -s https://raw.githubusercontent.com/psumbera/solaris-rust/refs/heads/main/sh.rust-web-install) - echo "~~~~ rustc --version ~~~~" - rustc --version - echo "~~~~ Solaris-version ~~~~" - uname -a - # Unlike others, don't un-ignore stress tests, because they hang on Solaris - run: | - export PATH=$HOME/.rust_solaris/bin:$PATH - cargo build --all-targets && cargo test --manifest-path fuzz/Cargo.toml && cargo test -p iroh-quinn-udp --benches - - test-illumos: - name: test on illumos - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: test on Illumos - uses: vmactions/omnios-vm@v1 - with: - usesh: true - mem: 4096 - copyback: false - prepare: | - pkg install gcc14 curl pkg-config glib2 - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal - run: | - . 
"$HOME/.cargo/env" - cargo build --all-targets && cargo test && cargo test -- --ignored stress && cargo test --manifest-path fuzz/Cargo.toml && cargo test -p iroh-quinn-udp --benches + # test-freebsd: + # # see https://github.com/actions/runner/issues/385 + # # use https://github.com/vmactions/freebsd-vm for now + # name: test on freebsd + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v4 + # - name: test on freebsd + # uses: vmactions/freebsd-vm@v1 + # with: + # usesh: true + # mem: 4096 + # copyback: false + # prepare: | + # pkg install -y curl + # curl https://sh.rustup.rs -sSf --output rustup.sh + # sh rustup.sh -y --profile minimal --default-toolchain stable + # run: | + # export PATH="$HOME/.cargo/bin:$PATH" + # echo "===== rustc --version =====" + # rustc --version + # echo "===== freebsd-version =====" + # freebsd-version + + # cargo build --locked --all-targets && cargo test --locked && cargo test --locked -- --ignored stress && cargo test --locked --manifest-path fuzz/Cargo.toml && cargo test --locked -p iroh-quinn-udp --benches + + # test-netbsd: + # name: test on netbsd + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v4 + # - name: test on netbsd + # uses: vmactions/netbsd-vm@v1 + # with: + # usesh: true + # mem: 4096 + # copyback: false + # prepare: | + # export PATH="/usr/sbin:/sbin:$PATH" + # pkg_add curl + # curl https://sh.rustup.rs -sSf --output rustup.sh + # sh rustup.sh -y --profile minimal --default-toolchain stable + # run: | + # export PATH="$HOME/.cargo/bin:$PATH" + # echo "===== rustc --version =====" + # rustc --version + # echo "===== uname -a =====" + # uname -a + + # cargo build --locked --all-targets && cargo test --locked && cargo test --locked -- --ignored stress && cargo test --locked --manifest-path fuzz/Cargo.toml && cargo test --locked -p iroh-quinn-udp --benches + + # test-solaris: + # name: test on solaris + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v4 + # - name: test 
on Solaris + # uses: vmactions/solaris-vm@v1 + # with: + # release: "11.4-gcc" + # usesh: true + # mem: 4096 + # copyback: false + # prepare: | + # source <(curl -s https://raw.githubusercontent.com/psumbera/solaris-rust/refs/heads/main/sh.rust-web-install) + # echo "~~~~ rustc --version ~~~~" + # rustc --version + # echo "~~~~ Solaris-version ~~~~" + # uname -a + # # Unlike others, don't un-ignore stress tests, because they hang on Solaris + # run: | + # export PATH=$HOME/.rust_solaris/bin:$PATH + # # Workaround for https://github.com/quinn-rs/quinn/issues/2218 + # export CARGO_HTTP_MULTIPLEXING=false + # cargo build --locked --all-targets && cargo test --locked --manifest-path fuzz/Cargo.toml && cargo test --locked -p iroh-quinn-udp --benches + + # test-illumos: + # name: test on illumos + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v4 + # - name: test on Illumos + # uses: vmactions/omnios-vm@v1 + # with: + # usesh: true + # mem: 4096 + # copyback: false + # prepare: | + # pkg install gcc14 curl pkg-config glib2 + # curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile minimal + # run: | + # . 
"$HOME/.cargo/env" + # cargo build --locked --all-targets && cargo test --locked && cargo test --locked -- --ignored stress && cargo test --locked --manifest-path fuzz/Cargo.toml && cargo test --locked -p iroh-quinn-udp --benches test: strategy: @@ -118,16 +120,21 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: mozilla-actions/sccache-action@v0.0.4 + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ matrix.rust }} - - run: cargo build --all-targets - - run: cargo test - - run: cargo test -- --ignored stress - - run: cargo test --manifest-path fuzz/Cargo.toml + - uses: Swatinem/rust-cache@v2 + - run: cargo build --locked --all-targets + - run: cargo test --locked + - run: cargo test --locked -p iroh-quinn-udp --features fast-apple-datapath + if: runner.os == 'macOS' + - run: cargo test --locked -- --ignored stress + - run: cargo test --locked --manifest-path fuzz/Cargo.toml if: ${{ matrix.rust }} == "stable" - - run: cargo test -p iroh-quinn-udp --benches + - run: cargo test --locked -p iroh-quinn-udp --benches + - run: cargo test --locked -p iroh-quinn-udp --benches --features fast-apple-datapath + if: runner.os == 'macOS' test-aws-lc-rs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@stable - uses: Swatinem/rust-cache@v2 # Prevent feature unification from selecting *ring* as the crypto provider - - run: RUST_BACKTRACE=1 cargo test --manifest-path quinn-proto/Cargo.toml --no-default-features --features rustls-aws-lc-rs - - run: RUST_BACKTRACE=1 cargo test --manifest-path quinn/Cargo.toml --no-default-features --features rustls-aws-lc-rs,runtime-tokio + - run: RUST_BACKTRACE=1 cargo test --locked --manifest-path quinn-proto/Cargo.toml --no-default-features --features rustls-aws-lc-rs + - run: RUST_BACKTRACE=1 cargo test --locked --manifest-path quinn/Cargo.toml --no-default-features --features rustls-aws-lc-rs,runtime-tokio # FIPS - - run: RUST_BACKTRACE=1 
cargo test --manifest-path quinn-proto/Cargo.toml --no-default-features --features rustls-aws-lc-rs-fips - - run: RUST_BACKTRACE=1 cargo test --manifest-path quinn/Cargo.toml --no-default-features --features rustls-aws-lc-rs-fips,runtime-tokio + - run: RUST_BACKTRACE=1 cargo test --locked --manifest-path quinn-proto/Cargo.toml --no-default-features --features rustls-aws-lc-rs-fips + - run: RUST_BACKTRACE=1 cargo test --locked --manifest-path quinn/Cargo.toml --no-default-features --features rustls-aws-lc-rs-fips,runtime-tokio wasm_test: name: test wasm32-unknown-unknown runs-on: ubuntu-latest steps: - - name: Checkout sources - uses: actions/checkout@v4 - - - name: Install stable toolchain - uses: dtolnay/rust-toolchain@stable - - - name: Add wasm target - run: rustup target add wasm32-unknown-unknown - - - name: Install nodejs v20 - uses: actions/setup-node@v4 + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - run: rustup target add wasm32-unknown-unknown + - uses: actions/setup-node@v4 with: node-version: 20 + - uses: bytecodealliance/actions/wasm-tools/setup@v1 + - uses: cargo-bins/cargo-binstall@main - - name: Setup `wasm-tools` - uses: bytecodealliance/actions/wasm-tools/setup@v1 - - - name: Install cargo binstall - uses: cargo-bins/cargo-binstall@main - - - name: build wasm32 tests (quinn-proto) - run: cargo test -p iroh-quinn-proto --target wasm32-unknown-unknown --no-run + - run: cargo test --locked -p iroh-quinn-proto --target wasm32-unknown-unknown --no-run + - run: cargo check --locked -p iroh-quinn-udp --target wasm32-unknown-unknown --no-default-features --features=tracing,log + - run: cargo rustc --locked -p iroh-quinn --target wasm32-unknown-unknown --no-default-features --features=log,platform-verifier,rustls-ring --crate-type=cdylib # If the Wasm file contains any 'import "env"' declarations, then # some non-Wasm-compatible code made it into the final code. 
- - name: Check for 'import "env"' in Wasm + - name: Ensure no 'import "env"' in quinn_proto Wasm run: | - ! wasm-tools print --skeleton target/wasm32-unknown-unknown/debug/deps/iroh_quinn_proto-*.wasm | grep 'import "env"' - - - name: Install wasm-bindgen-test-runner - run: cargo binstall wasm-bindgen-cli --locked --no-confirm + ! wasm-tools print --skeleton target/wasm32-unknown-unknown/debug/deps/iroh_quinn_proto-*.wasm | grep 'import "env"' + - name: Ensure no 'import "env"' in quinn Wasm + run: | + ! wasm-tools print --skeleton target/wasm32-unknown-unknown/debug/iroh_quinn.wasm | grep 'import "env"' - - name: wasm32 test (quinn-proto) - run: cargo test -p iroh-quinn-proto --target wasm32-unknown-unknown + - run: cargo binstall wasm-bindgen-cli --locked --no-confirm + - run: cargo test --locked -p iroh-quinn-proto --target wasm32-unknown-unknown msrv: runs-on: ubuntu-latest @@ -188,9 +185,10 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: mozilla-actions/sccache-action@v0.0.4 + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: dtolnay/rust-toolchain@1.71.0 - - run: cargo check --lib --all-features -p iroh-quinn-udp -p iroh-quinn-proto -p iroh-quinn + - uses: Swatinem/rust-cache@v2 + - run: cargo check --locked --lib --all-features -p iroh-quinn-udp -p iroh-quinn-proto -p iroh-quinn lint: runs-on: ubuntu-latest @@ -199,24 +197,24 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: mozilla-actions/sccache-action@v0.0.4 + - uses: mozilla-actions/sccache-action@v0.0.9 - uses: dtolnay/rust-toolchain@stable with: components: rustfmt, clippy - run: cargo fmt --all -- --check - - run: cargo check --manifest-path quinn/Cargo.toml --all-targets --no-default-features - - run: cargo clippy --all-targets -- -D warnings + - run: cargo check --locked --manifest-path quinn/Cargo.toml --all-targets --no-default-features + - run: cargo clippy --locked --all-targets -- -D warnings - uses: dtolnay/rust-toolchain@stable with: 
components: clippy - name: doc - run: cargo doc --no-deps --document-private-items + run: cargo doc --locked --no-deps --document-private-items env: RUSTDOCFLAGS: -Dwarnings - name: lint fuzz run: | cd fuzz - cargo clippy -- -D warnings + cargo clippy --locked -- -D warnings audit: runs-on: ubuntu-latest @@ -285,3 +283,18 @@ jobs: api-level: ${{ matrix.api-level }} arch: ${{ matrix.emulator-arch }} script: .github/workflows/rust-android-run-tests-on-emulator.sh + + # features: + # strategy: + # matrix: + # os: [ubuntu-latest, macos-latest, windows-latest] + # runs-on: ${{ matrix.os }} + # env: + # RUSTFLAGS: -Dwarnings + # # skip FIPS features outside of Linux + # SKIP_FEATURES: ${{ matrix.os != 'ubuntu-latest' && 'rustls-aws-lc-rs-fips,aws-lc-rs-fips' || '' }} + # steps: + # - uses: actions/checkout@v4 + # - uses: dtolnay/rust-toolchain@stable + # - uses: taiki-e/install-action@cargo-hack + # - run: cargo hack check --feature-powerset --depth 3 --optional-deps --no-dev-deps --ignore-private --skip "${{env.SKIP_FEATURES}}" diff --git a/.gitignore b/.gitignore index 1e9e04ba23..7cef681d89 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,6 @@ **/target/ **/*.rs.bk -Cargo.lock .idea .DS_Store diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000000..77aede8712 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,2824 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + +[[package]] +name = "anstream" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" + +[[package]] +name = "anstyle-parse" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +dependencies = [ + "anstyle", + "once_cell", + "windows-sys 0.59.0", +] + +[[package]] +name = "anyhow" +version = "1.0.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" + +[[package]] +name = "arbitrary" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" +dependencies = [ + "derive_arbitrary", +] + +[[package]] +name = "assert_matches" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-executor" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" +dependencies = [ + "async-task", + "concurrent-queue", + "fastrand", + "futures-lite", + "slab", +] + +[[package]] +name = "async-fs" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a" +dependencies = [ + "async-lock", + 
"blocking", + "futures-lite", +] + +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.3.1", + "async-executor", + "async-io", + "async-lock", + "blocking", + "futures-lite", + "once_cell", +] + +[[package]] +name = "async-io" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" +dependencies = [ + "async-lock", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite", + "parking", + "polling", + "rustix", + "slab", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "async-lock" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +dependencies = [ + "event-listener 5.4.0", + "event-listener-strategy", + "pin-project-lite", +] + +[[package]] +name = "async-net" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" +dependencies = [ + "async-io", + "blocking", + "futures-lite", +] + +[[package]] +name = "async-process" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" +dependencies = [ + "async-channel 2.3.1", + "async-io", + "async-lock", + "async-signal", + "async-task", + "blocking", + "cfg-if", + "event-listener 5.4.0", + "futures-lite", + "rustix", + "tracing", +] + +[[package]] +name = "async-signal" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" +dependencies = [ + "async-io", + "async-lock", + 
"atomic-waker", + "cfg-if", + "futures-core", + "futures-io", + "rustix", + "signal-hook-registry", + "slab", + "windows-sys 0.59.0", +] + +[[package]] +name = "async-std" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "730294c1c08c2e0f85759590518f6333f0d5a0a766a27d519c1b244c3dfd8a24" +dependencies = [ + "async-channel 1.9.0", + "async-global-executor", + "async-io", + "async-lock", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "aws-lc-fips-sys" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d9c2e952a1f57e8cbc78b058a968639e70c4ce8b9c0a5e6363d4e5670eed795" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", + "regex", +] + +[[package]] +name = "aws-lc-rs" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b756939cb2f8dc900aa6dcd505e6e2428e9cae7ff7b028c49e3946efa70878" +dependencies = [ + "aws-lc-fips-sys", + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b9f7720b74ed28ca77f90769a71fd8c637a0137f6fae4ae947e1050229cff57f" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", +] + +[[package]] +name = "backtrace" +version = "0.3.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets 0.52.6", +] + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bench" +version = "0.1.0" +dependencies = [ + "anyhow", + "bytes", + "clap", + "hdrhistogram", + "iroh-quinn", + "rcgen", + "rustls", + "tokio", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "bencher" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dfdb4953a096c551ce9ace855a604d702e6e62d77fac690575ae347571717f5" + +[[package]] +name = "bindgen" +version = "0.69.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "itertools", + "lazy_static", + "lazycell", + "log", + "prettyplease", + "proc-macro2", + "quote", + "regex", + "rustc-hash 1.1.0", + "shlex", + "syn", + "which", +] + +[[package]] +name = "bitflags" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" + +[[package]] +name = "blocking" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" +dependencies = [ + "async-channel 2.3.1", + "async-task", + "futures-io", + "futures-lite", + 
"piper", +] + +[[package]] +name = "book" +version = "0.1.0" +dependencies = [ + "anyhow", + "bytes", + "iroh-quinn", + "rcgen", + "rustls", +] + +[[package]] +name = "bumpalo" +version = "3.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" + +[[package]] +name = "bytemuck" +version = "1.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9134a6ef01ce4b366b50689c94f82c14bc72bc5d0386829828a2e2752ef7958c" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" + +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + +[[package]] +name = "cc" +version = "1.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "525046617d8376e3db1deffb079e91cef90a89fc3ca5c185bbf8c9ecdd15cd5c" +dependencies = [ + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + 
+[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "ciborium" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" + +[[package]] +name = "ciborium-ll" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" +dependencies = [ + "ciborium-io", + "half", +] + +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + +[[package]] +name = "clap" +version = "4.5.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.5.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = 
"clap_lex" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" + +[[package]] +name = "cmake" +version = "0.1.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +dependencies = [ + "cc", +] + +[[package]] +name = "colorchoice" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" + +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "core-foundation" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "crc" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + 
+[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "futures", + "is-terminal", + "itertools", + "num-traits", + "once_cell", + "oorandom", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "tokio", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crunchy" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" + +[[package]] +name = "deranged" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "derive_arbitrary" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "directories-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" +dependencies = [ + "cfg-if", + "dirs-sys-next", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "errno" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "5.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" +dependencies = [ + "event-listener 5.4.0", + "pin-project-lite", +] + +[[package]] +name = "fastbloom" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"27cea6e7f512d43b098939ff4d5a5d6fe3db07971e1d05176fe26c642d33f5b8" +dependencies = [ + "getrandom 0.3.2", + "rand", + "siphasher", + "wide", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-lite" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +dependencies 
= [ + "fastrand", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-sink", + "futures-task", + "pin-project-lite", + "pin-utils", +] + +[[package]] +name = "fuzz" +version = "0.1.0" +dependencies = [ + "arbitrary", + "iroh-quinn-proto", + "libfuzzer-sys", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", +] + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "glob" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" + +[[package]] +name = "gloo-timers" +version = 
"0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "half" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7db2ff139bba50379da6aa0766b52fdcb62cb5b263009b09ed58ba604e14bbd1" +dependencies = [ + "cfg-if", + "crunchy", +] + +[[package]] +name = "hdrhistogram" +version = "7.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" +dependencies = [ + "byteorder", + "num-traits", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + +[[package]] +name = "hermit-abi" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e" + +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "home" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + 
"displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "idna" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "iroh-quinn" +version = "0.13.0" +dependencies = [ + "anyhow", + "async-executor", + "async-fs", + "async-global-executor", + "async-io", + "async-std", + "bencher", + "bytes", + "cfg_aliases", + "clap", + "crc", + "directories-next", + "futures-io", + "iroh-quinn-proto", + "iroh-quinn-udp", + "pin-project-lite", + "rand", + "rcgen", + "rustc-hash 2.1.1", + "rustls", + "rustls-pemfile", + "smol", + "socket2", + "thiserror 2.0.12", + "tokio", + "tokio-stream", + "tracing", + "tracing-futures", + "tracing-subscriber", + "url", + "web-time", +] + +[[package]] +name = "iroh-quinn-proto" +version = "0.13.0" +dependencies = [ + "arbitrary", + "assert_matches", + "aws-lc-rs", + "bytes", + "fastbloom", + "getrandom 0.3.2", + "hex-literal", + 
"lazy_static", + "lru-slab", + "rand", + "rand_pcg", + "rcgen", + "ring", + "rustc-hash 2.1.1", + "rustls", + "rustls-pki-types", + "rustls-platform-verifier", + "slab", + "thiserror 2.0.12", + "tinyvec", + "tracing", + "tracing-subscriber", + "wasm-bindgen-test", + "web-time", +] + +[[package]] +name = "iroh-quinn-udp" +version = "0.5.12" +dependencies = [ + "cfg_aliases", + "criterion", + "libc", + "log", + "once_cell", + "socket2", + "tokio", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "is-terminal" +version = "0.4.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" +dependencies = [ + "hermit-abi 0.5.0", + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + +[[package]] +name = "jobserver" +version = "0.1.33" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +dependencies = [ + "getrandom 0.3.2", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + +[[package]] +name = "libc" +version = "0.2.171" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" + +[[package]] +name = "libfuzzer-sys" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf78f52d400cf2d84a3a973a78a592b4adc535739e0a5597a0da6f0c357adc75" +dependencies = [ + "arbitrary", + "cc", +] + +[[package]] +name = "libloading" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +dependencies = [ + "cfg-if", + "windows-targets 0.52.6", +] + +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + 
"bitflags", + "libc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" + +[[package]] +name = "litemap" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +dependencies = [ + "value-bag", +] + +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "minicov" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f27fe9f1cc3c22e1687f9446c2083c4c5fc7f0bcf1c7a86bdbded14985895b4b" +dependencies = [ + "cc", + "walkdir", +] + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff70ce3e48ae43fa075863cef62e8b43b71a4f2382229920e0df362592919430" +dependencies = [ + "adler2", +] + +[[package]] +name = 
"mio" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +dependencies = [ + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.52.0", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_threads" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" +dependencies = [ + "libc", +] + +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + +[[package]] +name = "pem" +version = "3.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +dependencies = [ + "base64", + "serde", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "perf" +version = "0.1.0" +dependencies = [ + "anyhow", + "bytes", + "clap", + "hdrhistogram", + "iroh-quinn", + "iroh-quinn-proto", + "rcgen", + "rustls", + "rustls-pemfile", + "serde", + "serde_json", + "socket2", + "tokio", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "piper" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +dependencies = [ + "atomic-waker", + "fastrand", + "futures-io", +] + +[[package]] +name = "polling" +version = "3.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi 0.4.0", + "pin-project-lite", + "rustix", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "664ec5419c51e34154eec046ebcba56312d5a2fc3b09a06da188e1ad21afadf6" +dependencies = [ + "proc-macro2", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha", + "rand_core", + "zerocopy", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.2", +] + +[[package]] +name = "rand_pcg" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b48ac3f7ffaab7fac4d2376632268aa5f89abdb55f7ebf8f4d11fffccb2320f7" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rcgen" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75e669e5202259b5314d1ea5397316ad400819437857b90861765f24c4cf80a2" +dependencies = [ + "pem", + "ring", + "rustls-pki-types", + "time", + "yasna", +] + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.15", + "libredox", + "thiserror 1.0.69", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.15", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + +[[package]] +name = "rustix" +version = "0.38.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustls" +version = "0.23.25" +source = "git+https://github.com/n0-computer/rustls?rev=be02113e7837df60953d02c2bdd0f4634fef3a80#be02113e7837df60953d02c2bdd0f4634fef3a80" +dependencies = [ + "aws-lc-rs", + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-native-certs" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +dependencies = [ + "openssl-probe", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "rustls-pki-types" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" +dependencies = [ + "web-time", +] + +[[package]] +name = "rustls-platform-verifier" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5467026f437b4cb2a533865eaa73eb840019a0916f4b9ec563c6e617e086c9" +dependencies = [ + "core-foundation", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.59.0", +] + 
+[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + +[[package]] +name = "rustls-webpki" +version = "0.103.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" +dependencies = [ + "aws-lc-rs", + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "safe_arch" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "security-framework" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" 
+version = "2.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +dependencies = [ + "libc", +] + +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" + +[[package]] +name = "smol" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a33bd3e260892199c3ccfc487c88b2da2265080acb316cd920da72fdfd7c599f" +dependencies = [ + "async-channel 2.3.1", + "async-executor", + "async-fs", + "async-io", + "async-lock", + "async-net", + "async-process", + "blocking", + "futures-lite", +] + +[[package]] +name = "socket2" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" 
+dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +dependencies = [ + "thiserror-impl 2.0.12", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "time" +version = "0.3.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +dependencies = [ + "deranged", + "itoa", + "libc", + "num-conv", + "num_threads", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" + +[[package]] +name = "time-macros" +version = "0.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "tinyvec" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.44.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" +dependencies = [ + "backtrace", + "libc", + "mio", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.52.0", +] + +[[package]] +name = "tokio-macros" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tracing" +version = "0.1.41" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +dependencies = [ + "once_cell", +] + +[[package]] +name = "tracing-futures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" +dependencies = [ + "pin-project", + "tracing", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "thread_local", + "time", + "tracing", + "tracing-core", +] + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", +] + +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "value-bag" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "943ce29a8a743eb10d6082545d861b24f9d1b160b7d741e0f2cdf726bec909c5" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.50" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-bindgen-test" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66c8d5e33ca3b6d9fa3b4676d774c5778031d27a578c2b007f905acf816152c3" +dependencies = [ + "js-sys", + "minicov", + "wasm-bindgen", + "wasm-bindgen-futures", + "wasm-bindgen-test-macro", +] + +[[package]] +name = "wasm-bindgen-test-macro" +version = "0.3.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17d5042cc5fa009658f9a7333ef24291b1291a25b6382dd68862a7f3b969f69b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "web-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki-root-certs" +version = "0.26.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09aed61f5e8d2c18344b3faa33a4c837855fe56642757754775548fee21386c4" +dependencies = [ + "rustls-pki-types", +] + +[[package]] +name = "which" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix", +] + +[[package]] +name = "wide" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41b5576b9a81633f3e8df296ce0063042a73507636cbe956c61133dd7034ab22" +dependencies = [ + "bytemuck", + "safe_arch", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.45.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" 
+version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags", +] + +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + +[[package]] +name = "yasna" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" +dependencies = [ + "time", +] + +[[package]] +name = "yoke" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.5" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" + +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/Cargo.toml 
b/Cargo.toml index 948227d2e3..4fc7c1582a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["quinn", "quinn-proto", "quinn-udp", "bench", "perf", "fuzz"] +members = ["quinn", "quinn-proto", "quinn-udp", "bench", "perf", "fuzz", "docs/book"] default-members = ["quinn", "quinn-proto", "quinn-udp", "bench", "perf"] resolver = "2" @@ -23,21 +23,24 @@ bytes = "1" clap = { version = "4.5", features = ["derive"] } crc = "3" directories-next = "2" +fastbloom = "0.9" futures-io = "0.3.19" -getrandom = { version = "0.2", default-features = false } +getrandom = { version = "0.3", default-features = false } hdrhistogram = { version = "7.2", default-features = false } hex-literal = "0.4" +lru-slab = "0.1.2" lazy_static = "1" log = "0.4.22" once_cell = "1.19" pin-project-lite = "0.2" -rand = "0.8" +rand = "0.9" rcgen = "0.13" ring = "0.17" rustc-hash = "2" -rustls = { version = "0.23.5", default-features = false, features = ["std"] } +# TODO(@divma): swap with version once rustls's changes are merged and published in a release +rustls = { git = "https://github.com/n0-computer/rustls", rev = "be02113e7837df60953d02c2bdd0f4634fef3a80", default-features = false, features = ["std"] } rustls-pemfile = "2" -rustls-platform-verifier = "0.4" +rustls-platform-verifier = "0.5" rustls-pki-types = "1.7" serde = { version = "1.0", features = ["derive"] } serde_json = "1" @@ -54,6 +57,7 @@ url = "2" wasm-bindgen-test = { version = "0.3.45" } web-time = "1" windows-sys = { version = ">=0.52, <=0.59", features = ["Win32_Foundation", "Win32_System_IO", "Win32_Networking_WinSock"] } +cfg_aliases = "0.2" # Fix minimal dependencies for indirect deps async-global-executor = "2.4.1" @@ -65,3 +69,7 @@ debug = true [profile.release] debug = true + +[patch.crates-io] +# TODO(@divma): once rustls's changes are merged and published in a release, either swap or re-eval if it's needed at all by then +rustls = { git = "https://github.com/n0-computer/rustls", rev = 
"be02113e7837df60953d02c2bdd0f4634fef3a80" } diff --git a/bench/src/bin/bulk.rs b/bench/src/bin/bulk.rs index d305db2237..41f2110860 100644 --- a/bench/src/bin/bulk.rs +++ b/bench/src/bin/bulk.rs @@ -11,10 +11,9 @@ use tokio::sync::Semaphore; use tracing::{info, trace}; use bench::{ - configure_tracing_subscriber, connect_client, drain_stream, rt, send_data_on_stream, + Opt, configure_tracing_subscriber, connect_client, drain_stream, rt, send_data_on_stream, server_endpoint, stats::{Stats, TransferResult}, - Opt, }; fn main() { diff --git a/bench/src/lib.rs b/bench/src/lib.rs index 081ec239b2..1eef325107 100644 --- a/bench/src/lib.rs +++ b/bench/src/lib.rs @@ -11,8 +11,8 @@ use bytes::Bytes; use clap::Parser; use quinn::crypto::rustls::QuicClientConfig; use rustls::{ - pki_types::{CertificateDer, PrivateKeyDer}, RootCertStore, + pki_types::{CertificateDer, PrivateKeyDer}, }; use tokio::runtime::{Builder, Runtime}; use tracing::trace; diff --git a/deny.toml b/deny.toml index 1fb8356f20..49ef4ea547 100644 --- a/deny.toml +++ b/deny.toml @@ -6,8 +6,10 @@ allow = [ "ISC", "MIT", "MPL-2.0", + "NCSA", "OpenSSL", "Unicode-3.0", + "Unicode-DFS-2016", ] private = { ignore = true } @@ -15,3 +17,14 @@ private = { ignore = true } name = "ring" expression = "ISC AND MIT AND OpenSSL" license-files = [{ path = "LICENSE", hash = 0xbd0eed23 }] + +[advisories] +ignore = [ + # `paste` is unmaintained + "RUSTSEC-2024-0436", +] + +[sources] +allow-git = [ + "https://github.com/n0-computer/rustls" +] diff --git a/docs/book/Cargo.toml b/docs/book/Cargo.toml new file mode 100644 index 0000000000..10f5b48f9f --- /dev/null +++ b/docs/book/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "book" +version = "0.1.0" +rust-version.workspace = true +edition.workspace = true +license.workspace = true +repository.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +anyhow.workspace = true +bytes = { workspace = true } +quinn = { version = "0.13", path = 
"../../quinn", package = "iroh-quinn" } +rcgen.workspace = true +rustls.workspace = true diff --git a/docs/book/src/bin/certificate.rs b/docs/book/src/bin/certificate.rs new file mode 100644 index 0000000000..40de936981 --- /dev/null +++ b/docs/book/src/bin/certificate.rs @@ -0,0 +1,106 @@ +use std::{error::Error, sync::Arc}; + +use quinn::{ + ClientConfig, + crypto::rustls::{NoInitialCipherSuite, QuicClientConfig}, +}; +use rustls::{ + DigitallySignedStruct, SignatureScheme, + client::danger, + crypto::{CryptoProvider, verify_tls12_signature, verify_tls13_signature}, + pki_types::{ + CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer, ServerName, UnixTime, pem::PemObject, + }, +}; + +#[allow(unused_variables)] +fn main() { + let (self_signed_certs, self_signed_key) = generate_self_signed_cert().unwrap(); + let (certs, key) = read_certs_from_file().unwrap(); + let server_config = quinn::ServerConfig::with_single_cert(certs, key); + let client_config = quinn::ClientConfig::with_platform_verifier(); +} + +#[allow(dead_code)] // Included in `certificate.md` +fn configure_client() -> Result { + let crypto = rustls::ClientConfig::builder() + .dangerous() + .with_custom_certificate_verifier(SkipServerVerification::new()) + .with_no_client_auth(); + + Ok(ClientConfig::new(Arc::new(QuicClientConfig::try_from( + crypto, + )?))) +} + +// Implementation of `ServerCertVerifier` that verifies everything as trustworthy. 
+#[derive(Debug)] +struct SkipServerVerification(Arc); + +impl SkipServerVerification { + fn new() -> Arc { + Arc::new(Self(Arc::new(rustls::crypto::ring::default_provider()))) + } +} + +impl danger::ServerCertVerifier for SkipServerVerification { + fn verify_server_cert( + &self, + _end_entity: &CertificateDer<'_>, + _intermediates: &[CertificateDer<'_>], + _server_name: &ServerName<'_>, + _ocsp: &[u8], + _now: UnixTime, + ) -> Result { + Ok(danger::ServerCertVerified::assertion()) + } + fn verify_tls12_signature( + &self, + message: &[u8], + cert: &CertificateDer<'_>, + dss: &DigitallySignedStruct, + ) -> Result { + verify_tls12_signature( + message, + cert, + dss, + &self.0.signature_verification_algorithms, + ) + } + + fn verify_tls13_signature( + &self, + message: &[u8], + cert: &CertificateDer<'_>, + dss: &DigitallySignedStruct, + ) -> Result { + verify_tls13_signature( + message, + cert, + dss, + &self.0.signature_verification_algorithms, + ) + } + + fn supported_verify_schemes(&self) -> Vec { + self.0.signature_verification_algorithms.supported_schemes() + } +} + +fn generate_self_signed_cert() +-> Result<(CertificateDer<'static>, PrivatePkcs8KeyDer<'static>), Box> { + let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_string()])?; + let cert_der = CertificateDer::from(cert.cert); + let key = PrivatePkcs8KeyDer::from(cert.key_pair.serialize_der()); + Ok((cert_der, key)) +} + +fn read_certs_from_file() +-> Result<(Vec>, PrivateKeyDer<'static>), Box> { + let certs = CertificateDer::pem_file_iter("./fullchain.pem") + .unwrap() + .map(|cert| cert.unwrap()) + .collect(); + let key = PrivateKeyDer::from_pem_file("./privkey.pem").unwrap(); + Ok((certs, key)) +} diff --git a/docs/book/src/bin/data-transfer.rs b/docs/book/src/bin/data-transfer.rs new file mode 100644 index 0000000000..cc027e12e5 --- /dev/null +++ b/docs/book/src/bin/data-transfer.rs @@ -0,0 +1,56 @@ +use bytes::Bytes; +use quinn::Connection; + +fn main() {} + +#[allow(dead_code, 
unused_variables)] // Included in `data-transfer.md` +async fn open_bidirectional_stream(connection: Connection) -> anyhow::Result<()> { + let (mut send, mut recv) = connection.open_bi().await?; + send.write_all(b"test").await?; + send.finish()?; + let received = recv.read_to_end(10).await?; + Ok(()) +} + +#[allow(dead_code)] // Included in `data-transfer.md` +async fn receive_bidirectional_stream(connection: Connection) -> anyhow::Result<()> { + while let Ok((mut send, mut recv)) = connection.accept_bi().await { + // Because it is a bidirectional stream, we can both send and receive. + println!("request: {:?}", recv.read_to_end(50).await?); + send.write_all(b"response").await?; + send.finish()?; + } + Ok(()) +} + +#[allow(dead_code)] // Included in `data-transfer.md` +async fn open_unidirectional_stream(connection: Connection) -> anyhow::Result<()> { + let mut send = connection.open_uni().await?; + send.write_all(b"test").await?; + send.finish()?; + Ok(()) +} + +#[allow(dead_code)] // Included in `data-transfer.md` +async fn receive_unidirectional_stream(connection: Connection) -> anyhow::Result<()> { + while let Ok(mut recv) = connection.accept_uni().await { + // Because it is a unidirectional stream, we can only receive not send back. + println!("{:?}", recv.read_to_end(50).await?); + } + Ok(()) +} + +#[allow(dead_code)] // Included in `data-transfer.md` +async fn send_unreliable(connection: Connection) -> anyhow::Result<()> { + connection.send_datagram(Bytes::from(&b"test"[..]))?; + Ok(()) +} + +#[allow(dead_code)] // Included in `data-transfer.md` +async fn receive_datagram(connection: Connection) -> anyhow::Result<()> { + while let Ok(received_bytes) = connection.read_datagram().await { + // Because it is a unidirectional stream, we can only receive not send back. 
+ println!("request: {:?}", received_bytes); + } + Ok(()) +} diff --git a/docs/book/src/bin/set-up-connection.rs b/docs/book/src/bin/set-up-connection.rs new file mode 100644 index 0000000000..2a1c2b0a86 --- /dev/null +++ b/docs/book/src/bin/set-up-connection.rs @@ -0,0 +1,38 @@ +use quinn::{Endpoint, ServerConfig}; +use std::error::Error; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +fn main() {} + +#[allow(dead_code, unused_variables)] // Included in `set-up-connection.md` +async fn server(config: ServerConfig) -> Result<(), Box> { + // Bind this endpoint to a UDP socket on the given server address. + let endpoint = Endpoint::server(config, SERVER_ADDR)?; + + // Start iterating over incoming connections. + while let Some(conn) = endpoint.accept().await { + let connection = conn.await?; + + // Save connection somewhere, start transferring, receiving data, see DataTransfer tutorial. + } + + Ok(()) +} + +#[allow(dead_code, unused_variables)] // Included in `set-up-connection.md` +async fn client() -> Result<(), Box> { + // Bind this endpoint to a UDP socket on the given client address. + let endpoint = Endpoint::client(CLIENT_ADDR)?; + + // Connect to the server passing in the server name which is supposed to be in the server certificate. + let connection = endpoint.connect(SERVER_ADDR, SERVER_NAME)?.await?; + + // Start transferring, receiving data, see data transfer page. 
+ + Ok(()) +} + +const SERVER_NAME: &str = "localhost"; +const LOCALHOST_V4: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); +const CLIENT_ADDR: SocketAddr = SocketAddr::new(LOCALHOST_V4, 5000); +const SERVER_ADDR: SocketAddr = SocketAddr::new(LOCALHOST_V4, 5001); diff --git a/docs/book/src/quinn/certificate.md b/docs/book/src/quinn/certificate.md index b2973997cd..1c52a8c694 100644 --- a/docs/book/src/quinn/certificate.md +++ b/docs/book/src/quinn/certificate.md @@ -12,48 +12,20 @@ When the [rustls][3] `dangerous_configuration` feature flag is enabled, a client Start by adding a [rustls][3] dependency with the `dangerous_configuration` feature flag to your `Cargo.toml` file. ```toml -quinn = "*" -rustls = { version = "*", features = ["dangerous_configuration", "quic"] } +quinn = "0.11" +rustls = "0.23" ``` Then, allow the client to skip the certificate validation by implementing [ServerCertVerifier][ServerCertVerifier] and letting it assert verification for any server. ```rust -// Implementation of `ServerCertVerifier` that verifies everything as trustworthy. -struct SkipServerVerification; - -impl SkipServerVerification { - fn new() -> Arc { - Arc::new(Self) - } -} - -impl rustls::client::ServerCertVerifier for SkipServerVerification { - fn verify_server_cert( - &self, - _end_entity: &rustls::Certificate, - _intermediates: &[rustls::Certificate], - _server_name: &rustls::ServerName, - _scts: &mut dyn Iterator, - _ocsp_response: &[u8], - _now: std::time::SystemTime, - ) -> Result { - Ok(rustls::client::ServerCertVerified::assertion()) - } -} +{{#include ../bin/certificate.rs:36:88}} ``` After that, modify the [ClientConfig][ClientConfig] to use this [ServerCertVerifier][ServerCertVerifier] implementation. 
```rust -fn configure_client() -> ClientConfig { - let crypto = rustls::ClientConfig::builder() - .with_safe_defaults() - .with_custom_certificate_verifier(SkipServerVerification::new()) - .with_no_client_auth(); - - ClientConfig::new(Arc::new(crypto)) -} +{{#include ../bin/certificate.rs:25:34}} ``` Finally, if you plug this [ClientConfig][ClientConfig] into the [Endpoint::set_default_client_config()][set_default_client_config] your client endpoint should verify all connections as trustworthy. @@ -73,15 +45,10 @@ This example uses [rcgen][4] to generate a certificate. Let's look at an example: ```rust -fn generate_self_signed_cert() -> Result<(rustls::Certificate, rustls::PrivateKey), Box> -{ - let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_string()])?; - let key = rustls::PrivateKey(cert.serialize_private_key_der()); - Ok((rustls::Certificate(cert.serialize_der()?), key)) -} +{{#include ../bin/certificate.rs:90:96}} ``` -*Note that [generate_simple_self_signed][generate_simple_self_signed] returns a [Certificate][2] that can be serialized to both `.der` and `.pem` formats.* +_Note that [generate_simple_self_signed][generate_simple_self_signed] returns a [Certificate][2] that can be serialized to both `.der` and `.pem` formats._ ### Non-self-signed Certificates @@ -101,27 +68,7 @@ certbot asks for the required data and writes the certificates to `fullchain.pem These files can then be referenced in code. ```rust -use std::{error::Error, fs::File, io::BufReader}; - -pub fn read_certs_from_file( -) -> Result<(Vec, rustls::PrivateKey), Box> { - let mut cert_chain_reader = BufReader::new(File::open("./fullchain.pem")?); - let certs = rustls_pemfile::certs(&mut cert_chain_reader)? 
- .into_iter() - .map(rustls::Certificate) - .collect(); - - let mut key_reader = BufReader::new(File::open("./privkey.pem")?); - // if the file starts with "BEGIN RSA PRIVATE KEY" - // let mut keys = rustls_pemfile::rsa_private_keys(&mut key_reader)?; - // if the file starts with "BEGIN PRIVATE KEY" - let mut keys = rustls_pemfile::pkcs8_private_keys(&mut key_reader)?; - - assert_eq!(keys.len(), 1); - let key = rustls::PrivateKey(keys.remove(0)); - - Ok((certs, key)) -} +{{#include ../bin/certificate.rs:98:106}} ``` ### Configuring Certificates @@ -132,7 +79,7 @@ After configuring plug the configuration into the `Endpoint`. **Configure Server** ```rust -let server_config = ServerConfig::with_single_cert(certs, key)?; +{{#include ../bin/certificate.rs:20}} ``` This is the only thing you need to do for your server to be secured. @@ -140,7 +87,7 @@ This is the only thing you need to do for your server to be secured. **Configure Client** ```rust -let client_config = ClientConfig::with_native_roots(); +{{#include ../bin/certificate.rs:21}} ``` This is the only thing you need to do for your client to trust a server certificate signed by a conventional certificate authority. @@ -156,7 +103,6 @@ This is the only thing you need to do for your client to trust a server certific [5]: https://en.wikipedia.org/wiki/Self-signed_certificate#:~:text=In%20cryptography%20and%20computer%20security,a%20CA%20aim%20to%20provide. 
[6]: https://letsencrypt.org/getting-started/ [7]: https://certbot.eff.org/instructions - [ClientConfig]: https://docs.rs/quinn/latest/quinn/struct.ClientConfig.html [ServerCertVerifier]: https://docs.rs/rustls/latest/rustls/client/trait.ServerCertVerifier.html [set_default_client_config]: https://docs.rs/quinn/latest/quinn/struct.Endpoint.html#method.set_default_client_config diff --git a/docs/book/src/quinn/data-transfer.md b/docs/book/src/quinn/data-transfer.md index c6937a2765..75eb3da98f 100644 --- a/docs/book/src/quinn/data-transfer.md +++ b/docs/book/src/quinn/data-transfer.md @@ -6,19 +6,19 @@ This chapter continues with the subject of sending data over this connection. ## Multiplexing -Multiplexing is the act of combining data from multiple streams into a single stream. -This can have a significant positive effect on the performance of the application. -With QUIC, the programmer is in full control over the stream allocation. - +Multiplexing is the act of combining data from multiple streams into a single stream. +This can have a significant positive effect on the performance of the application. +With QUIC, the programmer is in full control over the stream allocation. + ## Stream Types QUIC provides support for both stream and message-based communication. Streams and messages can be initiated both on the client and server. -| Type | Description | Reference | -| :----- | :----- | :----- | -| **Bidirectional Stream** | two way stream communication. | see [open_bi][open_bi] | -| **Unidirectional Stream** | one way stream communication. | see [open_uni][open_uni] | +| Type | Description | Reference | +| :----------------------------------- | :-------------------------------------- | :--------------------------------- | +| **Bidirectional Stream** | two way stream communication. | see [open_bi][open_bi] | +| **Unidirectional Stream** | one way stream communication. 
| see [open_uni][open_uni] | | **Unreliable Messaging (extension)** | message based unreliable communication. | see [send_datagram][send_datagram] | ## How to Use @@ -28,103 +28,53 @@ New streams can be created with [Connection][Connection]'s [open_bi()][open_bi] ## Bidirectional Streams -With bidirectional streams, data can be sent in both directions. +With bidirectional streams, data can be sent in both directions. For example, from the connection initiator to the peer and the other way around. - -*open bidirectional stream* + +_open bidirectional stream_ ```rust -async fn open_bidirectional_stream(connection: Connection) -> anyhow::Result<()> { - let (mut send, recv) = connection - .open_bi() - .await?; - - send.write_all(b"test").await?; - send.finish().await?; - - let received = recv.read_to_end(10).await?; - - Ok(()) -} +{{#include ../bin/data-transfer.rs:7:13}} ``` -*iterate incoming bidirectional stream(s)* +_iterate incoming bidirectional stream(s)_ ```rust -async fn receive_bidirectional_stream(connection: Connection) -> anyhow::Result<()> { - while let Ok((mut send, recv)) = connection.accept_bi().await { - // Because it is a bidirectional stream, we can both send and receive. - println!("request: {:?}", recv.read_to_end(50).await?); - - send.write_all(b"response").await?; - send.finish().await?; - } - - Ok(()) -} +{{#include ../bin/data-transfer.rs:16:24}} ``` -## Unidirectional Streams +## Unidirectional Streams With unidirectional streams, you can carry data only in one direction: from the initiator of the stream to its peer. It is possible to get reliability without ordering (so no head-of-line blocking) by opening a new stream for each packet. 
-*open unidirectional stream* +_open unidirectional stream_ ```rust -async fn open_unidirectional_stream(connection: Connection)-> anyhow::Result<()> { - let mut send = connection - .open_uni() - .await?; - - send.write_all(b"test").await?; - send.finish().await?; - - Ok(()) -} +{{#include ../bin/data-transfer.rs:27:32}} ``` -*iterating incoming unidirectional stream(s)* +_iterating incoming unidirectional stream(s)_ ```rust -async fn receive_unidirectional_stream(connection: Connection) -> anyhow::Result<()> { - while let Ok(recv) = connection.accept_uni().await { - // Because it is a unidirectional stream, we can only receive not send back. - println!("{:?}", recv.read_to_end(50).await?); - } - - Ok(()) -} +{{#include ../bin/data-transfer.rs:35:41}} ``` ## Unreliable Messaging -With unreliable messaging, you can transfer data without reliability. +With unreliable messaging, you can transfer data without reliability. This could be useful if data arrival isn't essential or when high throughput is important. -*send datagram* +_send datagram_ ```rust -async fn send_unreliable(connection: Connection)-> anyhow::Result<()> { - connection - .send_datagram(b"test".into()) - .await?; - - Ok(()) -} +{{#include ../bin/data-transfer.rs:44:47}} ``` -*iterating datagram stream(s)* +_iterating datagram stream(s)_ ```rust -async fn receive_datagram(connection: Connection) -> anyhow::Result<()> { - while let Ok(received_bytes) = connection.read_datagram().await { - // Because it is a unidirectional stream, we can only receive not send back. 
- println!("request: {:?}", received); - } - - Ok(()) -} +{{#include ../bin/data-transfer.rs:50:56}} ``` [Endpoint]: https://docs.rs/quinn/latest/quinn/struct.Endpoint.html diff --git a/docs/book/src/quinn/set-up-connection.md b/docs/book/src/quinn/set-up-connection.md index f320064059..74caba1e74 100644 --- a/docs/book/src/quinn/set-up-connection.md +++ b/docs/book/src/quinn/set-up-connection.md @@ -1,73 +1,43 @@ # Connection Setup In the [previous chapter](certificate.md) we looked at how to configure a certificate. -This aspect is omitted in this chapter to prevent duplication. -But **remember** that this is required to get your [Endpoint][Endpoint] up and running. -This chapter explains how to set up a connection and prepare it for data transfer. +This aspect is omitted in this chapter to prevent duplication. +But **remember** that this is required to get your [Endpoint][Endpoint] up and running. +This chapter explains how to set up a connection and prepare it for data transfer. -It all starts with the [Endpoint][Endpoint] struct, this is the entry point of the library. +It all starts with the [Endpoint][Endpoint] struct, this is the entry point of the library. ## Example -Let's start by defining some constants. +Let's start by defining some constants. ```rust -static SERVER_NAME: &str = "localhost"; - -fn client_addr() -> SocketAddr { - "127.0.0.1:5000".parse::().unwrap() -} - -fn server_addr() -> SocketAddr { - "127.0.0.1:5001".parse::().unwrap() -} +{{#include ../bin/set-up-connection.rs:35:38}} ``` **Server** -First, the server endpoint should be bound to a socket. +First, the server endpoint should be bound to a socket. The [server()][server] method, which can be used for this, returns the `Endpoint` type. `Endpoint` is used to start outgoing connections and accept incoming connections. ```rust -async fn server() -> Result<(), Box> { - // Bind this endpoint to a UDP socket on the given server address. 
- let endpoint = Endpoint::server(config, server_addr())?; - - // Start iterating over incoming connections. - while let Some(conn) = endpoint.accept().await { - let mut connection = conn.await?; - - // Save connection somewhere, start transferring, receiving data, see DataTransfer tutorial. - } - - Ok(()) -} +{{#include ../bin/set-up-connection.rs:8:20}} ``` **Client** The [client()][client] returns only a `Endpoint` type. -The client needs to connect to the server using the [connect(server_name)][connect] method. +The client needs to connect to the server using the [connect(server_name)][connect] method. The `SERVER_NAME` argument is the DNS name, matching the certificate configured in the server. ```rust -async fn client() -> Result<(), Box> { - // Bind this endpoint to a UDP socket on the given client address. - let mut endpoint = Endpoint::client(client_addr()); - - // Connect to the server passing in the server name which is supposed to be in the server certificate. - let connection = endpoint.connect(server_addr(), SERVER_NAME)?.await?; - - // Start transferring, receiving data, see data transfer page. - - Ok(()) -} +{{#include ../bin/set-up-connection.rs:23:33}} ``` -

-[Next up](data-transfer.md), let's have a look at sending data over this connection. +

+[Next up](data-transfer.md), let's have a look at sending data over this connection. [Endpoint]: https://docs.rs/quinn/latest/quinn/struct.Endpoint.html [server]: https://docs.rs/quinn/latest/quinn/struct.Endpoint.html#method.server diff --git a/fuzz/fuzz_targets/packet.rs b/fuzz/fuzz_targets/packet.rs index a8320a87a6..7b0c473e21 100644 --- a/fuzz/fuzz_targets/packet.rs +++ b/fuzz/fuzz_targets/packet.rs @@ -4,8 +4,8 @@ extern crate proto; use libfuzzer_sys::fuzz_target; use proto::{ + DEFAULT_SUPPORTED_VERSIONS, FixedLengthConnectionIdParser, fuzzing::{PacketParams, PartialDecode}, - FixedLengthConnectionIdParser, DEFAULT_SUPPORTED_VERSIONS, }; fuzz_target!(|data: PacketParams| { diff --git a/perf/Cargo.toml b/perf/Cargo.toml index d63c604270..c62aafa267 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -6,6 +6,8 @@ license = "MIT OR Apache-2.0" publish = false [features] +# NOTE: Please keep this in sync with the feature list in `.github/workflows/codecov.yml`, see +# comment in that file for more information. 
default = ["json-output"] # Allow for json output from the perf client json-output = ["serde", "serde_json"] diff --git a/perf/src/bin/perf_client.rs b/perf/src/bin/perf_client.rs index a8ceeceec8..02d41baaa7 100644 --- a/perf/src/bin/perf_client.rs +++ b/perf/src/bin/perf_client.rs @@ -7,7 +7,7 @@ use std::{ use anyhow::{Context, Result}; use bytes::Bytes; use clap::Parser; -use quinn::{crypto::rustls::QuicClientConfig, TokioRuntime}; +use quinn::{TokioRuntime, crypto::rustls::QuicClientConfig}; use rustls::pki_types::{CertificateDer, ServerName, UnixTime}; use tokio::sync::Semaphore; use tracing::{debug, error, info}; diff --git a/perf/src/bin/perf_server.rs b/perf/src/bin/perf_server.rs index 74b527acab..f88008332c 100644 --- a/perf/src/bin/perf_server.rs +++ b/perf/src/bin/perf_server.rs @@ -3,11 +3,11 @@ use std::{fs, net::SocketAddr, path::PathBuf, sync::Arc, time::Duration}; use anyhow::{Context, Result}; use bytes::Bytes; use clap::Parser; -use quinn::{crypto::rustls::QuicServerConfig, TokioRuntime}; +use quinn::{TokioRuntime, crypto::rustls::QuicServerConfig}; use rustls::pki_types::{CertificateDer, PrivatePkcs8KeyDer}; use tracing::{debug, error, info}; -use perf::{bind_socket, noprotection::NoProtectionServerConfig, PERF_CIPHER_SUITES}; +use perf::{PERF_CIPHER_SUITES, bind_socket, noprotection::NoProtectionServerConfig}; #[derive(Parser)] #[clap(name = "server")] diff --git a/perf/src/lib.rs b/perf/src/lib.rs index 46d29acd08..7e10fcf40b 100644 --- a/perf/src/lib.rs +++ b/perf/src/lib.rs @@ -1,6 +1,7 @@ use std::net::SocketAddr; use anyhow::{Context, Result}; +use quinn::udp::UdpSocketState; use rustls::crypto::ring::cipher_suite; use socket2::{Domain, Protocol, Socket, Type}; use tracing::warn; @@ -25,14 +26,18 @@ pub fn bind_socket( socket .bind(&socket2::SockAddr::from(addr)) .context("binding endpoint")?; - socket - .set_send_buffer_size(send_buffer_size) + + let socket_state = UdpSocketState::new((&socket).into())?; + socket_state + 
.set_send_buffer_size((&socket).into(), send_buffer_size) .context("send buffer size")?; - socket - .set_recv_buffer_size(recv_buffer_size) + socket_state + .set_recv_buffer_size((&socket).into(), recv_buffer_size) .context("recv buffer size")?; - let buf_size = socket.send_buffer_size().context("send buffer size")?; + let buf_size = socket_state + .send_buffer_size((&socket).into()) + .context("send buffer size")?; if buf_size < send_buffer_size { warn!( "Unable to set desired send buffer size. Desired: {}, Actual: {}", @@ -40,7 +45,9 @@ pub fn bind_socket( ); } - let buf_size = socket.recv_buffer_size().context("recv buffer size")?; + let buf_size = socket_state + .recv_buffer_size((&socket).into()) + .context("recv buffer size")?; if buf_size < recv_buffer_size { warn!( "Unable to set desired recv buffer size. Desired: {}, Actual: {}", diff --git a/perf/src/noprotection.rs b/perf/src/noprotection.rs index 641b82fbd9..862b3ecccd 100644 --- a/perf/src/noprotection.rs +++ b/perf/src/noprotection.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use bytes::BytesMut; use quinn_proto::{ + ConnectionId, PathId, Side, TransportError, crypto::{ - self, + self, CryptoError, rustls::{QuicClientConfig, QuicServerConfig}, - CryptoError, }, - transport_parameters, ConnectionId, Side, TransportError, + transport_parameters, }; /// A rustls TLS session which does not perform packet encryption/decryption (for debugging purpose) @@ -171,7 +171,7 @@ impl crypto::ServerConfig for NoProtectionServerConfig { // forward all calls to inner except those related to packet encryption/decryption impl crypto::PacketKey for NoProtectionPacketKey { - fn encrypt(&self, _packet: u64, buf: &mut [u8], header_len: usize) { + fn encrypt(&self, _path_id: PathId, _packet: u64, buf: &mut [u8], header_len: usize) { let (_header, payload_tag) = buf.split_at_mut(header_len); let (_payload, tag_storage) = payload_tag.split_at_mut(payload_tag.len() - self.inner.tag_len()); @@ -181,6 +181,7 @@ impl crypto::PacketKey 
for NoProtectionPacketKey { fn decrypt( &self, + _path_id: PathId, _packet: u64, _header: &[u8], payload: &mut BytesMut, diff --git a/perf/src/stats.rs b/perf/src/stats.rs index af258c0131..e042cbfb15 100644 --- a/perf/src/stats.rs +++ b/perf/src/stats.rs @@ -90,8 +90,12 @@ impl Stats { println!("Stream metrics:\n"); - println!(" │ Upload Duration │ Download Duration | FBL | Upload Throughput | Download Throughput"); - println!("──────┼─────────────────┼───────────────────┼────────────┼───────────────────┼────────────────────"); + println!( + " │ Upload Duration │ Download Duration | FBL | Upload Throughput | Download Throughput" + ); + println!( + "──────┼─────────────────┼───────────────────┼────────────┼───────────────────┼────────────────────" + ); let print_metric = |label: &'static str, get_metric: fn(&Histogram) -> u64| { println!( @@ -243,7 +247,8 @@ fn throughput_bytes_per_second(duration_in_micros: u64, size: u64) -> f64 { mod json { use crate::stats; use crate::stats::{Stats, StreamIntervalStats}; - use serde::{self, ser::SerializeStruct, Serialize, Serializer}; + use quinn::StreamId; + use serde::{self, Serialize, Serializer, ser::SerializeStruct}; use std::io::Write; use std::time::{SystemTime, UNIX_EPOCH}; @@ -339,7 +344,8 @@ mod json { #[derive(Serialize)] struct Stream { - id: u64, + #[serde(serialize_with = "serialize_stream_id")] + id: StreamId, start: f64, end: f64, seconds: f64, @@ -356,7 +362,7 @@ mod json { let bits_per_second = stats.bytes as f64 * 8.0 / period.seconds; Self { - id: stats.id.0, + id: stats.id, start: period.start, end: period.end, seconds: period.seconds, @@ -367,6 +373,10 @@ mod json { } } + fn serialize_stream_id(id: &StreamId, serializer: S) -> Result { + serializer.serialize_u64(u64::from(*id)) + } + #[derive(Serialize)] struct Sum { start: f64, diff --git a/quinn-proto/Cargo.toml b/quinn-proto/Cargo.toml index d6ee92af4e..9bd4d7ce49 100644 --- a/quinn-proto/Cargo.toml +++ b/quinn-proto/Cargo.toml @@ -11,9 +11,13 @@ 
categories.workspace = true workspace = ".." [features] -default = ["rustls-ring", "log"] +# NOTE: Please keep this in sync with the feature list in `.github/workflows/codecov.yml`, see +# comment in that file for more information. +default = ["rustls-ring", "log", "bloom"] aws-lc-rs = ["dep:aws-lc-rs", "aws-lc-rs?/aws-lc-sys", "aws-lc-rs?/prebuilt-nasm"] aws-lc-rs-fips = ["aws-lc-rs", "aws-lc-rs?/fips"] +# Enables BloomTokenLog, and uses it by default +bloom = ["dep:fastbloom"] # For backwards compatibility, `rustls` forwards to `rustls-ring` rustls = ["rustls-ring"] # Enable rustls with the `aws-lc-rs` crypto provider @@ -34,6 +38,8 @@ rustls-log = ["rustls?/logging"] arbitrary = { workspace = true, optional = true } aws-lc-rs = { workspace = true, optional = true } bytes = { workspace = true } +fastbloom = { workspace = true, optional = true } +lru-slab = { workspace = true } rustc-hash = { workspace = true } rand = { workspace = true } ring = { workspace = true, optional = true } @@ -48,13 +54,14 @@ tracing = { workspace = true } # wasm-bindgen is assumed for a wasm*-*-unknown target [target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies] ring = { workspace = true, features = ["wasm32_unknown_unknown_js"] } -getrandom = { workspace = true, features = ["js"] } +getrandom = { workspace = true, features = ["wasm_js"] } rustls-pki-types = { workspace = true, features = ["web"] } # only added as dependency to enforce the `web` feature for this target web-time = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } hex-literal = { workspace = true } +rand_pcg = "0.9" rcgen = { workspace = true } tracing-subscriber = { workspace = true } lazy_static = "1" diff --git a/quinn-proto/src/bloom_token_log.rs b/quinn-proto/src/bloom_token_log.rs new file mode 100644 index 0000000000..dc504e96f1 --- /dev/null +++ b/quinn-proto/src/bloom_token_log.rs @@ -0,0 +1,368 @@ +use std::{ + collections::HashSet, + f64::consts::LN_2, + 
hash::{BuildHasher, Hasher},
+    mem::{size_of, take},
+    sync::Mutex,
+};
+
+use fastbloom::BloomFilter;
+use rustc_hash::FxBuildHasher;
+use tracing::{trace, warn};
+
+use crate::{Duration, SystemTime, TokenLog, TokenReuseError, UNIX_EPOCH};
+
+/// Bloom filter-based [`TokenLog`]
+///
+/// Parameterizable over an approximate maximum number of bytes to allocate. Starts out by storing
+/// used tokens in a hash set. Once the hash set becomes too large, converts it to a bloom filter.
+/// This achieves a memory profile of linear growth with an upper bound.
+///
+/// Divides time into periods based on `lifetime` and stores two filters at any given moment, for
+/// each of the two periods currently non-expired tokens could expire in. As such, turns over
+/// filters as time goes on to avoid bloom filter false positive rate increasing infinitely over
+/// time.
+pub struct BloomTokenLog(Mutex<State>);
+
+impl BloomTokenLog {
+    /// Construct with an approximate maximum memory usage and expected number of validation token
+    /// usages per expiration period
+    ///
+    /// Calculates the optimal bloom filter k number automatically.
+    pub fn new_expected_items(max_bytes: usize, expected_hits: u64) -> Self {
+        Self::new(max_bytes, optimal_k_num(max_bytes, expected_hits))
+    }
+
+    /// Construct with an approximate maximum memory usage and a [bloom filter k number][bloom]
+    ///
+    /// [bloom]: https://en.wikipedia.org/wiki/Bloom_filter
+    ///
+    /// If choosing a custom k number, note that `BloomTokenLog` always maintains two filters
+    /// between them and divides the allocation budget of `max_bytes` evenly between them. As such,
+    /// each bloom filter will contain `max_bytes * 4` bits.
+ pub fn new(max_bytes: usize, k_num: u32) -> Self { + Self(Mutex::new(State { + config: FilterConfig { + filter_max_bytes: max_bytes / 2, + k_num, + }, + period_1_start: UNIX_EPOCH, + filter_1: Filter::default(), + filter_2: Filter::default(), + })) + } +} + +impl TokenLog for BloomTokenLog { + fn check_and_insert( + &self, + nonce: u128, + issued: SystemTime, + lifetime: Duration, + ) -> Result<(), TokenReuseError> { + trace!(%nonce, "check_and_insert"); + + if lifetime.is_zero() { + // avoid divide-by-zero if lifetime is zero + return Err(TokenReuseError); + } + + let mut guard = self.0.lock().unwrap(); + let state = &mut *guard; + + // calculate how many periods past period 1 the token expires + let expires_at = issued + lifetime; + let Ok(periods_forward) = expires_at + .duration_since(state.period_1_start) + .map(|duration| duration.as_nanos() / lifetime.as_nanos()) + else { + // shouldn't happen unless time travels backwards or lifetime changes or the current + // system time is before the Unix epoch + warn!("BloomTokenLog presented with token too far in past"); + return Err(TokenReuseError); + }; + + // get relevant filter + let filter = match periods_forward { + 0 => &mut state.filter_1, + 1 => &mut state.filter_2, + 2 => { + // turn over filter 1 + state.filter_1 = take(&mut state.filter_2); + state.period_1_start += lifetime; + &mut state.filter_2 + } + _ => { + // turn over both filters + state.filter_1 = Filter::default(); + state.filter_2 = Filter::default(); + state.period_1_start = expires_at; + &mut state.filter_1 + } + }; + + // insert into the filter + // + // the token's nonce needs to guarantee uniqueness because of the role it plays in the + // encryption of the tokens, so it is 128 bits. but since the token log can tolerate false + // positives, we trim it down to 64 bits, which would still only have a small collision + // rate even at significant amounts of usage, while allowing us to store twice as many in + // the hash set variant. 
+        //
+        // token nonce values are uniformly randomly generated server-side and cryptographically
+        // integrity-checked, so we don't need to employ secure hashing to trim it down to 64 bits,
+        // we can simply truncate.
+        //
+        // per the Rust reference, we can truncate by simply casting:
+        // https://doc.rust-lang.org/stable/reference/expressions/operator-expr.html#numeric-cast
+        filter.check_and_insert(nonce as u64, &state.config)
+    }
+}
+
+/// Default to 10 MiB max memory consumption and expected one million hits
+///
+/// With the default validation token lifetime of 2 weeks, this corresponds to one token usage per
+/// 1.21 seconds.
+impl Default for BloomTokenLog {
+    fn default() -> Self {
+        Self::new_expected_items(DEFAULT_MAX_BYTES, DEFAULT_EXPECTED_HITS)
+    }
+}
+
+/// Lockable state of [`BloomTokenLog`]
+struct State {
+    config: FilterConfig,
+    // filter_1 covers tokens that expire in the period starting at period_1_start and extending
+    // lifetime after. filter_2 covers tokens for the next lifetime after that.
+    period_1_start: SystemTime,
+    filter_1: Filter,
+    filter_2: Filter,
+}
+
+/// Unchanging parameters governing [`Filter`] behavior
+struct FilterConfig {
+    filter_max_bytes: usize,
+    k_num: u32,
+}
+
+/// Period filter within [`State`]
+enum Filter {
+    Set(HashSet<u64>),
+    Bloom(BloomFilter<512, FxBuildHasher>),
+}
+
+impl Filter {
+    fn check_and_insert(
+        &mut self,
+        fingerprint: u64,
+        config: &FilterConfig,
+    ) -> Result<(), TokenReuseError> {
+        match self {
+            Self::Set(hset) => {
+                if !hset.insert(fingerprint) {
+                    return Err(TokenReuseError);
+                }
+
+                if hset.capacity() * size_of::<u64>() <= config.filter_max_bytes {
+                    return Ok(());
+                }
+
+                // convert to bloom
+                // avoid panicking if user passed in filter_max_bytes of 0. we document that this
+                // limit is approximate, so just fudge it up to 1.
+ let mut bloom = BloomFilter::with_num_bits((config.filter_max_bytes * 8).max(1)) + .hasher(FxBuildHasher) + .hashes(config.k_num); + for item in &*hset { + bloom.insert(item); + } + *self = Self::Bloom(bloom); + } + Self::Bloom(bloom) => { + if bloom.insert(&fingerprint) { + return Err(TokenReuseError); + } + } + } + Ok(()) + } +} + +impl Default for Filter { + fn default() -> Self { + Self::Set(HashSet::default()) + } +} + +/// `BuildHasher` of `IdentityHasher` +#[derive(Default)] +struct IdentityBuildHasher; + +impl BuildHasher for IdentityBuildHasher { + type Hasher = IdentityHasher; + + fn build_hasher(&self) -> Self::Hasher { + IdentityHasher::default() + } +} + +/// Hasher that is the identity operation--it assumes that exactly 8 bytes will be hashed, and the +/// resultant hash is those bytes as a `u64` +#[derive(Default)] +struct IdentityHasher { + data: [u8; 8], + #[cfg(debug_assertions)] + wrote_8_byte_slice: bool, +} + +impl Hasher for IdentityHasher { + fn write(&mut self, bytes: &[u8]) { + #[cfg(debug_assertions)] + { + assert!(!self.wrote_8_byte_slice); + assert_eq!(bytes.len(), 8); + self.wrote_8_byte_slice = true; + } + self.data.copy_from_slice(bytes); + } + + fn finish(&self) -> u64 { + #[cfg(debug_assertions)] + assert!(self.wrote_8_byte_slice); + u64::from_ne_bytes(self.data) + } +} + +fn optimal_k_num(num_bytes: usize, expected_hits: u64) -> u32 { + // be more forgiving rather than panickey here. excessively high num_bits may occur if the user + // wishes it to be unbounded, so just saturate. expected_hits of 0 would cause divide-by-zero, + // so just fudge it up to 1 in that case. + let num_bits = (num_bytes as u64).saturating_mul(8); + let expected_hits = expected_hits.max(1); + // reference for this formula: https://programming.guide/bloom-filter-calculator.html + // optimal k = (m ln 2) / n + // wherein m is the number of bits, and n is the number of elements in the set. 
+    //
+    // we also impose a minimum return value of 1, to avoid making the bloom filter entirely
+    // useless in the case that the user provided an absurdly high ratio of hits / bytes.
+    (((num_bits as f64 / expected_hits as f64) * LN_2).round() as u32).max(1)
+}
+
+// remember to change the doc comment for `impl Default for BloomTokenLog` if these ever change
+const DEFAULT_MAX_BYTES: usize = 10 << 20;
+const DEFAULT_EXPECTED_HITS: u64 = 1_000_000;
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use rand::prelude::*;
+    use rand_pcg::Pcg32;
+
+    fn new_rng() -> impl Rng {
+        Pcg32::from_seed(0xdeadbeefdeadbeefdeadbeefdeadbeef_u128.to_le_bytes())
+    }
+
+    #[test]
+    fn identity_hash_test() {
+        let mut rng = new_rng();
+        let builder = IdentityBuildHasher;
+        for _ in 0..100 {
+            let n = rng.random::<u64>();
+            let hash = builder.hash_one(n);
+            assert_eq!(hash, n);
+        }
+    }
+
+    #[test]
+    fn optimal_k_num_test() {
+        assert_eq!(optimal_k_num(10 << 20, 1_000_000), 58);
+        assert_eq!(optimal_k_num(10 << 20, 1_000_000_000_000_000), 1);
+        // assert that these don't panic:
+        optimal_k_num(10 << 20, 0);
+        optimal_k_num(usize::MAX, 1_000_000);
+    }
+
+    #[test]
+    fn bloom_token_log_conversion() {
+        let mut rng = new_rng();
+        let mut log = BloomTokenLog::new_expected_items(800, 200);
+
+        let issued = SystemTime::now();
+        let lifetime = Duration::from_secs(1_000_000);
+
+        for i in 0..200 {
+            let token = rng.random::<u128>();
+            let result = log.check_and_insert(token, issued, lifetime);
+            {
+                let filter = &log.0.lock().unwrap().filter_1;
+                if let Filter::Set(ref hset) = *filter {
+                    assert!(hset.capacity() * size_of::<u64>() <= 800);
+                    assert_eq!(hset.len(), i + 1);
+                    assert!(result.is_ok());
+                } else {
+                    assert!(i > 10, "definitely bloomed too early");
+                }
+            }
+            assert!(log.check_and_insert(token, issued, lifetime).is_err());
+        }
+
+        assert!(
+            matches!(log.0.get_mut().unwrap().filter_1, Filter::Bloom { ..
 }),
+            "didn't bloom"
+        );
+    }
+
+    #[test]
+    fn turn_over() {
+        let mut rng = new_rng();
+        let log = BloomTokenLog::new_expected_items(800, 200);
+        let lifetime = Duration::from_secs(1_000);
+        let mut old = Vec::default();
+        let mut accepted = 0;
+
+        for i in 0..200 {
+            let token = rng.random::<u128>();
+            let now = UNIX_EPOCH + lifetime * 10 + lifetime * i / 10;
+            let issued = now - lifetime.mul_f32(rng.random_range(0.0..3.0));
+            let result = log.check_and_insert(token, issued, lifetime);
+            if result.is_ok() {
+                accepted += 1;
+            }
+            old.push((token, issued));
+            let old_idx = rng.random_range(0..old.len());
+            let (old_token, old_issued) = old[old_idx];
+            assert!(
+                log.check_and_insert(old_token, old_issued, lifetime)
+                    .is_err()
+            );
+        }
+        assert!(accepted > 0);
+    }
+
+    fn test_doesnt_panic(log: BloomTokenLog) {
+        let mut rng = new_rng();
+
+        let issued = SystemTime::now();
+        let lifetime = Duration::from_secs(1_000_000);
+
+        for _ in 0..200 {
+            let _ = log.check_and_insert(rng.random::<u128>(), issued, lifetime);
+        }
+    }
+
+    #[test]
+    fn max_bytes_zero() {
+        // "max bytes" is documented to be approximate. but make sure it doesn't panic.
+ test_doesnt_panic(BloomTokenLog::new_expected_items(0, 200)); + } + + #[test] + fn expected_hits_zero() { + test_doesnt_panic(BloomTokenLog::new_expected_items(100, 0)); + } + + #[test] + fn k_num_zero() { + test_doesnt_panic(BloomTokenLog::new(100, 0)); + } +} diff --git a/quinn-proto/src/cid_generator.rs b/quinn-proto/src/cid_generator.rs index fc737eae70..e62415e8b4 100644 --- a/quinn-proto/src/cid_generator.rs +++ b/quinn-proto/src/cid_generator.rs @@ -2,9 +2,9 @@ use std::hash::Hasher; use rand::{Rng, RngCore}; -use crate::shared::ConnectionId; use crate::Duration; use crate::MAX_CID_SIZE; +use crate::shared::ConnectionId; /// Generates connection IDs for incoming connections pub trait ConnectionIdGenerator: Send + Sync { @@ -77,7 +77,7 @@ impl RandomConnectionIdGenerator { impl ConnectionIdGenerator for RandomConnectionIdGenerator { fn generate_cid(&mut self) -> ConnectionId { let mut bytes_arr = [0; MAX_CID_SIZE]; - rand::thread_rng().fill_bytes(&mut bytes_arr[..self.cid_len]); + rand::rng().fill_bytes(&mut bytes_arr[..self.cid_len]); ConnectionId::new(&bytes_arr[..self.cid_len]) } @@ -105,7 +105,7 @@ pub struct HashedConnectionIdGenerator { impl HashedConnectionIdGenerator { /// Create a generator with a random key pub fn new() -> Self { - Self::from_key(rand::thread_rng().gen()) + Self::from_key(rand::rng().random()) } /// Create a generator with a specific key @@ -135,7 +135,7 @@ impl Default for HashedConnectionIdGenerator { impl ConnectionIdGenerator for HashedConnectionIdGenerator { fn generate_cid(&mut self) -> ConnectionId { let mut bytes_arr = [0; NONCE_LEN + SIGNATURE_LEN]; - rand::thread_rng().fill_bytes(&mut bytes_arr[..NONCE_LEN]); + rand::rng().fill_bytes(&mut bytes_arr[..NONCE_LEN]); let mut hasher = rustc_hash::FxHasher::default(); hasher.write_u64(self.key); hasher.write(&bytes_arr[..NONCE_LEN]); diff --git a/quinn-proto/src/cid_queue.rs b/quinn-proto/src/cid_queue.rs index 9f35f9e291..d3c464d74a 100644 --- a/quinn-proto/src/cid_queue.rs 
+++ b/quinn-proto/src/cid_queue.rs @@ -1,6 +1,6 @@ use std::ops::Range; -use crate::{frame::NewConnectionId, ConnectionId, ResetToken}; +use crate::{ConnectionId, ResetToken, frame::NewConnectionId}; /// DataType stored in CidQueue buffer type CidData = (ConnectionId, Option); @@ -139,6 +139,7 @@ mod tests { fn cid(sequence: u64, retire_prior_to: u64) -> NewConnectionId { NewConnectionId { + path_id: None, sequence, id: ConnectionId::new(&[0xAB; 8]), reset_token: ResetToken::from([0xCD; crate::RESET_TOKEN_SIZE]), diff --git a/quinn-proto/src/config/mod.rs b/quinn-proto/src/config/mod.rs index 9cb4a45edc..04ef14c20a 100644 --- a/quinn-proto/src/config/mod.rs +++ b/quinn-proto/src/config/mod.rs @@ -11,14 +11,18 @@ use rustls::client::WebPkiServerVerifier; use rustls::pki_types::{CertificateDer, PrivateKeyDer}; use thiserror::Error; +#[cfg(feature = "bloom")] +use crate::BloomTokenLog; +#[cfg(not(feature = "bloom"))] +use crate::NoneTokenLog; #[cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] -use crate::crypto::rustls::{configured_provider, QuicServerConfig}; +use crate::crypto::rustls::{QuicServerConfig, configured_provider}; use crate::{ + DEFAULT_SUPPORTED_VERSIONS, Duration, MAX_CID_SIZE, RandomConnectionIdGenerator, SystemTime, + TokenLog, TokenMemoryCache, TokenStore, VarInt, VarIntBoundsExceeded, cid_generator::{ConnectionIdGenerator, HashedConnectionIdGenerator}, crypto::{self, HandshakeTokenKey, HmacKey}, shared::ConnectionId, - Duration, RandomConnectionIdGenerator, SystemTime, VarInt, VarIntBoundsExceeded, - DEFAULT_SUPPORTED_VERSIONS, MAX_CID_SIZE, }; mod transport; @@ -100,15 +104,14 @@ impl EndpointConfig { Ok(self) } - /// Get the current value of `max_udp_payload_size` - /// - /// While most parameters don't need to be readable, this must be exposed to allow higher-level - /// layers, e.g. the `quinn` crate, to determine how large a receive buffer to allocate to - /// support an externally-defined `EndpointConfig`. 
- /// - /// While `get_` accessors are typically unidiomatic in Rust, we favor concision for setters, - /// which will be used far more heavily. - #[doc(hidden)] + /// Get the current value of [`max_udp_payload_size`](Self::max_udp_payload_size) + // + // While most parameters don't need to be readable, this must be exposed to allow higher-level + // layers, e.g. the `quinn` crate, to determine how large a receive buffer to allocate to + // support an externally-defined `EndpointConfig`. + // + // While `get_` accessors are typically unidiomatic in Rust, we favor concision for setters, + // which will be used far more heavily. pub fn get_max_udp_payload_size(&self) -> u64 { self.max_udp_payload_size.into() } @@ -179,7 +182,7 @@ impl Default for EndpointConfig { use ring::hmac; let mut reset_key = [0; 64]; - rand::thread_rng().fill_bytes(&mut reset_key); + rand::rng().fill_bytes(&mut reset_key); Self::new(Arc::new(hmac::Key::new(hmac::HMAC_SHA256, &reset_key))) } @@ -198,6 +201,9 @@ pub struct ServerConfig { /// Must be set to use TLS 1.3 only. 
pub crypto: Arc, + /// Configuration for sending and handling validation tokens + pub validation_token: ValidationTokenConfig, + /// Used to generate one-time AEAD keys to protect handshake tokens pub(crate) token_key: Arc, @@ -235,6 +241,8 @@ impl ServerConfig { migration: true, + validation_token: ValidationTokenConfig::default(), + preferred_address_v4: None, preferred_address_v6: None, @@ -252,6 +260,15 @@ impl ServerConfig { self } + /// Set a custom [`ValidationTokenConfig`] + pub fn validation_token_config( + &mut self, + validation_token: ValidationTokenConfig, + ) -> &mut Self { + self.validation_token = validation_token; + self + } + /// Private key used to authenticate data included in handshake tokens pub fn token_key(&mut self, value: Arc) -> &mut Self { self.token_key = value; @@ -377,7 +394,7 @@ impl ServerConfig { #[cfg(feature = "ring")] use ring::hkdf; - let rng = &mut rand::thread_rng(); + let rng = &mut rand::rng(); let mut master_key = [0u8; 64]; rng.fill_bytes(&mut master_key); let master_key = hkdf::Salt::new(hkdf::HKDF_SHA256, &[]).extract(&master_key); @@ -393,6 +410,7 @@ impl fmt::Debug for ServerConfig { // crypto not debug // token not debug .field("retry_token_lifetime", &self.retry_token_lifetime) + .field("validation_token", &self.validation_token) .field("migration", &self.migration) .field("preferred_address_v4", &self.preferred_address_v4) .field("preferred_address_v6", &self.preferred_address_v6) @@ -407,6 +425,120 @@ impl fmt::Debug for ServerConfig { } } +/// Configuration for sending and handling validation tokens in incoming connections +/// +/// Default values should be suitable for most internet applications. +/// +/// ## QUIC Tokens +/// +/// The QUIC protocol defines a concept of "[address validation][1]". 
Essentially, one side of a
+/// QUIC connection may appear to be receiving QUIC packets from a particular remote UDP address,
+/// but it will only consider that remote address "validated" once it has convincing evidence that
+/// the address is not being [spoofed][2].
+///
+/// Validation is important primarily because of QUIC's "anti-amplification limit." This limit
+/// prevents a QUIC server from sending a client more than three times the number of bytes it has
+/// received from the client on a given address until that address is validated. This is designed
+/// to mitigate the ability of attackers to use QUIC-based servers as reflectors in [amplification
+/// attacks][3].
+///
+/// A path may become validated in several ways. The server is always considered validated by the
+/// client. The client usually begins in an unvalidated state upon first connecting or migrating,
+/// but then becomes validated through various mechanisms that usually take one network round trip.
+/// However, in some cases, a client which has previously attempted to connect to a server may have
+/// been given a one-time use cryptographically secured "token" that it can send in a subsequent
+/// connection attempt to be validated immediately.
+///
+/// There are two ways these tokens can originate:
+///
+/// - If the server responds to an incoming connection with `retry`, a "retry token" is minted and
+///   sent to the client, which the client immediately uses to attempt to connect again. Retry
+///   tokens operate on short timescales, such as 15 seconds.
+/// - If a client's path within an active connection is validated, the server may send the client
+///   one or more "validation tokens," which the client may store for use in later connections to
+///   the same server. Validation tokens may be valid for much longer lifetimes than retry tokens.
+/// +/// The usage of validation tokens is most impactful in situations where 0-RTT data is also being +/// used--in particular, in situations where the server sends the client more than three times more +/// 0.5-RTT data than it has received 0-RTT data. Since the successful completion of a connection +/// handshake implicitly causes the client's address to be validated, transmission of 0.5-RTT data +/// is the main situation where a server might be sending application data to an address that could +/// be validated by token usage earlier than it would become validated without token usage. +/// +/// [1]: https://www.rfc-editor.org/rfc/rfc9000.html#section-8 +/// [2]: https://en.wikipedia.org/wiki/IP_address_spoofing +/// [3]: https://en.wikipedia.org/wiki/Denial-of-service_attack#Amplification +/// +/// These tokens should not be confused with "stateless reset tokens," which are similarly named +/// but entirely unrelated. +#[derive(Clone)] +pub struct ValidationTokenConfig { + pub(crate) lifetime: Duration, + pub(crate) log: Arc, + pub(crate) sent: u32, +} + +impl ValidationTokenConfig { + /// Duration after an address validation token was issued for which it's considered valid + /// + /// This refers only to tokens sent in NEW_TOKEN frames, in contrast to retry tokens. + /// + /// Defaults to 2 weeks. + pub fn lifetime(&mut self, value: Duration) -> &mut Self { + self.lifetime = value; + self + } + + #[allow(rustdoc::redundant_explicit_links)] // which links are redundant depends on features + /// Set a custom [`TokenLog`] + /// + /// If the `bloom` feature is enabled (which it is by default), defaults to a default + /// [`BloomTokenLog`][crate::BloomTokenLog], which is suitable for most internet applications. + /// + /// If the `bloom` feature is disabled, defaults to [`NoneTokenLog`][crate::NoneTokenLog], + /// which makes the server ignore all address validation tokens (that is, tokens originating + /// from NEW_TOKEN frames--retry tokens are not affected). 
+ pub fn log(&mut self, log: Arc) -> &mut Self { + self.log = log; + self + } + + /// Number of address validation tokens sent to a client when its path is validated + /// + /// This refers only to tokens sent in NEW_TOKEN frames, in contrast to retry tokens. + /// + /// If the `bloom` feature is enabled (which it is by default), defaults to 2. Otherwise, + /// defaults to 0. + pub fn sent(&mut self, value: u32) -> &mut Self { + self.sent = value; + self + } +} + +impl Default for ValidationTokenConfig { + fn default() -> Self { + #[cfg(feature = "bloom")] + let log = Arc::new(BloomTokenLog::default()); + #[cfg(not(feature = "bloom"))] + let log = Arc::new(NoneTokenLog); + Self { + lifetime: Duration::from_secs(2 * 7 * 24 * 60 * 60), + log, + sent: if cfg!(feature = "bloom") { 2 } else { 0 }, + } + } +} + +impl fmt::Debug for ValidationTokenConfig { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("ServerValidationTokenConfig") + .field("lifetime", &self.lifetime) + // log not debug + .field("sent", &self.sent) + .finish_non_exhaustive() + } +} + /// Configuration for outgoing connections /// /// Default values should be suitable for most internet applications. @@ -419,6 +551,9 @@ pub struct ClientConfig { /// Cryptographic configuration to use pub(crate) crypto: Arc, + /// Validation token store to use + pub(crate) token_store: Arc, + /// Provider that populates the destination connection ID of Initial Packets pub(crate) initial_dst_cid_provider: Arc ConnectionId + Send + Sync>, @@ -432,6 +567,7 @@ impl ClientConfig { Self { transport: Default::default(), crypto, + token_store: Arc::new(TokenMemoryCache::default()), initial_dst_cid_provider: Arc::new(|| { RandomConnectionIdGenerator::new(MAX_CID_SIZE).generate_cid() }), @@ -461,6 +597,14 @@ impl ClientConfig { self } + /// Set a custom [`TokenStore`] + /// + /// Defaults to [`TokenMemoryCache`], which is suitable for most internet applications. 
+ pub fn token_store(&mut self, store: Arc) -> &mut Self { + self.token_store = store; + self + } + /// Set the QUIC version to use pub fn version(&mut self, version: u32) -> &mut Self { self.version = version; @@ -493,6 +637,7 @@ impl fmt::Debug for ClientConfig { fmt.debug_struct("ClientConfig") .field("transport", &self.transport) // crypto not debug + // token_store not debug .field("version", &self.version) .finish_non_exhaustive() } diff --git a/quinn-proto/src/config/transport.rs b/quinn-proto/src/config/transport.rs index b85cb5884d..4b08f4a77f 100644 --- a/quinn-proto/src/config/transport.rs +++ b/quinn-proto/src/config/transport.rs @@ -1,8 +1,8 @@ -use std::{fmt, sync::Arc}; +use std::{fmt, num::NonZeroU32, sync::Arc}; use crate::{ - address_discovery, congestion, Duration, VarInt, VarIntBoundsExceeded, INITIAL_MTU, - MAX_UDP_PAYLOAD, + Duration, INITIAL_MTU, MAX_UDP_PAYLOAD, VarInt, VarIntBoundsExceeded, address_discovery, + congestion, }; /// Parameters governing the core QUIC state machine @@ -48,6 +48,8 @@ pub struct TransportConfig { pub(crate) enable_segmentation_offload: bool, pub(crate) address_discovery_role: address_discovery::Role, + + pub(crate) max_concurrent_multipath_paths: Option, } impl TransportConfig { @@ -340,14 +342,38 @@ impl TransportConfig { .receive_reports_from_peers(enabled); self } + + /// Enables the Multipath Extension for QUIC. + /// + /// Setting this to any nonzero value will enable the Multipath Extension for QUIC, + /// . + /// + /// The value provided specifies the number maximum number of paths this endpoint may open + /// concurrently when multipath is negotiated. For any path to be opened, the remote must + /// enable multipath as well. + pub fn max_concurrent_multipath_paths(&mut self, max_concurrent: u32) -> &mut Self { + self.max_concurrent_multipath_paths = NonZeroU32::new(max_concurrent); + self + } + + /// Get the initial max [`crate::PathId`] this endpoint allows. 
+ /// + /// Returns `None` if multipath is disabled. + pub(crate) fn get_initial_max_path_id(&self) -> Option { + self.max_concurrent_multipath_paths + // a max_concurrent_multipath_paths value of 1 only allows the first path, which + // has id 0 + .map(|nonzero_concurrent| nonzero_concurrent.get() - 1) + .map(Into::into) + } } impl Default for TransportConfig { fn default() -> Self { const EXPECTED_RTT: u32 = 100; // ms const MAX_STREAM_BANDWIDTH: u32 = 12500 * 1000; // bytes/s - // Window size needed to avoid pipeline - // stalls + // Window size needed to avoid pipeline + // stalls const STREAM_RWND: u32 = MAX_STREAM_BANDWIDTH / 1000 * EXPECTED_RTT; Self { @@ -382,6 +408,9 @@ impl Default for TransportConfig { enable_segmentation_offload: true, address_discovery_role: address_discovery::Role::default(), + + // disabled multipath by default + max_concurrent_multipath_paths: None, } } } @@ -414,6 +443,7 @@ impl fmt::Debug for TransportConfig { congestion_controller_factory: _, enable_segmentation_offload, address_discovery_role, + max_concurrent_multipath_paths, } = self; fmt.debug_struct("TransportConfig") .field("max_concurrent_bidi_streams", max_concurrent_bidi_streams) @@ -442,6 +472,10 @@ impl fmt::Debug for TransportConfig { // congestion_controller_factory not debug .field("enable_segmentation_offload", enable_segmentation_offload) .field("address_discovery_role", address_discovery_role) + .field( + "max_concurrent_multipath_paths", + max_concurrent_multipath_paths, + ) .finish_non_exhaustive() } } diff --git a/quinn-proto/src/congestion.rs b/quinn-proto/src/congestion.rs index d949b00e06..27bd252376 100644 --- a/quinn-proto/src/congestion.rs +++ b/quinn-proto/src/congestion.rs @@ -1,7 +1,7 @@ //! 
Logic for controlling the rate at which data is sent -use crate::connection::RttEstimator; use crate::Instant; +use crate::connection::RttEstimator; use std::any::Any; use std::sync::Arc; diff --git a/quinn-proto/src/congestion/bbr/bw_estimation.rs b/quinn-proto/src/congestion/bbr/bw_estimation.rs index 8796f45729..84ea4e6875 100644 --- a/quinn-proto/src/congestion/bbr/bw_estimation.rs +++ b/quinn-proto/src/congestion/bbr/bw_estimation.rs @@ -3,7 +3,7 @@ use std::fmt::{Debug, Display, Formatter}; use super::min_max::MinMax; use crate::{Duration, Instant}; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] pub(crate) struct BandwidthEstimation { total_acked: u64, prev_total_acked: u64, @@ -11,7 +11,7 @@ pub(crate) struct BandwidthEstimation { prev_acked_time: Option, total_sent: u64, prev_total_sent: u64, - sent_time: Instant, + sent_time: Option, prev_sent_time: Option, max_filter: MinMax, acked_at_last_window: u64, @@ -21,8 +21,8 @@ impl BandwidthEstimation { pub(crate) fn on_sent(&mut self, now: Instant, bytes: u64) { self.prev_total_sent = self.total_sent; self.total_sent += bytes; - self.prev_sent_time = Some(self.sent_time); - self.sent_time = now; + self.prev_sent_time = self.sent_time; + self.sent_time = Some(now); } pub(crate) fn on_ack( @@ -43,14 +43,13 @@ impl BandwidthEstimation { None => return, }; - let send_rate = if self.sent_time > prev_sent_time { - Self::bw_from_delta( + let send_rate = match self.sent_time { + Some(sent_time) if sent_time > prev_sent_time => Self::bw_from_delta( self.total_sent - self.prev_total_sent, - self.sent_time - prev_sent_time, + sent_time - prev_sent_time, ) - .unwrap_or(0) - } else { - u64::MAX // will take the min of send and ack, so this is just a skip + .unwrap_or(0), + _ => u64::MAX, // will take the min of send and ack, so this is just a skip }; let ack_rate = match self.prev_acked_time { @@ -91,25 +90,6 @@ impl BandwidthEstimation { } } -impl Default for BandwidthEstimation { - fn default() -> Self { - 
Self { - total_acked: 0, - prev_total_acked: 0, - acked_time: None, - prev_acked_time: None, - total_sent: 0, - prev_total_sent: 0, - // The `sent_time` value set here is ignored; it is used in `on_ack()`, but will - // have been reset by `on_sent()` before that method is called. - sent_time: Instant::now(), - prev_sent_time: None, - max_filter: MinMax::default(), - acked_at_last_window: 0, - } - } -} - impl Display for BandwidthEstimation { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!( diff --git a/quinn-proto/src/congestion/bbr/mod.rs b/quinn-proto/src/congestion/bbr/mod.rs index 13f613cd04..272f229361 100644 --- a/quinn-proto/src/congestion/bbr/mod.rs +++ b/quinn-proto/src/congestion/bbr/mod.rs @@ -9,7 +9,7 @@ use crate::congestion::bbr::min_max::MinMax; use crate::connection::RttEstimator; use crate::{Duration, Instant}; -use super::{Controller, ControllerFactory, BASE_DATAGRAM_SIZE}; +use super::{BASE_DATAGRAM_SIZE, Controller, ControllerFactory}; mod bw_estimation; mod min_max; @@ -96,7 +96,7 @@ impl Bbr { bw_at_last_round: 0, round_wo_bw_gain: 0, ack_aggregation: AckAggregationState::default(), - random_number_generator: rand::rngs::StdRng::from_entropy(), + random_number_generator: rand::rngs::StdRng::from_os_rng(), } } @@ -115,7 +115,7 @@ impl Bbr { // follow each other. 
let mut rand_index = self .random_number_generator - .gen_range(0..K_PACING_GAIN.len() as u8 - 1); + .random_range(0..K_PACING_GAIN.len() as u8 - 1); if rand_index >= 1 { rand_index += 1; } diff --git a/quinn-proto/src/congestion/cubic.rs b/quinn-proto/src/congestion/cubic.rs index e29a2c24df..1d1ec62dc9 100644 --- a/quinn-proto/src/congestion/cubic.rs +++ b/quinn-proto/src/congestion/cubic.rs @@ -2,7 +2,7 @@ use std::any::Any; use std::cmp; use std::sync::Arc; -use super::{Controller, ControllerFactory, BASE_DATAGRAM_SIZE}; +use super::{BASE_DATAGRAM_SIZE, Controller, ControllerFactory}; use crate::connection::RttEstimator; use crate::{Duration, Instant}; @@ -180,8 +180,6 @@ impl Controller for Cubic { self.recovery_start_time = Some(now); // Fast convergence - #[allow(clippy::branches_sharing_code)] - // https://github.com/rust-lang/rust-clippy/issues/7198 if (self.window as f64) < self.cubic_state.w_max { self.cubic_state.w_max = self.window as f64 * (1.0 + BETA_CUBIC) / 2.0; } else { diff --git a/quinn-proto/src/congestion/new_reno.rs b/quinn-proto/src/congestion/new_reno.rs index 2eeb995150..120aa87776 100644 --- a/quinn-proto/src/congestion/new_reno.rs +++ b/quinn-proto/src/congestion/new_reno.rs @@ -1,9 +1,9 @@ use std::any::Any; use std::sync::Arc; -use super::{Controller, ControllerFactory, BASE_DATAGRAM_SIZE}; -use crate::connection::RttEstimator; +use super::{BASE_DATAGRAM_SIZE, Controller, ControllerFactory}; use crate::Instant; +use crate::connection::RttEstimator; /// A simple, standard congestion controller #[derive(Debug, Clone)] diff --git a/quinn-proto/src/connection/ack_frequency.rs b/quinn-proto/src/connection/ack_frequency.rs index ab06e45ac8..8de43d7c9d 100644 --- a/quinn-proto/src/connection/ack_frequency.rs +++ b/quinn-proto/src/connection/ack_frequency.rs @@ -1,8 +1,8 @@ +use crate::Duration; use crate::connection::spaces::PendingAcks; use crate::frame::AckFrequency; use crate::transport_parameters::TransportParameters; -use 
crate::Duration; -use crate::{AckFrequencyConfig, TransportError, VarInt, TIMER_GRANULARITY}; +use crate::{AckFrequencyConfig, TIMER_GRANULARITY, TransportError, VarInt}; /// State associated to ACK frequency pub(super) struct AckFrequencyState { diff --git a/quinn-proto/src/connection/assembler.rs b/quinn-proto/src/connection/assembler.rs index 94b3400854..2288f5ecc4 100644 --- a/quinn-proto/src/connection/assembler.rs +++ b/quinn-proto/src/connection/assembler.rs @@ -1,6 +1,6 @@ use std::{ cmp::Ordering, - collections::{binary_heap::PeekMut, BinaryHeap}, + collections::{BinaryHeap, binary_heap::PeekMut}, mem, }; diff --git a/quinn-proto/src/connection/cid_state.rs b/quinn-proto/src/connection/cid_state.rs index 31b279c3b0..6f8f579f8c 100644 --- a/quinn-proto/src/connection/cid_state.rs +++ b/quinn-proto/src/connection/cid_state.rs @@ -4,13 +4,19 @@ use std::collections::VecDeque; use rustc_hash::FxHashSet; use tracing::{debug, trace}; -use crate::{shared::IssuedCid, Duration, Instant, TransportError}; +use crate::{Duration, Instant, TransportError, shared::IssuedCid}; /// Local connection ID management pub(super) struct CidState { /// Timestamp when issued cids should be retired + /// + /// Each entry indicates the expiration of all timestamps up to the sequence number in + /// the entry. This means one entry can expire multiple CIDs if the sequence number + /// jumps by more than 1 between entries. retire_timestamp: VecDeque, /// Number of local connection IDs that have been issued in NEW_CONNECTION_ID frames. + /// + /// This is thus also the sequence number of the next CID to be issued. 
issued: u64, /// Sequence numbers of local connection IDs not yet retired by the peer active_seq: FxHashSet, @@ -20,7 +26,7 @@ pub(super) struct CidState { retire_seq: u64, /// cid length used to decode short packet cid_len: usize, - //// cid lifetime + /// cid lifetime cid_lifetime: Option, } @@ -52,8 +58,8 @@ impl CidState { this } - /// Find the next timestamp when previously issued CID should be retired - pub(crate) fn next_timeout(&mut self) -> Option { + /// Find the earliest time when previously issued CID should be retired + pub(crate) fn next_timeout(&self) -> Option { self.retire_timestamp.front().map(|nc| { trace!("CID {} will expire at {:?}", nc.sequence, nc.timestamp); nc.timestamp @@ -62,15 +68,11 @@ impl CidState { /// Track the lifetime of issued cids in `retire_timestamp` fn track_lifetime(&mut self, new_cid_seq: u64, now: Instant) { - let lifetime = match self.cid_lifetime { - Some(lifetime) => lifetime, - None => return, + let Some(lifetime) = self.cid_lifetime else { + return; }; - - let expire_timestamp = now.checked_add(lifetime); - let expire_at = match expire_timestamp { - Some(expire_at) => expire_at, - None => return, + let Some(expire_at) = now.checked_add(lifetime) else { + return; }; let last_record = self.retire_timestamp.back_mut(); @@ -132,6 +134,10 @@ impl CidState { } /// Update cid state when `NewIdentifiers` event is received + /// + /// These are newly generated CIDs which we'll send to the peer in + /// (PATH_)NEW_CONNECTION_ID frames in the next packet that is sent. This records them + /// and tracks their lifetime. 
pub(crate) fn new_cids(&mut self, ids: &[IssuedCid], now: Instant) { // `ids` could be `None` once active_connection_id_limit is set to 1 by peer let last_cid = match ids.last() { diff --git a/quinn-proto/src/connection/datagrams.rs b/quinn-proto/src/connection/datagrams.rs index c8d1091faf..0e0a2b7850 100644 --- a/quinn-proto/src/connection/datagrams.rs +++ b/quinn-proto/src/connection/datagrams.rs @@ -1,13 +1,13 @@ use std::collections::VecDeque; -use bytes::Bytes; +use bytes::{BufMut, Bytes}; use thiserror::Error; use tracing::{debug, trace}; use super::Connection; use crate::{ - frame::{Datagram, FrameStruct}, TransportError, + frame::{Datagram, FrameStruct}, }; /// API to control datagram traffic @@ -70,8 +70,9 @@ impl Datagrams<'_> { // We use the conservative overhead bound for any packet number, reducing the budget by at // most 3 bytes, so that PN size fluctuations don't cause users sending maximum-size // datagrams to suffer avoidable packet loss. - let max_size = self.conn.path.current_mtu() as usize - - self.conn.predict_1rtt_overhead(None) + // // TODO(@divma): wrong call + let max_size = self.conn.current_mtu() as usize + - self.conn.predict_1rtt_overhead_no_pn() - Datagram::SIZE_BOUND; let limit = self .conn @@ -163,13 +164,13 @@ impl DatagramState { /// /// Returns whether a frame was written. At most `max_size` bytes will be written, including /// framing. 
- pub(super) fn write(&mut self, buf: &mut Vec, max_size: usize) -> bool { + pub(super) fn write(&mut self, buf: &mut impl BufMut) -> bool { let datagram = match self.outgoing.pop_front() { Some(x) => x, None => return false, }; - if buf.len() + datagram.size(true) > max_size { + if buf.remaining_mut() < datagram.size(true) { // Future work: we could be more clever about cramming small datagrams into // mostly-full packets when a larger one is queued first self.outgoing.push_front(datagram); diff --git a/quinn-proto/src/connection/mod.rs b/quinn-proto/src/connection/mod.rs index c5f4f1a947..bcf48d6469 100644 --- a/quinn-proto/src/connection/mod.rs +++ b/quinn-proto/src/connection/mod.rs @@ -1,25 +1,30 @@ use std::{ cmp, - collections::VecDeque, + collections::{BTreeMap, VecDeque}, convert::TryFrom, fmt, io, mem, net::{IpAddr, SocketAddr}, sync::Arc, }; -use bytes::{Bytes, BytesMut}; +use bytes::{BufMut, Bytes, BytesMut}; use frame::StreamMetaVec; -use rand::{rngs::StdRng, Rng, SeedableRng}; +use rand::{Rng, SeedableRng, rngs::StdRng}; +use rustc_hash::FxHashMap; use thiserror::Error; use tracing::{debug, error, trace, trace_span, warn}; use crate::{ + Dir, Duration, EndpointConfig, Frame, INITIAL_MTU, Instant, MAX_CID_SIZE, MAX_STREAM_COUNT, + MIN_INITIAL_SIZE, Side, StreamId, TIMER_GRANULARITY, TokenStore, Transmit, TransportError, + TransportErrorCode, VarInt, cid_generator::ConnectionIdGenerator, cid_queue::CidQueue, coding::BufMutExt, config::{ServerConfig, TransportConfig}, + congestion::Controller, crypto::{self, KeyPair, Keys, PacketKey}, - frame::{self, Close, Datagram, FrameStruct, ObservedAddr}, + frame::{self, Close, Datagram, FrameStruct, NewToken, ObservedAddr}, packet::{ FixedLengthConnectionIdParser, Header, InitialHeader, InitialPacket, LongType, Packet, PacketNumber, PartialDecode, SpaceId, @@ -29,11 +34,8 @@ use crate::{ ConnectionEvent, ConnectionEventInner, ConnectionId, DatagramConnectionEvent, EcnCodepoint, EndpointEvent, EndpointEventInner, 
}, - token::ResetToken, + token::{ResetToken, Token, TokenPayload}, transport_parameters::TransportParameters, - Dir, Duration, EndpointConfig, Frame, Instant, Side, StreamId, Transmit, TransportError, - TransportErrorCode, VarInt, INITIAL_MTU, MAX_CID_SIZE, MAX_STREAM_COUNT, MIN_INITIAL_SIZE, - TIMER_GRANULARITY, }; mod ack_frequency; @@ -59,8 +61,8 @@ mod packet_crypto; use packet_crypto::{PrevCrypto, ZeroRttCrypto}; mod paths; -pub use paths::RttEstimator; -use paths::{PathData, PathResponses}; +use paths::PathData; +pub use paths::{PathEvent, PathId, PathStatus, RttEstimator}; mod send_buffer; @@ -69,7 +71,7 @@ mod spaces; pub use spaces::Retransmits; #[cfg(not(fuzzing))] use spaces::Retransmits; -use spaces::{PacketNumberFilter, PacketSpace, SendableFrames, SentPacket, ThinRetransmits}; +use spaces::{PacketSpace, SendableFrames, SentPacket, ThinRetransmits}; mod stats; pub use stats::{ConnectionStats, FrameStats, PathStats, UdpStats}; @@ -80,14 +82,16 @@ pub use streams::StreamsState; #[cfg(not(fuzzing))] use streams::StreamsState; pub use streams::{ - BytesSource, Chunks, ClosedStream, FinishError, ReadError, ReadableError, RecvStream, - SendStream, ShouldTransmit, StreamEvent, Streams, WriteError, Written, + Chunks, ClosedStream, FinishError, ReadError, ReadableError, RecvStream, SendStream, + ShouldTransmit, StreamEvent, Streams, WriteError, Written, }; mod timer; -use crate::congestion::Controller; use timer::{Timer, TimerTable}; +mod transmit_buf; +use transmit_buf::TransmitBuf; + /// Protocol state and logic for a single QUIC connection /// /// Objects of this type receive [`ConnectionEvent`]s and emit [`EndpointEvent`]s and application @@ -139,10 +143,14 @@ pub struct Connection { /// The "real" local IP address which was was used to receive the initial packet. 
/// This is only populated for the server case, and if known local_ip: Option, - path: PathData, + /// The [`PathData`] for each path + /// + /// This needs to be ordered because [`Connection::poll_transmit`] needs to + /// deterministically select the next PathId to send on. + // TODO(flub): well does it really? But deterministic is nice for now. + paths: BTreeMap, /// Whether MTU detection is supported in this environment allow_mtud: bool, - prev_path: Option<(ConnectionId, PathData)>, state: State, side: ConnectionSide, /// Whether or not 0-RTT was enabled during the handshake. Does not imply acceptance. @@ -171,7 +179,7 @@ pub struct Connection { spin: bool, /// Packet number spaces: initial, handshake, 1-RTT spaces: [PacketSpace; 3], - /// Highest usable packet number space + /// Highest usable [`SpaceId`] highest_space: SpaceId, /// 1-RTT keys used prior to a key update prev_crypto: Option, @@ -190,14 +198,11 @@ pub struct Connection { authentication_failures: u64, /// Why the connection was lost, if it has been error: Option, - /// Identifies Data-space packet numbers to skip. Not used in earlier spaces. - packet_number_filter: PacketNumberFilter, // // Queued non-retransmittable 1-RTT data // - /// Responses to PATH_CHALLENGE frames - path_responses: PathResponses, + /// If the CONNECTION_CLOSE frame needs to be sent close: bool, // @@ -205,12 +210,6 @@ pub struct Connection { // ack_frequency: AckFrequencyState, - // - // Loss Detection - // - /// The number of times a PTO has been sent without receiving an ack. - pto_count: u32, - // // Congestion Control // @@ -230,15 +229,41 @@ pub struct Connection { streams: StreamsState, /// Surplus remote CIDs for future use on new paths - rem_cids: CidQueue, - // Attributes of CIDs generated by local peer - local_cid_state: CidState, + /// + /// These are given out before multiple paths exist, also for paths that will never + /// exist. 
So if multipath is supported the number of paths here will be higher than + /// the actual number of paths in use. + rem_cids: FxHashMap, + /// Attributes of CIDs generated by local endpoint + local_cid_state: FxHashMap, /// State of the unreliable datagram extension datagrams: DatagramState, /// Connection level statistics stats: ConnectionStats, /// QUIC version used for the connection. version: u32, + + /// Local maximum [`PathId`] to be used. + /// + /// This is initially set to [`TransportConfig::get_initial_max_path_id`] when multipath is + /// negotiated, or to [`PathId::ZERO`] otherwise. + local_max_path_id: PathId, + /// Remote's maximum [`PathId`] to be used. + /// + /// This is initially set to the peer's [`TransportParameters::initial_max_path_id`] when + /// multipath is negotiated, or to [`PathId::ZERO`] otherwise. A peer may increase this limit + /// by sending [`Frame::MaxPathId`] frames. + remote_max_path_id: PathId, + /// The greatest [`PathId`] this connection has used. + /// + /// This is kept instead of calculated to account for abandoned paths for which data has been + /// purged. 
+ max_path_id_in_use: PathId, +} + +struct PathState { + data: PathData, + prev: Option<(ConnectionId, PathData)>, } impl Connection { @@ -262,31 +287,51 @@ impl Connection { let path_validated = side_args.path_validated(); let connection_side = ConnectionSide::from(side_args); let side = connection_side.side(); - let initial_space = PacketSpace { - crypto: Some(crypto.initial_keys(&init_cid, side)), - ..PacketSpace::new(now) + let mut rng = StdRng::from_seed(rng_seed); + let initial_space = { + let mut space = PacketSpace::new(now, SpaceId::Initial, &mut rng); + space.crypto = Some(crypto.initial_keys(&init_cid, side)); + space + }; + let handshake_space = PacketSpace::new(now, SpaceId::Handshake, &mut rng); + #[cfg(test)] + let data_space = match config.deterministic_packet_numbers { + true => PacketSpace::new_deterministic(now, SpaceId::Data), + false => PacketSpace::new(now, SpaceId::Data, &mut rng), }; + #[cfg(not(test))] + let data_space = PacketSpace::new(now, SpaceId::Data, &mut rng); let state = State::Handshake(state::Handshake { rem_cid_set: side.is_server(), expected_token: Bytes::new(), client_hello: None, }); - let mut rng = StdRng::from_seed(rng_seed); - let mut this = Self { - endpoint_config, - crypto, - handshake_cid: loc_cid, - rem_handshake_cid: rem_cid, - local_cid_state: CidState::new( + let local_cid_state = FxHashMap::from_iter([( + PathId(0), + CidState::new( cid_gen.cid_len(), cid_gen.cid_lifetime(), now, if pref_addr_cid.is_some() { 2 } else { 1 }, ), - path: PathData::new(remote, allow_mtud, None, now, path_validated, &config), + )]); + + let path = PathData::new(remote, allow_mtud, None, now, &config); + let mut this = Self { + endpoint_config, + crypto, + handshake_cid: loc_cid, + rem_handshake_cid: rem_cid, + local_cid_state, + paths: BTreeMap::from_iter([( + PathId(0), + PathState { + data: path, + prev: None, + }, + )]), allow_mtud, local_ip, - prev_path: None, state, side: connection_side, zero_rtt_enabled: false, @@ -298,7 +343,7 
@@ impl Connection { // simultaneous key update by both is just like a regular key update with a really fast // response. Inspired by quic-go's similar behavior of performing the first key update // at the 100th short-header packet. - key_phase_size: rng.gen_range(10..1000), + key_phase_size: rng.random_range(10..1000), peer_params: TransportParameters::default(), orig_rem_cid: rem_cid, initial_dst_cid: init_cid, @@ -306,9 +351,9 @@ impl Connection { lost_packets: 0, events: VecDeque::new(), endpoint_events: VecDeque::new(), - spin_enabled: config.allow_spin && rng.gen_ratio(7, 8), + spin_enabled: config.allow_spin && rng.random_ratio(7, 8), spin: false, - spaces: [initial_space, PacketSpace::new(now), PacketSpace::new(now)], + spaces: [initial_space, handshake_space, data_space], highest_space: SpaceId::Initial, prev_crypto: None, next_crypto: None, @@ -321,23 +366,12 @@ impl Connection { timers: TimerTable::default(), authentication_failures: 0, error: None, - #[cfg(test)] - packet_number_filter: match config.deterministic_packet_numbers { - false => PacketNumberFilter::new(&mut rng), - true => PacketNumberFilter::disabled(), - }, - #[cfg(not(test))] - packet_number_filter: PacketNumberFilter::new(&mut rng), - - path_responses: PathResponses::default(), close: false, ack_frequency: AckFrequencyState::new(get_max_ack_delay( &TransportParameters::default(), )), - pto_count: 0, - app_limited: false, receiving_ecn: false, total_authed_packets: 0, @@ -354,11 +388,19 @@ impl Connection { ), datagrams: DatagramState::default(), config, - rem_cids: CidQueue::new(rem_cid), + rem_cids: FxHashMap::from_iter([(PathId(0), CidQueue::new(rem_cid))]), rng, stats: ConnectionStats::default(), version, + + // peer params are not yet known, so multipath is not enabled + local_max_path_id: PathId::ZERO, + remote_max_path_id: PathId::ZERO, + max_path_id_in_use: PathId::ZERO, }; + if path_validated { + this.on_path_validated(PathId(0)); + } if side.is_client() { // Kick off the 
connection this.write_crypto(); @@ -376,7 +418,7 @@ impl Connection { /// - a call was made to `handle_timeout` #[must_use] pub fn poll_timeout(&mut self) -> Option { - self.timers.next_timeout() + self.timers.peek().map(|entry| entry.time) } /// Returns application-facing events @@ -439,6 +481,39 @@ impl Connection { } } + /// Opens a path + pub fn open_path(&mut self, _addr: SocketAddr, _initial_status: PathStatus) -> PathId { + todo!() + } + + /// Closes a path + pub fn close_path(&mut self, _id: PathId, _error_code: VarInt) { + todo!() + } + + /// Gets the [`PathData`] for a known [`PathId`]. + /// + /// Will panic if the path_id does not reference any known path. + #[track_caller] + fn path_data(&self, path_id: PathId) -> &PathData { + &self.paths.get(&path_id).expect("known path").data + } + + /// Gets the [`PathStatus`] for a known [`PathId`]. + /// + /// Will panic if the path_id does not reference any known path. + pub fn path_status(&self, path_id: PathId) -> PathStatus { + self.path_data(path_id).status + } + + /// Gets the [`PathData`] for a known [`PathId`]. + /// + /// Will panic if the path_id does not reference any known path. + #[track_caller] + fn path_data_mut(&mut self, path_id: PathId) -> &mut PathData { + &mut self.paths.get_mut(&path_id).expect("known path").data + } + /// Returns packets to transmit /// /// Connections should be polled for transmit after: @@ -458,76 +533,25 @@ impl Connection { assert!(max_datagrams != 0); let max_datagrams = match self.config.enable_segmentation_offload { false => 1, - true => max_datagrams.min(MAX_TRANSMIT_SEGMENTS), + true => max_datagrams, }; - let mut num_datagrams = 0; - // Position in `buf` of the first byte of the current UDP datagram. When coalescing QUIC - // packets, this can be earlier than the start of the current QUIC packet. 
- let mut datagram_start = 0; - let mut segment_size = usize::from(self.path.current_mtu()); - - // Send PATH_CHALLENGE for a previous path if necessary - if let Some((prev_cid, ref mut prev_path)) = self.prev_path { - if prev_path.challenge_pending { - prev_path.challenge_pending = false; - let token = prev_path - .challenge - .expect("previous path challenge pending without token"); - let destination = prev_path.remote; - debug_assert_eq!( - self.highest_space, - SpaceId::Data, - "PATH_CHALLENGE queued without 1-RTT keys" - ); - buf.reserve(MIN_INITIAL_SIZE as usize); + // Each call to poll_transmit can only send datagrams to one destination, because + // all datagrams in a GSO batch are for the same destination. Therefore only + // datagrams for one Path ID are produced for each poll_transmit call. - let buf_capacity = buf.capacity(); + // First, if we have to send a close, select a path for that. + // Next, all paths that have a PATH_CHALLENGE or PATH_RESPONSE pending. - // Use the previous CID to avoid linking the new path with the previous path. We - // don't bother accounting for possible retirement of that prev_cid because this is - // sent once, immediately after migration, when the CID is known to be valid. Even - // if a post-migration packet caused the CID to be retired, it's fair to pretend - // this is sent first. 
- let mut builder = PacketBuilder::new( - now, - SpaceId::Data, - prev_cid, - buf, - buf_capacity, - 0, - false, - self, - )?; - trace!("validating previous path with PATH_CHALLENGE {:08x}", token); - buf.write(frame::FrameType::PATH_CHALLENGE); - buf.write(token); - self.stats.frame_tx.path_challenge += 1; - - // An endpoint MUST expand datagrams that contain a PATH_CHALLENGE frame - // to at least the smallest allowed maximum datagram size of 1200 bytes, - // unless the anti-amplification limit for the path does not permit - // sending a datagram of this size - builder.pad_to(MIN_INITIAL_SIZE); - - builder.finish(self, buf); - self.stats.udp_tx.on_sent(1, buf.len()); - return Some(Transmit { - destination, - size: buf.len(), - ecn: None, - segment_size: None, - src_ip: self.local_ip, - }); - } - } + // For all AVAILABLE paths: + // - Is the path congestion blocked or pacing blocked? + // - call maybe_queue_ to ensure a tail-loss probe would be sent? + // - do we need to send a close message? + // - call can_send + // Once there's nothing more to send on the AVAILABLE paths, do the same for BACKUP paths - // If we need to send a probe, make sure we have something to send. - for space in SpaceId::iter() { - let request_immediate_ack = - space == SpaceId::Data && self.peer_supports_ack_frequency(); - self.spaces[space].maybe_queue_probe(request_immediate_ack, &self.streams); - } + // What about PATH_CHALLENGE or PATH_RESPONSE? We need to check if we need to send + // any of those. 
// Check whether we need to send a close message let close = match self.state { @@ -549,247 +573,177 @@ impl Connection { // Check whether we need to send an ACK_FREQUENCY frame if let Some(config) = &self.config.ack_frequency_config { + let rtt = self + .paths + .values() + .map(|p| p.data.rtt.get()) + .min() + .expect("one path exists"); self.spaces[SpaceId::Data].pending.ack_frequency = self .ack_frequency - .should_send_ack_frequency(self.path.rtt.get(), config, &self.peer_params) + .should_send_ack_frequency(rtt, config, &self.peer_params) && self.highest_space == SpaceId::Data && self.peer_supports_ack_frequency(); } - // Reserving capacity can provide more capacity than we asked for. However, we are not - // allowed to write more than `segment_size`. Therefore the maximum capacity is tracked - // separately. - let mut buf_capacity = 0; - + // Whether this packet can be coalesced with another one in the same datagram. let mut coalesce = true; - let mut builder_storage: Option = None; - let mut sent_frames = None; + + // Whether the last packet in the datagram must be padded so the datagram takes up + // to at least MIN_INITIAL_SIZE, or to the maximum segment size if this is smaller. let mut pad_datagram = false; + + // Whether congestion control stopped the next packet from being sent. Further + // packets could still be built, as e.g. tail-loss probes are not congestion + // limited. let mut congestion_blocked = false; - // Iterate over all spaces and find data to send - let mut space_idx = 0; - let spaces = [SpaceId::Initial, SpaceId::Handshake, SpaceId::Data]; - // This loop will potentially spend multiple iterations in the same `SpaceId`, - // so we cannot trivially rewrite it to take advantage of `SpaceId::iter()`. - while space_idx < spaces.len() { - let space_id = spaces[space_idx]; - // Number of bytes available for frames if this is a 1-RTT packet. 
We're guaranteed to - // be able to send an individual frame at least this large in the next 1-RTT - // packet. This could be generalized to support every space, but it's only needed to - // handle large fixed-size frames, which only exist in 1-RTT (application datagrams). We - // don't account for coalesced packets potentially occupying space because frames can - // always spill into the next datagram. - let pn = self.packet_number_filter.peek(&self.spaces[SpaceId::Data]); - let frame_space_1rtt = - segment_size.saturating_sub(self.predict_1rtt_overhead(Some(pn))); - - // Is there data or a close message to send in this space? - let can_send = self.space_can_send(space_id, frame_space_1rtt); - if can_send.is_empty() && (!close || self.spaces[space_id].crypto.is_none()) { - space_idx += 1; - continue; - } + // The packet number of the last built packet. + let mut last_packet_number = None; - let mut ack_eliciting = !self.spaces[space_id].pending.is_empty(&self.streams) - || self.spaces[space_id].ping_pending - || self.spaces[space_id].immediate_ack_pending; - if space_id == SpaceId::Data { - ack_eliciting |= self.can_send_1rtt(frame_space_1rtt); - } + let mut path_id = *self.paths.first_key_value().expect("one path must exist").0; + + // If there is any available path we only want to send frames to a backup path that + // must be sent on that path. + let have_available_path = self + .paths + .values() + .any(|path| path.data.status == PathStatus::Available); + + // Setup for the first path_id + let mut transmit = TransmitBuf::new( + buf, + max_datagrams, + self.path_data(path_id).current_mtu().into(), + ); + if let Some(challenge) = self.send_prev_path_challenge(now, &mut transmit, path_id) { + return Some(challenge); + } + let mut space_id = match path_id { + PathId(0) => SpaceId::Initial, + _ => SpaceId::Data, + }; - // Can we append more data into the current buffer? 
- // It is not safe to assume that `buf.len()` is the end of the data, - // since the last packet might not have been finished. - let buf_end = if let Some(builder) = &builder_storage { - buf.len().max(builder.min_size) + builder.tag_len + loop { + // Determine if anything can be sent in this packet number space (SpaceId + + // PathId). + let max_packet_size = if transmit.datagram_remaining_mut() > 0 { + // We are trying to coalesce another packet into this datagram. + transmit.datagram_remaining_mut() } else { - buf.len() + // A new datagram needs to be started. + transmit.segment_size() + }; + let can_send = self.space_can_send(space_id, path_id, max_packet_size, close); + let path_should_send = { + let path_exclusive_only = space_id == SpaceId::Data + && have_available_path + && self.path_data(path_id).status == PathStatus::Backup; + let path_should_send = if path_exclusive_only { + can_send.path_exclusive + } else { + !can_send.is_empty() + }; + let needs_loss_probe = self.spaces[space_id].for_path(path_id).loss_probes > 0; + path_should_send || needs_loss_probe || can_send.close }; - let tag_len = if let Some(ref crypto) = self.spaces[space_id].crypto { - crypto.packet.local.tag_len() - } else if space_id == SpaceId::Data { - self.zero_rtt_crypto.as_ref().expect( - "sending packets in the application data space requires known 0-RTT or 1-RTT keys", - ).packet.tag_len() + if !path_should_send && space_id < SpaceId::Data { + trace!(?space_id, ?path_id, "nothing to send"); + space_id = space_id.next(); + continue; + } + + let send_blocked = if path_should_send && transmit.datagram_remaining_mut() == 0 { + // Only check congestion control if a new datagram is needed. 
+ self.path_congestion_check(space_id, path_id, &transmit, &can_send, now) } else { - unreachable!("tried to send {:?} packet without keys", space_id) + PathBlocked::No }; - if !coalesce || buf_capacity - buf_end < MIN_PACKET_SPACE + tag_len { - // We need to send 1 more datagram and extend the buffer for that. + if send_blocked != PathBlocked::No { + trace!(?space_id, ?path_id, ?send_blocked, "congestion blocked"); + congestion_blocked = true; + } + if send_blocked == PathBlocked::Congestion && space_id < SpaceId::Data { + // Higher spaces might still have tail-loss probes to send, which are not + // congestion blocked. + space_id = space_id.next(); + continue; + } + if !path_should_send || send_blocked != PathBlocked::No { + // Nothing more to send on this path, check the next path if possible. - // Is 1 more datagram allowed? - if buf_capacity >= segment_size * max_datagrams { - // No more datagrams allowed + // If there are any datagrams in the transmit, packets for another path can + // not be built. + if transmit.num_datagrams() > 0 { break; } - // Anti-amplification is only based on `total_sent`, which gets - // updated at the end of this method. Therefore we pass the amount - // of bytes for datagrams that are already created, as well as 1 byte - // for starting another datagram. If there is any anti-amplification - // budget left, we always allow a full MTU to be sent - // (see https://github.com/quinn-rs/quinn/issues/1082) - if self - .path - .anti_amplification_blocked(segment_size as u64 * num_datagrams + 1) - { - trace!("blocked by anti-amplification"); - break; - } + match self.paths.keys().find(|&&next| next > path_id) { + Some(next_path_id) => { + // See if this next path can send anything. 
+ trace!(?space_id, ?path_id, ?next_path_id, "trying next path"); + path_id = *next_path_id; + space_id = SpaceId::Data; + + // update per path state + transmit.set_segment_size(self.path_data(path_id).current_mtu().into()); + if let Some(challenge) = + self.send_prev_path_challenge(now, &mut transmit, path_id) + { + return Some(challenge); + } - // Congestion control and pacing checks - // Tail loss probes must not be blocked by congestion, or a deadlock could arise - if ack_eliciting && self.spaces[space_id].loss_probes == 0 { - // Assume the current packet will get padded to fill the segment - let untracked_bytes = if let Some(builder) = &builder_storage { - buf_capacity - builder.partial_encode.start - } else { - 0 - } as u64; - debug_assert!(untracked_bytes <= segment_size as u64); - - let bytes_to_send = segment_size as u64 + untracked_bytes; - if self.path.in_flight.bytes + bytes_to_send >= self.path.congestion.window() { - space_idx += 1; - congestion_blocked = true; - // We continue instead of breaking here in order to avoid - // blocking loss probes queued for higher spaces. - trace!("blocked by congestion control"); continue; } - - // Check whether the next datagram is blocked by pacing - let smoothed_rtt = self.path.rtt.get(); - if let Some(delay) = self.path.pacing.delay( - smoothed_rtt, - bytes_to_send, - self.path.current_mtu(), - self.path.congestion.window(), - now, - ) { - self.timers.set(Timer::Pacing, delay); - congestion_blocked = true; - // Loss probes should be subject to pacing, even though - // they are not congestion controlled. - trace!("blocked by pacing"); + None => { + // Nothing more to send. + trace!(?space_id, ?path_id, "no higher path id to send on"); break; } } + } - // Finish current packet - if let Some(mut builder) = builder_storage.take() { - if pad_datagram { - builder.pad_to(MIN_INITIAL_SIZE); - } + // If the datagram is full, we need to start a new one. 
+ if transmit.datagram_remaining_mut() == 0 { + if transmit.num_datagrams() >= transmit.max_datagrams() { + // No more datagrams allowed + break; + } - if num_datagrams > 1 { - // If too many padding bytes would be required to continue the GSO batch - // after this packet, end the GSO batch here. Ensures that fixed-size frames - // with heterogeneous sizes (e.g. application datagrams) won't inadvertently - // waste large amounts of bandwidth. The exact threshold is a bit arbitrary - // and might benefit from further tuning, though there's no universally - // optimal value. - // - // Additionally, if this datagram is a loss probe and `segment_size` is - // larger than `INITIAL_MTU`, then padding it to `segment_size` to continue - // the GSO batch would risk failure to recover from a reduction in path - // MTU. Loss probes are the only packets for which we might grow - // `buf_capacity` by less than `segment_size`. - const MAX_PADDING: usize = 16; - let packet_len_unpadded = cmp::max(builder.min_size, buf.len()) - - datagram_start - + builder.tag_len; - if packet_len_unpadded + MAX_PADDING < segment_size - || datagram_start + segment_size > buf_capacity - { - trace!( - "GSO truncated by demand for {} padding bytes or loss probe", - segment_size - packet_len_unpadded - ); - builder_storage = Some(builder); - break; - } + match self.spaces[space_id].for_path(path_id).loss_probes { + 0 => transmit.start_new_datagram(), + _ => { + // We need something to send for a tail-loss probe. + let request_immediate_ack = + space_id == SpaceId::Data && self.peer_supports_ack_frequency(); + self.spaces[space_id].maybe_queue_probe( + path_id, + request_immediate_ack, + &self.streams, + ); - // Pad the current datagram to GSO segment size so it can be included in the - // GSO batch. 
- builder.pad_to(segment_size as u16); - } + self.spaces[space_id].for_path(path_id).loss_probes -= 1; - builder.finish_and_track(now, self, sent_frames.take(), buf); - - if num_datagrams == 1 { - // Set the segment size for this GSO batch to the size of the first UDP - // datagram in the batch. Larger data that cannot be fragmented - // (e.g. application datagrams) will be included in a future batch. When - // sending large enough volumes of data for GSO to be useful, we expect - // packet sizes to usually be consistent, e.g. populated by max-size STREAM - // frames or uniformly sized datagrams. - segment_size = buf.len(); - // Clip the unused capacity out of the buffer so future packets don't - // overrun - buf_capacity = buf.len(); - - // Check whether the data we planned to send will fit in the reduced segment - // size. If not, bail out and leave it for the next GSO batch so we don't - // end up trying to send an empty packet. We can't easily compute the right - // segment size before the original call to `space_can_send`, because at - // that time we haven't determined whether we're going to coalesce with the - // first datagram or potentially pad it to `MIN_INITIAL_SIZE`. - if space_id == SpaceId::Data { - let frame_space_1rtt = - segment_size.saturating_sub(self.predict_1rtt_overhead(Some(pn))); - if self.space_can_send(space_id, frame_space_1rtt).is_empty() { - break; - } - } + // Clamp the datagram to at most the minimum MTU to ensure that loss + // probes can get through and enable recovery even if the path MTU + // has shrunk unexpectedly. 
+ transmit.start_new_datagram_with_size(std::cmp::min( + usize::from(INITIAL_MTU), + transmit.segment_size(), + )); } } - - // Allocate space for another datagram - let next_datagram_size_limit = match self.spaces[space_id].loss_probes { - 0 => segment_size, - _ => { - self.spaces[space_id].loss_probes -= 1; - // Clamp the datagram to at most the minimum MTU to ensure that loss probes - // can get through and enable recovery even if the path MTU has shrank - // unexpectedly. - usize::from(INITIAL_MTU) - } - }; - buf_capacity += next_datagram_size_limit; - if buf.capacity() < buf_capacity { - // We reserve the maximum space for sending `max_datagrams` upfront - // to avoid any reallocations if more datagrams have to be appended later on. - // Benchmarks have shown shown a 5-10% throughput improvement - // compared to continuously resizing the datagram buffer. - // While this will lead to over-allocation for small transmits - // (e.g. purely containing ACKs), modern memory allocators - // (e.g. mimalloc and jemalloc) will pool certain allocation sizes - // and therefore this is still rather efficient. - buf.reserve(max_datagrams * segment_size); - } - num_datagrams += 1; + trace!(count = transmit.num_datagrams(), "new datagram started"); coalesce = true; pad_datagram = false; - datagram_start = buf.len(); - - debug_assert_eq!( - datagram_start % segment_size, - 0, - "datagrams in a GSO batch must be aligned to the segment size" - ); - } else { - // We can append/coalesce the next packet into the current - // datagram. - // Finish current packet without adding extra padding - if let Some(builder) = builder_storage.take() { - builder.finish_and_track(now, self, sent_frames.take(), buf); - } } - debug_assert!(buf_capacity - buf.len() >= MIN_PACKET_SPACE); + // If coalescing another packet into the existing datagram, there should + // still be enough space for a whole packet. 
+ if transmit.datagram_start_offset() < transmit.len() { + debug_assert!(transmit.datagram_remaining_mut() >= MIN_PACKET_SPACE); + } // // From here on, we've determined that a packet will definitely be sent. @@ -807,64 +761,63 @@ impl Connection { prev.update_unacked = false; } - debug_assert!( - builder_storage.is_none() && sent_frames.is_none(), - "Previous packet must have been finished" - ); - - let builder = builder_storage.insert(PacketBuilder::new( + // TODO(flub): I'm not particularly happy about this unwrap. But let's leave it + // for now until more stuff is settled. We probably should check earlier on + // in poll_transmit that we have a valid CID to use. + let mut builder = PacketBuilder::new( now, space_id, - self.rem_cids.active(), - buf, - buf_capacity, - datagram_start, - ack_eliciting, + path_id, + self.rem_cids.get(&path_id).unwrap().active(), + &mut transmit, + can_send.other, self, - )?); + )?; + last_packet_number = Some(builder.exact_number); coalesce = coalesce && !builder.short_header; - // https://tools.ietf.org/html/draft-ietf-quic-transport-34#section-14.1 + // https://www.rfc-editor.org/rfc/rfc9000.html#section-14.1 pad_datagram |= - space_id == SpaceId::Initial && (self.side.is_client() || ack_eliciting); + space_id == SpaceId::Initial && (self.side.is_client() || can_send.other); - if close { + if can_send.close { trace!("sending CONNECTION_CLOSE"); // Encode ACKs before the ConnectionClose message, to give the receiver // a better approximate on what data has been processed. This is // especially important with ack delay, since the peer might not // have gotten any other ACK for the data earlier on. 
- if !self.spaces[space_id].pending_acks.ranges().is_empty() { - Self::populate_acks( - now, - self.receiving_ecn, - &mut SentFrames::default(), - &mut self.spaces[space_id], - buf, - &mut self.stats, - ); - } + let mut sent_frames = SentFrames::default(); + let is_multipath_enabled = self.is_multipath_negotiated(); + Self::populate_acks( + now, + self.receiving_ecn, + &mut sent_frames, + &mut self.spaces[space_id], + is_multipath_enabled, + &mut builder.frame_space_mut(), + &mut self.stats, + ); // Since there only 64 ACK frames there will always be enough space // to encode the ConnectionClose frame too. However we still have the // check here to prevent crashes if something changes. debug_assert!( - buf.len() + frame::ConnectionClose::SIZE_BOUND < builder.max_size, + builder.frame_space_remaining() > frame::ConnectionClose::SIZE_BOUND, "ACKs should leave space for ConnectionClose" ); - if buf.len() + frame::ConnectionClose::SIZE_BOUND < builder.max_size { - let max_frame_size = builder.max_size - buf.len(); + if frame::ConnectionClose::SIZE_BOUND < builder.frame_space_remaining() { + let max_frame_size = builder.frame_space_remaining(); match self.state { State::Closed(state::Closed { ref reason }) => { if space_id == SpaceId::Data || reason.is_transport_layer() { - reason.encode(buf, max_frame_size) + reason.encode(&mut builder.frame_space_mut(), max_frame_size) } else { frame::ConnectionClose { error_code: TransportErrorCode::APPLICATION_ERROR, frame_type: None, reason: Bytes::new(), } - .encode(buf, max_frame_size) + .encode(&mut builder.frame_space_mut(), max_frame_size) } } State::Draining => frame::ConnectionClose { @@ -872,51 +825,56 @@ impl Connection { frame_type: None, reason: Bytes::new(), } - .encode(buf, max_frame_size), + .encode(&mut builder.frame_space_mut(), max_frame_size), _ => unreachable!( "tried to make a close packet when the connection wasn't closed" ), } } - if space_id == self.highest_space { + builder.finish_and_track(now, self, 
path_id, sent_frames, pad_datagram); + if space_id == self.highest_space && path_id == *self.paths.keys().max().unwrap() { // Don't send another close packet self.close = false; // `CONNECTION_CLOSE` is the final packet break; } else { - // Send a close frame in every possible space for robustness, per RFC9000 - // "Immediate Close during the Handshake". Don't bother trying to send anything - // else. - space_idx += 1; + // Send a close frame in every possible space for robustness, per + // RFC9000 "Immediate Close during the Handshake". Don't bother trying + // to send anything else. + space_id = space_id.next(); continue; } } - // Send an off-path PATH_RESPONSE. Prioritized over on-path data to ensure that path - // validation can occur while the link is saturated. - if space_id == SpaceId::Data && num_datagrams == 1 { - if let Some((token, remote)) = self.path_responses.pop_off_path(&self.path.remote) { - // `unwrap` guaranteed to succeed because `builder_storage` was populated just - // above. - let mut builder = builder_storage.take().unwrap(); + // Send an off-path PATH_RESPONSE. Prioritized over on-path data to ensure that + // path validation can occur while the link is saturated. + if space_id == SpaceId::Data && builder.buf.num_datagrams() == 1 { + let path = self.path_data_mut(path_id); + if let Some((token, remote)) = path.path_responses.pop_off_path(path.remote) { + // TODO(flub): We need to use the right CID! We shouldn't use the same + // CID as the current active one for the path. 
Though see also + // https://github.com/quinn-rs/quinn/issues/2184 trace!("PATH_RESPONSE {:08x} (off-path)", token); - buf.write(frame::FrameType::PATH_RESPONSE); - buf.write(token); + builder + .frame_space_mut() + .write(frame::FrameType::PATH_RESPONSE); + builder.frame_space_mut().write(token); self.stats.frame_tx.path_response += 1; builder.pad_to(MIN_INITIAL_SIZE); builder.finish_and_track( now, self, - Some(SentFrames { + path_id, + SentFrames { non_retransmits: true, ..SentFrames::default() - }), - buf, + }, + false, ); - self.stats.udp_tx.on_sent(1, buf.len()); + self.stats.udp_tx.on_sent(1, transmit.len()); return Some(Transmit { destination: remote, - size: buf.len(), + size: transmit.len(), ecn: None, segment_size: None, src_ip: self.local_ip, @@ -924,8 +882,19 @@ impl Connection { } } - let sent = - self.populate_packet(now, space_id, buf, builder.max_size, builder.exact_number); + let sent_frames = { + let path_exclusive_only = + have_available_path && self.path_data(path_id).status == PathStatus::Backup; + let pn = builder.exact_number; + self.populate_packet( + now, + space_id, + path_id, + path_exclusive_only, + &mut builder.frame_space_mut(), + pn, + ) + }; // ACK-only packets should only be sent when explicitly allowed. If we write them due to // any other reason, there is a bug which leads to one component announcing write @@ -934,70 +903,141 @@ impl Connection { // frames aren't queued, so that lack of space in the datagram isn't the reason for just // writing ACKs. 
debug_assert!( - !(sent.is_ack_only(&self.streams) + !(sent_frames.is_ack_only(&self.streams) && !can_send.acks && can_send.other - && (buf_capacity - builder.datagram_start) == self.path.current_mtu() as usize + && builder.buf.segment_size() + == self.path_data(path_id).current_mtu() as usize && self.datagrams.outgoing.is_empty()), "SendableFrames was {can_send:?}, but only ACKs have been written" ); - pad_datagram |= sent.requires_padding; + pad_datagram |= sent_frames.requires_padding; - if sent.largest_acked.is_some() { + if sent_frames.largest_acked.is_some() { self.spaces[space_id].pending_acks.acks_sent(); self.timers.stop(Timer::MaxAckDelay); } - // Keep information about the packet around until it gets finalized - sent_frames = Some(sent); + // Now we need to finish the packet. Before we do so we need to know if we will + // be coalescing the next packet into this one, or will be ending the datagram + // as well. Because if this is the last packet in the datagram more padding + // might be needed because of the packet type, or to fill the GSO segment size. + + // Are we allowed to coalesce AND is there enough space for another *packet* in + // this datagram AND is there another packet to send in this or the next space? 
+ if coalesce + && builder + .buf + .datagram_remaining_mut() + .saturating_sub(builder.predict_packet_end()) + > MIN_PACKET_SPACE + && self + .next_send_space(space_id, path_id, builder.buf, close) + .is_some() + // && (matches!( + // self.space_ready_to_send(path_id, space_id, builder.buf, close, now), + // SendReady::Frames(can_send) if !can_send.is_empty(), + // ) || matches!( + // self.space_ready_to_send(path_id, space_id.next(), builder.buf, close, now), + // SendReady::Frames(can_send) if !can_send.is_empty(), + // ) || matches!( + // self.space_ready_to_send(path_id, space_id.next().next(), builder.buf, close, now), + // SendReady::Frames(can_send) if !can_send.is_empty(), + // )) + { + // We can append/coalesce the next packet into the current + // datagram. Finish the current packet without adding extra padding. + builder.finish_and_track(now, self, path_id, sent_frames, false); + } else { + // We need a new datagram for the next packet. Finish the current + // packet with padding. + if builder.buf.num_datagrams() > 1 { + // If too many padding bytes would be required to continue the + // GSO batch after this packet, end the GSO batch here. Ensures + // that fixed-size frames with heterogeneous sizes + // (e.g. application datagrams) won't inadvertently waste large + // amounts of bandwidth. The exact threshold is a bit arbitrary + // and might benefit from further tuning, though there's no + // universally optimal value. + // + // Additionally, if this datagram is a loss probe and + // `segment_size` is larger than `INITIAL_MTU`, then padding it + // to `segment_size` to continue the GSO batch would risk + // failure to recover from a reduction in path MTU. Loss probes + // are the only packets for which we might grow `buf_capacity` + // by less than `segment_size`. 
+ const MAX_PADDING: usize = 16; + if builder.buf.datagram_remaining_mut() + > builder.predict_packet_end() + MAX_PADDING + { + trace!( + "GSO truncated by demand for {} padding bytes", + builder.buf.datagram_remaining_mut() - builder.predict_packet_end() + ); + builder.finish_and_track(now, self, path_id, sent_frames, pad_datagram); + break; + } + + // Pad the current datagram to GSO segment size so it can be + // included in the GSO batch. + builder.pad_to(builder.buf.segment_size() as u16); + } - // Don't increment space_idx. - // We stay in the current space and check if there is more data to send. - } + builder.finish_and_track(now, self, path_id, sent_frames, pad_datagram); - // Finish the last packet - if let Some(mut builder) = builder_storage { - if pad_datagram { - builder.pad_to(MIN_INITIAL_SIZE); + if transmit.num_datagrams() == 1 { + transmit.clip_datagram_size(); + } } - let last_packet_number = builder.exact_number; - builder.finish_and_track(now, self, sent_frames, buf); - self.path - .congestion - .on_sent(now, buf.len() as u64, last_packet_number); } - self.app_limited = buf.is_empty() && !congestion_blocked; + if let Some(last_packet_number) = last_packet_number { + // Note that when sending in multiple packet spaces the last packet number will + // be the one from the highest packet space. 
+ self.path_data_mut(path_id).congestion.on_sent( + now, + transmit.len() as u64, + last_packet_number, + ); + } + + self.app_limited = transmit.is_empty() && !congestion_blocked; // Send MTU probe if necessary - if buf.is_empty() && self.state.is_established() { + if transmit.is_empty() && self.state.is_established() { let space_id = SpaceId::Data; + let next_pn = self.spaces[space_id].for_path(path_id).peek_tx_number(); let probe_size = self - .path + .path_data_mut(path_id) .mtud - .poll_transmit(now, self.packet_number_filter.peek(&self.spaces[space_id]))?; + .poll_transmit(now, next_pn)?; - let buf_capacity = probe_size as usize; - buf.reserve(buf_capacity); + debug_assert_eq!(transmit.num_datagrams(), 0); + transmit.start_new_datagram_with_size(probe_size as usize); + debug_assert_eq!(transmit.datagram_start_offset(), 0); + // TODO(flub): I'm not particularly happy about this unwrap. But let's leave it + // for now until more stuff is settled. We probably should check earlier on + // in poll_transmit that we have a valid CID to use. 
let mut builder = PacketBuilder::new( now, space_id, - self.rem_cids.active(), - buf, - buf_capacity, - 0, + path_id, + self.rem_cids.get(&path_id).unwrap().active(), + &mut transmit, true, self, )?; // We implement MTU probes as ping packets padded up to the probe size - buf.write(frame::FrameType::PING); + builder.frame_space_mut().write(frame::FrameType::PING); self.stats.frame_tx.ping += 1; // If supported by the peer, we want no delays to the probe's ACK if self.peer_supports_ack_frequency() { - buf.write(frame::FrameType::IMMEDIATE_ACK); + builder + .frame_space_mut() + .write(frame::FrameType::IMMEDIATE_ACK); self.stats.frame_tx.immediate_ack += 1; } @@ -1006,41 +1046,204 @@ impl Connection { non_retransmits: true, ..Default::default() }; - builder.finish_and_track(now, self, Some(sent_frames), buf); + builder.finish_and_track(now, self, path_id, sent_frames, false); - self.stats.path.sent_plpmtud_probes += 1; - num_datagrams = 1; + self.stats + .paths + .entry(path_id) + .or_default() + .sent_plpmtud_probes += 1; trace!(?probe_size, "writing MTUD probe"); } - if buf.is_empty() { + if transmit.is_empty() { return None; } - trace!("sending {} bytes in {} datagrams", buf.len(), num_datagrams); - self.path.total_sent = self.path.total_sent.saturating_add(buf.len() as u64); + trace!( + segment_size = transmit.segment_size(), + last_datagram_len = transmit.len() % transmit.segment_size(), + "sending {} bytes in {} datagrams", + transmit.len(), + transmit.num_datagrams() + ); + self.path_data_mut(path_id) + .inc_total_sent(transmit.len() as u64); - self.stats.udp_tx.on_sent(num_datagrams, buf.len()); + self.stats + .udp_tx + .on_sent(transmit.num_datagrams() as u64, transmit.len()); Some(Transmit { - destination: self.path.remote, - size: buf.len(), - ecn: if self.path.sending_ecn { + destination: self.path_data(path_id).remote, + size: transmit.len(), + ecn: if self.path_data(path_id).sending_ecn { Some(EcnCodepoint::Ect0) } else { None }, - segment_size: match 
num_datagrams { + 1 => None, - _ => Some(segment_size), + _ => Some(transmit.segment_size()), }, src_ip: self.local_ip, }) } + /// Returns the [`SpaceId`] of the next packet space which has data to send + /// + /// This takes into account the space available to frames in the next datagram. + // TODO(flub): This duplication is not nice. + fn next_send_space( + &mut self, + current_space_id: SpaceId, + path_id: PathId, + buf: &TransmitBuf<'_>, + close: bool, + ) -> Option<SpaceId> { + // Number of bytes available for frames if this is a 1-RTT packet. We're guaranteed + // to be able to send an individual frame at least this large in the next 1-RTT + // packet. This could be generalized to support every space, but it's only needed to + // handle large fixed-size frames, which only exist in 1-RTT (application + // datagrams). We don't account for coalesced packets potentially occupying space + // because frames can always spill into the next datagram. + let mut space_id = current_space_id; + loop { + let can_send = self.space_can_send(space_id, path_id, buf.segment_size(), close); + if !can_send.is_empty() || (close && self.spaces[space_id].crypto.is_some()) { + return Some(space_id); + } + space_id = match space_id { + SpaceId::Initial => SpaceId::Handshake, + SpaceId::Handshake => SpaceId::Data, + SpaceId::Data => break, + } + } + None + } + + /// Checks if creating a new datagram would be blocked by congestion control + fn path_congestion_check( + &mut self, + space_id: SpaceId, + path_id: PathId, + transmit: &TransmitBuf<'_>, + can_send: &SendableFrames, + now: Instant, + ) -> PathBlocked { + // Anti-amplification is only based on `total_sent`, which gets updated after + // the transmit is sent. Therefore we pass the amount of bytes for datagrams + // that are already created, as well as 1 byte for starting another datagram.
If + // there is any anti-amplification budget left, we always allow a full MTU to be + // sent (see https://github.com/quinn-rs/quinn/issues/1082). + if self + .path_data(path_id) + .anti_amplification_blocked(transmit.len() as u64 + 1) + { + trace!(?space_id, ?path_id, "blocked by anti-amplification"); + return PathBlocked::AntiAmplification; + } + + // Congestion control check. + // Tail loss probes must not be blocked by congestion, or a deadlock could arise. + let bytes_to_send = transmit.segment_size() as u64; + let need_loss_probe = self.spaces[space_id].for_path(path_id).loss_probes > 0; + + if can_send.other && !need_loss_probe && !can_send.close { + let path = self.path_data(path_id); + if path.in_flight.bytes + bytes_to_send >= path.congestion.window() { + trace!(?space_id, %path_id, "blocked by congestion control"); + return PathBlocked::Congestion; + } + } + + // Pacing check. + if let Some(delay) = self.path_data_mut(path_id).pacing_delay(bytes_to_send, now) { + self.timers.set(Timer::Pacing(path_id), delay); + // Loss probes and CONNECTION_CLOSE should be subject to pacing, even though + // they are not congestion controlled. 
+ trace!(?space_id, ?path_id, "blocked by pacing"); + return PathBlocked::Pacing; + } + + PathBlocked::No + } + + /// Send PATH_CHALLENGE for a previous path if necessary + /// + /// QUIC-TRANSPORT section 9.3.3 + /// + fn send_prev_path_challenge( + &mut self, + now: Instant, + buf: &mut TransmitBuf<'_>, + path_id: PathId, + ) -> Option { + let (prev_cid, prev_path) = self.paths.get_mut(&path_id)?.prev.as_mut()?; + if !prev_path.challenge_pending { + return None; + } + prev_path.challenge_pending = false; + let token = prev_path + .challenge + .expect("previous path challenge pending without token"); + let destination = prev_path.remote; + debug_assert_eq!( + self.highest_space, + SpaceId::Data, + "PATH_CHALLENGE queued without 1-RTT keys" + ); + buf.start_new_datagram_with_size(MIN_INITIAL_SIZE as usize); + + // Use the previous CID to avoid linking the new path with the previous path. We + // don't bother accounting for possible retirement of that prev_cid because this is + // sent once, immediately after migration, when the CID is known to be valid. Even + // if a post-migration packet caused the CID to be retired, it's fair to pretend + // this is sent first. 
+ debug_assert_eq!(buf.datagram_start_offset(), 0); + let mut builder = + PacketBuilder::new(now, SpaceId::Data, path_id, *prev_cid, buf, false, self)?; + trace!("validating previous path with PATH_CHALLENGE {:08x}", token); + builder + .frame_space_mut() + .write(frame::FrameType::PATH_CHALLENGE); + builder.frame_space_mut().write(token); + self.stats.frame_tx.path_challenge += 1; + + // An endpoint MUST expand datagrams that contain a PATH_CHALLENGE frame + // to at least the smallest allowed maximum datagram size of 1200 bytes, + // unless the anti-amplification limit for the path does not permit + // sending a datagram of this size + builder.pad_to(MIN_INITIAL_SIZE); + + builder.finish(self); + self.stats.udp_tx.on_sent(1, buf.len()); + + Some(Transmit { + destination, + size: buf.len(), + ecn: None, + segment_size: None, + src_ip: self.local_ip, + }) + } + /// Indicate what types of frames are ready to send for the given space - fn space_can_send(&self, space_id: SpaceId, frame_space_1rtt: usize) -> SendableFrames { + /// + /// *packet_size* is the number of bytes available to build the next packet. *close* + /// indicates whether a CONNECTION_CLOSE frame needs to be sent.
+ fn space_can_send( + &mut self, + space_id: SpaceId, + path_id: PathId, + packet_size: usize, + close: bool, + ) -> SendableFrames { + let pn = self.spaces[SpaceId::Data] + .for_path(path_id) + .peek_tx_number(); + let frame_space_1rtt = packet_size.saturating_sub(self.predict_1rtt_overhead(pn, path_id)); if self.spaces[space_id].crypto.is_none() && (space_id != SpaceId::Data || self.zero_rtt_crypto.is_none() @@ -1049,10 +1252,13 @@ impl Connection { // No keys available for this space return SendableFrames::empty(); } - let mut can_send = self.spaces[space_id].can_send(&self.streams); + let mut can_send = self.spaces[space_id].can_send(path_id, &self.streams); if space_id == SpaceId::Data { - can_send.other |= self.can_send_1rtt(frame_space_1rtt); + can_send |= self.can_send_1rtt(path_id, frame_space_1rtt); } + + can_send.close = close && self.spaces[space_id].crypto.is_some(); + can_send } @@ -1062,11 +1268,12 @@ impl Connection { /// (including application `Event`s, `EndpointEvent`s and outgoing datagrams) that should be /// extracted through the relevant methods. pub fn handle_event(&mut self, event: ConnectionEvent) { - use self::ConnectionEventInner::*; + use ConnectionEventInner::*; match event.0 { Datagram(DatagramConnectionEvent { now, remote, + path_id, ecn, first_decode, remaining, @@ -1074,38 +1281,46 @@ impl Connection { // If this packet could initiate a migration and we're a client or a server that // forbids migration, drop the datagram. This could be relaxed to heuristically // permit NAT-rebinding-like migration. 
- if remote != self.path.remote && !self.side.remote_may_migrate() { + if remote != self.path_data(path_id).remote && !self.side.remote_may_migrate() { trace!("discarding packet from unrecognized peer {}", remote); return; } - let was_anti_amplification_blocked = self.path.anti_amplification_blocked(1); + let was_anti_amplification_blocked = + self.path_data(path_id).anti_amplification_blocked(1); self.stats.udp_rx.datagrams += 1; self.stats.udp_rx.bytes += first_decode.len() as u64; let data_len = first_decode.len(); - self.handle_decode(now, remote, ecn, first_decode); + self.handle_decode(now, remote, path_id, ecn, first_decode); // The current `path` might have changed inside `handle_decode`, // since the packet could have triggered a migration. Make sure // the data received is accounted for the most recent path by accessing // `path` after `handle_decode`. - self.path.total_recvd = self.path.total_recvd.saturating_add(data_len as u64); + self.path_data_mut(path_id).inc_total_recvd(data_len as u64); if let Some(data) = remaining { self.stats.udp_rx.bytes += data.len() as u64; - self.handle_coalesced(now, remote, ecn, data); + self.handle_coalesced(now, remote, path_id, ecn, data); } if was_anti_amplification_blocked { // A prior attempt to set the loss detection timer may have failed due to // anti-amplification, so ensure it's set now. Prevents a handshake deadlock if // the server's first flight is lost. 
- self.set_loss_detection_timer(now); + self.set_loss_detection_timer(now, path_id); } } - NewIdentifiers(ids, now) => { - self.local_cid_state.new_cids(&ids, now); + NewIdentifiers(ids, now, cid_len, cid_lifetime) => { + let path_id = ids.first().map(|issued| issued.path_id).unwrap_or_default(); + debug_assert!(ids.iter().all(|issued| issued.path_id == path_id)); + let cid_state = self + .local_cid_state + .entry(path_id) + .or_insert_with(|| CidState::new(cid_len, cid_lifetime, now, 0)); + cid_state.new_cids(&ids, now); + ids.into_iter().rev().for_each(|frame| { self.spaces[SpaceId::Data].pending.new_cids.push(frame); }); @@ -1131,12 +1346,9 @@ impl Connection { /// `Instant` that was output by `poll_timeout`; however spurious extra calls will simply /// no-op and therefore are safe. pub fn handle_timeout(&mut self, now: Instant) { - for &timer in &Timer::VALUES { - if !self.timers.is_expired(timer, now) { - continue; - } - self.timers.stop(timer); - trace!(timer = ?timer, "timeout"); + while let Some(timer) = self.timers.expire_before(now) { + // TODO(@divma): remove `at` when the unicorn is born + trace!(?timer, at=?now, "timeout"); match timer { Timer::Close => { self.state = State::Drained; @@ -1145,36 +1357,54 @@ impl Connection { Timer::Idle => { self.kill(ConnectionError::TimedOut); } - Timer::KeepAlive => { + Timer::KeepAlive(path_id) => { trace!("sending keep-alive"); - self.ping(); + self.ping(path_id); } - Timer::LossDetection => { - self.on_loss_detection_timeout(now); + Timer::LossDetection(path_id) => { + self.on_loss_detection_timeout(now, path_id); } Timer::KeyDiscard => { self.zero_rtt_crypto = None; self.prev_crypto = None; } - Timer::PathValidation => { + Timer::PathValidation(path_id) => { + let Some(path) = self.paths.get_mut(&path_id) else { + continue; + }; debug!("path validation failed"); - if let Some((_, prev)) = self.prev_path.take() { - self.path = prev; + if let Some((_, prev)) = path.prev.take() { + path.data = prev; } - 
self.path.challenge = None; - self.path.challenge_pending = false; + path.data.challenge = None; + path.data.challenge_pending = false; } - Timer::Pacing => trace!("pacing timer expired"), + Timer::Pacing(path_id) => trace!(?path_id, "pacing timer expired"), Timer::PushNewCid => { - // Update `retire_prior_to` field in NEW_CONNECTION_ID frame - let num_new_cid = self.local_cid_state.on_cid_timeout().into(); - if !self.state.is_closed() { - trace!( - "push a new cid to peer RETIRE_PRIOR_TO field {}", - self.local_cid_state.retire_prior_to() - ); - self.endpoint_events - .push_back(EndpointEventInner::NeedIdentifiers(now, num_new_cid)); + while let Some((path_id, when)) = self.next_cid_retirement() { + if when > now { + break; + } + match self.local_cid_state.get_mut(&path_id) { + None => error!(?path_id, "No local CID state for path"), + Some(cid_state) => { + // Update `retire_prior_to` field in NEW_CONNECTION_ID frame + let num_new_cid = cid_state.on_cid_timeout().into(); + if !self.state.is_closed() { + trace!( + "push a new CID to peer RETIRE_PRIOR_TO field {}", + cid_state.retire_prior_to() + ); + self.endpoint_events.push_back( + EndpointEventInner::NeedIdentifiers( + path_id, + now, + num_new_cid, + ), + ); + } + } + } } } Timer::MaxAckDelay => { @@ -1222,25 +1452,36 @@ impl Connection { } /// Returns connection statistics - pub fn stats(&self) -> ConnectionStats { - let mut stats = self.stats; - stats.path.rtt = self.path.rtt.get(); - stats.path.cwnd = self.path.congestion.window(); - stats.path.current_mtu = self.path.mtud.current_mtu(); - - stats + pub fn stats(&mut self) -> ConnectionStats { + for (path_id, path) in self.paths.iter() { + let stats = self.stats.paths.entry(*path_id).or_default(); + stats.rtt = path.data.rtt.get(); + stats.cwnd = path.data.congestion.window(); + stats.current_mtu = path.data.mtud.current_mtu(); + } + self.stats.clone() } /// Ping the remote endpoint /// /// Causes an ACK-eliciting packet to be transmitted. 
- pub fn ping(&mut self) { - self.spaces[self.highest_space].ping_pending = true; + pub fn ping(&mut self, path: PathId) { + // TODO(@divma): for_path should not be used, we should check if the path still exists + self.spaces[self.highest_space].for_path(path).ping_pending = true; + } + + /// Update traffic keys spontaneously + /// + /// This can be useful for testing key updates, as they otherwise only happen infrequently. + pub fn force_key_update(&mut self) { + self.update_keys(None, false); } + // Compatibility wrapper for quinn < 0.11.7. Remove for 0.12. #[doc(hidden)] + #[deprecated] pub fn initiate_key_update(&mut self) { - self.update_keys(None, false); + self.force_key_update(); } /// Get a session reference @@ -1299,7 +1540,11 @@ impl Connection { /// The latest socket address for this connection's peer pub fn remote_address(&self) -> SocketAddr { - self.path.remote + // say we keep this, this should return at worst the same that the poll_transmit logic + // would use + // so basically completely wrong as well + // TODO(@divma): halp + self.path_data(PathId(0)).remote } /// The local IP address which was used when the peer established @@ -1317,12 +1562,15 @@ impl Connection { /// Current best estimate of this connection's latency (round-trip-time) pub fn rtt(&self) -> Duration { - self.path.rtt.get() + // this should return at worst the same that the poll_transmit logic would use + // TODO(@divma): wrong + self.path_data(PathId(0)).rtt.get() } /// Current state of this connection's congestion controller, for debugging purposes pub fn congestion_state(&self) -> &dyn Controller { - self.path.congestion.as_ref() + // TODO(@divma): same as everything, wrong + self.path_data(PathId(0)).congestion.as_ref() } /// Resets path-specific settings. @@ -1336,7 +1584,13 @@ impl Connection { /// faster or reduce loss to settle on optimal values by restarting from the initial /// configuration in the [`TransportConfig`]. 
pub fn path_changed(&mut self, now: Instant) { - self.path.reset(now, &self.config); + // TODO(@divma): evaluate how this is used + // wrong call in the multipath case anyhow + self.paths + .get_mut(&PathId(0)) + .expect("this might fail") + .data + .reset(now, &self.config); } /// Modify the number of remotely initiated streams that may be concurrently open @@ -1367,17 +1621,54 @@ impl Connection { } } + /// Whether the Multipath for QUIC extension is enabled. + /// + /// Multipath is only enabled after the handshake is completed and if it was enabled by both + /// peers. + pub fn is_multipath_negotiated(&self) -> bool { + // TODO(flub): I believe it might be a TRANSPORT_ERROR if multipath is enabled but + // there's a zero-length CID. + !self.is_handshaking() + && !self.handshake_cid.is_empty() + && !self.rem_handshake_cid.is_empty() + && self.config.max_concurrent_multipath_paths.is_some() + && self.peer_params.initial_max_path_id.is_some() + } + fn on_ack_received( &mut self, now: Instant, space: SpaceId, ack: frame::Ack, ) -> Result<(), TransportError> { - if ack.largest >= self.spaces[space].next_packet_number { + // All ACKs are referencing path 0 + let path = PathId::ZERO; + self.inner_on_ack_received(now, space, path, ack) + } + + fn on_path_ack_received( + &mut self, + now: Instant, + space: SpaceId, + path_ack: frame::PathAck, + ) -> Result<(), TransportError> { + let (ack, path) = path_ack.into_ack(); + self.inner_on_ack_received(now, space, path, ack) + } + + /// Handles an ACK frame acknowledging packets sent on *path*.
+ fn inner_on_ack_received( + &mut self, + now: Instant, + space: SpaceId, + path: PathId, + ack: frame::Ack, + ) -> Result<(), TransportError> { + if ack.largest >= self.spaces[space].for_path(path).next_packet_number { return Err(TransportError::PROTOCOL_VIOLATION("unsent packet acked")); } let new_largest = { - let space = &mut self.spaces[space]; + let space = &mut self.spaces[space].for_path(path); if space .largest_acked_packet .map_or(true, |pn| ack.largest > pn) @@ -1398,8 +1689,8 @@ impl Connection { // Avoid DoS from unreasonably huge ack ranges by filtering out just the new acks. let mut newly_acked = ArrayRangeSet::new(); for range in ack.iter() { - self.packet_number_filter.check_ack(space, range.clone())?; - for (&pn, _) in self.spaces[space].sent_packets.range(range) { + self.spaces[space].for_path(path).check_ack(range.clone())?; + for (&pn, _) in self.spaces[space].for_path(path).sent_packets.range(range) { newly_acked.insert_one(pn); } } @@ -1410,38 +1701,42 @@ impl Connection { let mut ack_eliciting_acked = false; for packet in newly_acked.elts() { - if let Some(info) = self.spaces[space].take(packet) { + if let Some(info) = self.spaces[space].for_path(path).take(packet) { if let Some(acked) = info.largest_acked { // Assume ACKs for all packets below the largest acknowledged in `packet` have // been received. This can cause the peer to spuriously retransmit if some of // our earlier ACKs were lost, but allows for simpler state tracking. 
See // discussion at // https://www.rfc-editor.org/rfc/rfc9000.html#name-limiting-ranges-by-tracking - self.spaces[space].pending_acks.subtract_below(acked); + self.spaces[space].pending_acks.subtract_below(path, acked); } ack_eliciting_acked |= info.ack_eliciting; // Notify MTU discovery that a packet was acked, because it might be an MTU probe - let mtu_updated = self.path.mtud.on_acked(space, packet, info.size); + let path_data = self.path_data_mut(path); + // TODO(@divma): all this needs to go into path + let mtu_updated = path_data.mtud.on_acked(space, packet, info.size); if mtu_updated { - self.path + path_data .congestion - .on_mtu_update(self.path.mtud.current_mtu()); + .on_mtu_update(path_data.mtud.current_mtu()); } // Notify ack frequency that a packet was acked, because it might contain an ACK_FREQUENCY frame self.ack_frequency.on_acked(packet); - self.on_packet_acked(now, packet, info); + self.on_packet_acked(now, path, packet, info); } } - self.path.congestion.on_end_acks( - now, - self.path.in_flight.bytes, - self.app_limited, - self.spaces[space].largest_acked_packet, - ); + let largest_ackd = self.spaces[space].for_path(path).largest_acked_packet; + let app_limited = self.app_limited; + let path_data = self.path_data_mut(path); + let in_flight = path_data.in_flight.bytes; + + path_data + .congestion + .on_end_acks(now, in_flight, app_limited, largest_ackd); if new_largest && ack_eliciting_acked { let ack_delay = if space != SpaceId::Data { @@ -1452,40 +1747,49 @@ impl Connection { Duration::from_micros(ack.delay << self.peer_params.ack_delay_exponent.0), ) }; - let rtt = instant_saturating_sub(now, self.spaces[space].largest_acked_packet_sent); - self.path.rtt.update(ack_delay, rtt); - if self.path.first_packet_after_rtt_sample.is_none() { - self.path.first_packet_after_rtt_sample = - Some((space, self.spaces[space].next_packet_number)); + let rtt = instant_saturating_sub( + now, + self.spaces[space].for_path(path).largest_acked_packet_sent, + ); + 
+ let next_pn = self.spaces[space].for_path(path).next_packet_number; + let path_data = self.path_data_mut(path); + // TODO(@divma): should be a method of path, should be contained in a single place + path_data.rtt.update(ack_delay, rtt); + if path_data.first_packet_after_rtt_sample.is_none() { + path_data.first_packet_after_rtt_sample = Some((space, next_pn)); } } // Must be called before crypto/pto_count are clobbered - self.detect_lost_packets(now, space, true); + self.detect_lost_packets(now, space, path, true); - if self.peer_completed_address_validation() { - self.pto_count = 0; + if self.peer_completed_address_validation(path) { + self.path_data_mut(path).pto_count = 0; } // Explicit congestion notification - if self.path.sending_ecn { + // TODO(@divma): this code is a good example of logic that should be contained in a single + // place but it's split between the path data and the packet number space data, we should + // find a way to make this work without two lookups + if self.path_data(path).sending_ecn { if let Some(ecn) = ack.ecn { // We only examine ECN counters from ACKs that we are certain we received in transmit // order, allowing us to compute an increase in ECN counts to compare against the number // of newly acked packets that remains well-defined in the presence of arbitrary packet // reordering. if new_largest { - let sent = self.spaces[space].largest_acked_packet_sent; - self.process_ecn(now, space, newly_acked.len() as u64, ecn, sent); + let sent = self.spaces[space].for_path(path).largest_acked_packet_sent; + self.process_ecn(now, space, path, newly_acked.len() as u64, ecn, sent); } } else { // We always start out sending ECN, so any ack that doesn't acknowledge it disables it. 
debug!("ECN not acknowledged by peer"); - self.path.sending_ecn = false; + self.path_data_mut(path).sending_ecn = false; } } - self.set_loss_detection_timer(now); + self.set_loss_detection_timer(now, path); Ok(()) } @@ -1494,42 +1798,48 @@ impl Connection { &mut self, now: Instant, space: SpaceId, + path: PathId, newly_acked: u64, ecn: frame::EcnCounts, largest_sent_time: Instant, ) { - match self.spaces[space].detect_ecn(newly_acked, ecn) { + match self.spaces[space] + .for_path(path) + .detect_ecn(newly_acked, ecn) + { Err(e) => { debug!("halting ECN due to verification failure: {}", e); - self.path.sending_ecn = false; + + self.path_data_mut(path).sending_ecn = false; // Wipe out the existing value because it might be garbage and could interfere with // future attempts to use ECN on new paths. - self.spaces[space].ecn_feedback = frame::EcnCounts::ZERO; + self.spaces[space].for_path(path).ecn_feedback = frame::EcnCounts::ZERO; } Ok(false) => {} Ok(true) => { - self.stats.path.congestion_events += 1; - self.path - .congestion - .on_congestion_event(now, largest_sent_time, false, 0); + self.stats.paths.entry(path).or_default().congestion_events += 1; + self.path_data_mut(path).congestion.on_congestion_event( + now, + largest_sent_time, + false, + 0, + ); } } } // Not timing-aware, so it's safe to call this for inferred acks, such as arise from // high-latency handshakes - fn on_packet_acked(&mut self, now: Instant, pn: u64, info: SentPacket) { - self.remove_in_flight(pn, &info); - if info.ack_eliciting && self.path.challenge.is_none() { + fn on_packet_acked(&mut self, now: Instant, path_id: PathId, pn: u64, info: SentPacket) { + self.remove_in_flight(path_id, pn, &info); + let app_limited = self.app_limited; + let path = self.path_data_mut(path_id); + if info.ack_eliciting && path.challenge.is_none() { // Only pass ACKs to the congestion controller if we are not validating the current // path, so as to ignore any ACKs from older paths still coming in. 
- self.path.congestion.on_ack( - now, - info.time_sent, - info.size.into(), - self.app_limited, - &self.path.rtt, - ); + let rtt = path.rtt; + path.congestion + .on_ack(now, info.time_sent, info.size.into(), app_limited, &rtt); } // Update state for confirmed delivery of frames @@ -1560,53 +1870,82 @@ impl Connection { .set(Timer::KeyDiscard, start + self.pto(space) * 3); } - fn on_loss_detection_timeout(&mut self, now: Instant) { - if let Some((_, pn_space)) = self.loss_time_and_space() { + /// Handle a [`Timer::LossDetection`] timeout. + /// + /// This timer expires for two reasons: + /// - An ACK-eliciting packet we sent should be considered lost. + /// - The PTO may have expired and a tail-loss probe needs to be scheduled. + /// + /// The former needs us to schedule re-transmission of the lost data. + /// + /// The latter means we have not received an ACK for an ack-eliciting packet we sent + /// within the PTO time-window. We need to schedule a tail-loss probe, an ack-eliciting + /// packet, to try and elicit new acknowledgements. These new acknowledgements will + /// indicate whether the previously sent packets were lost or not. 
+ fn on_loss_detection_timeout(&mut self, now: Instant, path_id: PathId) { + if let Some((_, pn_space)) = self.loss_time_and_space(path_id) { // Time threshold loss Detection - self.detect_lost_packets(now, pn_space, false); - self.set_loss_detection_timer(now); + self.detect_lost_packets(now, pn_space, path_id, false); + self.set_loss_detection_timer(now, path_id); return; } - let (_, space) = match self.pto_time_and_space(now) { + let (_, space) = match self.pto_time_and_space(now, path_id) { Some(x) => x, None => { error!("PTO expired while unset"); return; } }; + let in_flight = self.path_data(path_id).in_flight.bytes; trace!( - in_flight = self.path.in_flight.bytes, - count = self.pto_count, + in_flight, + count = self.path_data(path_id).pto_count, ?space, + ?path_id, "PTO fired" ); - let count = match self.path.in_flight.ack_eliciting { + let count = match self.path_data(path_id).in_flight.ack_eliciting { // A PTO when we're not expecting any ACKs must be due to handshake anti-amplification // deadlock preventions 0 => { - debug_assert!(!self.peer_completed_address_validation()); + debug_assert!(!self.peer_completed_address_validation(path_id)); 1 } // Conventional loss probe _ => 2, }; - self.spaces[space].loss_probes = self.spaces[space].loss_probes.saturating_add(count); - self.pto_count = self.pto_count.saturating_add(1); - self.set_loss_detection_timer(now); + let pns = self.spaces[space].for_path(path_id); + pns.loss_probes = pns.loss_probes.saturating_add(count); + let path_data = self.path_data_mut(path_id); + path_data.pto_count = path_data.pto_count.saturating_add(1); + self.set_loss_detection_timer(now, path_id); } - fn detect_lost_packets(&mut self, now: Instant, pn_space: SpaceId, due_to_ack: bool) { + // TODO(@divma): some docs wouldn't kill + fn detect_lost_packets( + &mut self, + now: Instant, + pn_space: SpaceId, + path_id: PathId, + due_to_ack: bool, + ) { let mut lost_packets = Vec::<u64>::new(); let mut lost_mtu_probe = None; - let
in_flight_mtu_probe = self.path.mtud.in_flight_mtu_probe(); - let rtt = self.path.rtt.conservative(); + + let path = self.path_data_mut(path_id); + let in_flight_mtu_probe = path.mtud.in_flight_mtu_probe(); + let rtt = path.rtt.conservative(); + let loss_delay = cmp::max(rtt.mul_f32(self.config.time_threshold), TIMER_GRANULARITY); // Packets sent before this time are deemed lost. let lost_send_time = now.checked_sub(loss_delay).unwrap(); - let largest_acked_packet = self.spaces[pn_space].largest_acked_packet.unwrap(); + let largest_acked_packet = self.spaces[pn_space] + .for_path(path_id) + .largest_acked_packet + .unwrap(); // TODO(@divma): ??? let packet_threshold = self.config.packet_threshold as u64; let mut size_of_lost_packets = 0u64; @@ -1619,7 +1958,7 @@ impl Connection { let mut prev_packet = None; let mut in_persistent_congestion = false; - let space = &mut self.spaces[pn_space]; + let space = &mut self.spaces[pn_space].for_path(path_id); space.loss_time = None; for (&packet, info) in space.sent_packets.range(0..largest_acked_packet) { @@ -1646,7 +1985,10 @@ impl Connection { } // Persistent congestion must start after the first RTT sample None if self - .path + .paths + .get_mut(&path_id) + .expect("known path") + .data .first_packet_after_rtt_sample .is_some_and(|x| x < (pn_space, packet)) => { @@ -1671,43 +2013,58 @@ impl Connection { // OnPacketsLost if let Some(largest_lost) = lost_packets.last().cloned() { - let old_bytes_in_flight = self.path.in_flight.bytes; - let largest_lost_sent = self.spaces[pn_space].sent_packets[&largest_lost].time_sent; + let old_bytes_in_flight = self.path_data_mut(path_id).in_flight.bytes; + let largest_lost_sent = + self.spaces[pn_space].for_path(path_id).sent_packets[&largest_lost].time_sent; self.lost_packets += lost_packets.len() as u64; - self.stats.path.lost_packets += lost_packets.len() as u64; - self.stats.path.lost_bytes += size_of_lost_packets; + let path_stats = self.stats.paths.entry(path_id).or_default(); + 
path_stats.lost_packets += lost_packets.len() as u64; + path_stats.lost_bytes += size_of_lost_packets; trace!( "packets lost: {:?}, bytes lost: {}", - lost_packets, - size_of_lost_packets + lost_packets, size_of_lost_packets ); for &packet in &lost_packets { - let info = self.spaces[pn_space].take(packet).unwrap(); // safe: lost_packets is populated just above - self.remove_in_flight(packet, &info); + let info = self.spaces[pn_space] + .for_path(path_id) + .take(packet) + .unwrap(); // safe: lost_packets is populated just above + self.remove_in_flight(path_id, packet, &info); for frame in info.stream_frames { self.streams.retransmit(frame); } self.spaces[pn_space].pending |= info.retransmits; - self.path.mtud.on_non_probe_lost(packet, info.size); + self.path_data_mut(path_id) + .mtud + .on_non_probe_lost(packet, info.size); } - if self.path.mtud.black_hole_detected(now) { - self.stats.path.black_holes_detected += 1; - self.path - .congestion - .on_mtu_update(self.path.mtud.current_mtu()); + let path = self.path_data_mut(path_id); + if path.mtud.black_hole_detected(now) { + path.congestion.on_mtu_update(path.mtud.current_mtu()); if let Some(max_datagram_size) = self.datagrams().max_size() { self.datagrams.drop_oversized(max_datagram_size); } + self.stats + .paths + .entry(path_id) + .or_default() + .black_holes_detected += 1; } // Don't apply congestion penalty for lost ack-only packets - let lost_ack_eliciting = old_bytes_in_flight != self.path.in_flight.bytes; + let lost_ack_eliciting = + old_bytes_in_flight != self.path_data_mut(path_id).in_flight.bytes; if lost_ack_eliciting { - self.stats.path.congestion_events += 1; - self.path.congestion.on_congestion_event( + // TODO(@divma): needs fixing + self.stats + .paths + .entry(path_id) + .or_default() + .congestion_events += 1; + self.path_data_mut(path_id).congestion.on_congestion_event( now, largest_lost_sent, in_persistent_congestion, @@ -1718,35 +2075,64 @@ impl Connection { // Handle a lost MTU probe if let 
Some(packet) = lost_mtu_probe {
-            let info = self.spaces[SpaceId::Data].take(packet).unwrap(); // safe: lost_mtu_probe is omitted from lost_packets, and therefore must not have been removed yet
-            self.remove_in_flight(packet, &info);
-            self.path.mtud.on_probe_lost();
-            self.stats.path.lost_plpmtud_probes += 1;
+            let info = self.spaces[SpaceId::Data]
+                .for_path(path_id)
+                .take(packet)
+                .unwrap(); // safe: lost_mtu_probe is omitted from lost_packets, and
+            // therefore must not have been removed yet
+            self.remove_in_flight(path_id, packet, &info);
+            self.path_data_mut(path_id).mtud.on_probe_lost();
+            self.stats
+                .paths
+                .entry(path_id)
+                .or_default()
+                .lost_plpmtud_probes += 1;
         }
     }
 
-    fn loss_time_and_space(&self) -> Option<(Instant, SpaceId)> {
+    /// Returns the earliest time packets should be declared lost for all spaces on a path.
+    fn loss_time_and_space(&self, path_id: PathId) -> Option<(Instant, SpaceId)> {
         SpaceId::iter()
-            .filter_map(|id| Some((self.spaces[id].loss_time?, id)))
+            .filter_map(|id| {
+                self.spaces[id]
+                    .number_spaces
+                    .get(&path_id)
+                    .and_then(|pns| pns.loss_time)
+                    .map(|time| (time, id))
+            })
             .min_by_key(|&(time, _)| time)
     }
 
-    fn pto_time_and_space(&self, now: Instant) -> Option<(Instant, SpaceId)> {
-        let backoff = 2u32.pow(self.pto_count.min(MAX_BACKOFF_EXPONENT));
-        let mut duration = self.path.rtt.pto_base() * backoff;
-
-        if self.path.in_flight.ack_eliciting == 0 {
-            debug_assert!(!self.peer_completed_address_validation());
+    /// Returns the earliest time the next PTO should fire for all spaces on a path.
+ fn pto_time_and_space(&mut self, now: Instant, path_id: PathId) -> Option<(Instant, SpaceId)> { + let pto_count = self.path_data(path_id).pto_count; + let backoff = 2u32.pow(pto_count.min(MAX_BACKOFF_EXPONENT)); + let mut duration = self.path_data_mut(path_id).rtt.pto_base() * backoff; + + if path_id == PathId::ZERO + && self + .paths + .get(&PathId::ZERO) + .map(|path| path.data.in_flight.ack_eliciting) + == Some(0) + && !self.peer_completed_address_validation(PathId::ZERO) + { + // Address Validation during Connection Establishment: + // https://www.rfc-editor.org/rfc/rfc9000.html#section-8.1. To prevent a + // deadlock if an Initial or Handshake packet from the server is lost and the + // server can not send more due to its anti-amplification limit the client must + // send another packet on PTO. let space = match self.highest_space { SpaceId::Handshake => SpaceId::Handshake, _ => SpaceId::Initial, }; + return Some((now + duration, space)); } let mut result = None; for space in SpaceId::iter() { - if self.spaces[space].in_flight == 0 { + if self.spaces[space].for_path(path_id).in_flight == 0 { continue; } if space == SpaceId::Data { @@ -1757,34 +2143,59 @@ impl Connection { // Include max_ack_delay and backoff for ApplicationData. duration += self.ack_frequency.max_ack_delay_for_pto() * backoff; } - let last_ack_eliciting = match self.spaces[space].time_of_last_ack_eliciting_packet { + let last_ack_eliciting = match self.spaces[space] + .for_path(path_id) + .time_of_last_ack_eliciting_packet + { Some(time) => time, None => continue, }; let pto = last_ack_eliciting + duration; if result.map_or(true, |(earliest_pto, _)| pto < earliest_pto) { + if self.path_data(path_id).anti_amplification_blocked(1) { + // Nothing would be able to be sent. + continue; + } + if self.path_data(path_id).in_flight.ack_eliciting == 0 { + // Nothing ack-eliciting, no PTO to arm/fire. 
+                    continue;
+                }
                 result = Some((pto, space));
             }
         }
         result
     }
 
-    #[allow(clippy::suspicious_operation_groupings)]
-    fn peer_completed_address_validation(&self) -> bool {
+    fn peer_completed_address_validation(&mut self, path: PathId) -> bool {
+        // TODO(flub): This logic needs updating for multipath
         if self.side.is_server() || self.state.is_closed() {
             return true;
         }
         // The server is guaranteed to have validated our address if any of our handshake or 1-RTT
         // packets are acknowledged or we've seen HANDSHAKE_DONE and discarded handshake keys.
         self.spaces[SpaceId::Handshake]
+            .for_path(path)
             .largest_acked_packet
             .is_some()
-            || self.spaces[SpaceId::Data].largest_acked_packet.is_some()
+            || self.spaces[SpaceId::Data]
+                .for_path(path)
+                .largest_acked_packet
+                .is_some()
             || (self.spaces[SpaceId::Data].crypto.is_some()
                 && self.spaces[SpaceId::Handshake].crypto.is_none())
+
+        // TODO(@divma): I'm worried about accessing for a generic path in the handshake space
+        // when it shouldn't be possible.
     }
 
-    fn set_loss_detection_timer(&mut self, now: Instant) {
+    /// Resets the [`Timer::LossDetection`] timer to the next instant it may be needed
+    ///
+    /// The timer must fire if either:
+    /// - An ack-eliciting packet we sent needs to be declared lost.
+    /// - A tail-loss probe needs to be sent.
+    ///
+    /// See [`Connection::on_loss_detection_timeout`] for details.
+    fn set_loss_detection_timer(&mut self, now: Instant, path_id: PathId) {
         if self.state.is_closed() {
             // No loss detection takes place on closed connections, and `close_common` already
             // stopped time timer. Ensure we don't restart it inadvertently, e.g. in response to a
@@ -1792,60 +2203,50 @@ impl Connection {
             return;
         }
 
-        if let Some((loss_time, _)) = self.loss_time_and_space() {
+        if let Some((loss_time, _)) = self.loss_time_and_space(path_id) {
             // Time threshold loss detection.
- self.timers.set(Timer::LossDetection, loss_time); - return; - } - - if self.path.anti_amplification_blocked(1) { - // We wouldn't be able to send anything, so don't bother. - self.timers.stop(Timer::LossDetection); - return; - } - - if self.path.in_flight.ack_eliciting == 0 && self.peer_completed_address_validation() { - // There is nothing to detect lost, so no timer is set. However, the client needs to arm - // the timer if the server might be blocked by the anti-amplification limit. - self.timers.stop(Timer::LossDetection); + self.timers.set(Timer::LossDetection(path_id), loss_time); return; } // Determine which PN space to arm PTO for. // Calculate PTO duration - if let Some((timeout, _)) = self.pto_time_and_space(now) { - self.timers.set(Timer::LossDetection, timeout); + if let Some((timeout, _)) = self.pto_time_and_space(now, path_id) { + self.timers.set(Timer::LossDetection(path_id), timeout); } else { - self.timers.stop(Timer::LossDetection); + self.timers.stop(Timer::LossDetection(path_id)); } } /// Probe Timeout + // TODO(flub): This needs a PathId parameter fn pto(&self, space: SpaceId) -> Duration { let max_ack_delay = match space { - SpaceId::Initial | SpaceId::Handshake => Duration::new(0, 0), + SpaceId::Initial | SpaceId::Handshake => Duration::ZERO, SpaceId::Data => self.ack_frequency.max_ack_delay_for_pto(), }; - self.path.rtt.pto_base() + max_ack_delay + // TODO(@divma): fix + self.path_data(PathId(0)).rtt.pto_base() + max_ack_delay } fn on_packet_authenticated( &mut self, now: Instant, space_id: SpaceId, + path_id: PathId, ecn: Option, packet: Option, spin: bool, is_1rtt: bool, ) { self.total_authed_packets += 1; - self.reset_keep_alive(now); + self.reset_keep_alive(path_id, now); self.reset_idle_timeout(now, space_id); self.permit_idle_reset = true; self.receiving_ecn |= ecn.is_some(); if let Some(x) = ecn { let space = &mut self.spaces[space_id]; - space.ecn_counters += x; + space.for_path(path_id).ecn_counters += x; if x.is_ce() { 
space.pending_acks.set_immediate_ack_required(); @@ -1867,7 +2268,7 @@ impl Connection { } } let space = &mut self.spaces[space_id]; - space.pending_acks.insert_one(packet, now); + space.pending_acks.insert_one(path_id, packet, now); if packet >= space.rx_packet { space.rx_packet = packet; // Update outgoing spin bit, inverting iff we're the client @@ -1888,20 +2289,29 @@ impl Connection { self.timers.set(Timer::Idle, now + dt); } - fn reset_keep_alive(&mut self, now: Instant) { + fn reset_keep_alive(&mut self, path_id: PathId, now: Instant) { let interval = match self.config.keep_alive_interval { Some(x) if self.state.is_established() => x, _ => return, }; - self.timers.set(Timer::KeepAlive, now + interval); + self.timers.set(Timer::KeepAlive(path_id), now + interval); } + /// Sets the timer for when a previously issued CID should be retired next. fn reset_cid_retirement(&mut self) { - if let Some(t) = self.local_cid_state.next_timeout() { + if let Some((_path, t)) = self.next_cid_retirement() { self.timers.set(Timer::PushNewCid, t); } } + /// The next time when a previously issued CID should be retired. 
+ fn next_cid_retirement(&self) -> Option<(PathId, Instant)> { + self.local_cid_state + .iter() + .filter_map(|(path_id, cid_state)| cid_state.next_timeout().map(|t| (*path_id, t))) + .min_by_key(|(_path_id, timeout)| *timeout) + } + /// Handle the already-decrypted first packet from the client /// /// Decrypting the first packet in the `Endpoint` allows stateless packet handling to be more @@ -1919,7 +2329,8 @@ impl Connection { let _guard = span.enter(); debug_assert!(self.side.is_server()); let len = packet.header_data.len() + packet.payload.len(); - self.path.total_recvd = len as u64; + let path_id = PathId(0); + self.path_data_mut(path_id).total_recvd = len as u64; match self.state { State::Handshake(ref mut state) => { @@ -1928,18 +2339,20 @@ impl Connection { _ => unreachable!("first packet must be delivered in Handshake state"), } + // The first packet is always on PathId(0) self.on_packet_authenticated( now, SpaceId::Initial, + path_id, ecn, Some(packet_number), false, false, ); - self.process_decrypted_packet(now, remote, Some(packet_number), packet.into())?; + self.process_decrypted_packet(now, remote, path_id, Some(packet_number), packet.into())?; if let Some(data) = remaining { - self.handle_coalesced(now, remote, ecn, data); + self.handle_coalesced(now, remote, path_id, ecn, data); } Ok(()) } @@ -2104,35 +2517,44 @@ impl Connection { } let space = &mut self.spaces[space_id]; space.crypto = None; - space.time_of_last_ack_eliciting_packet = None; - space.loss_time = None; - space.in_flight = 0; - let sent_packets = mem::take(&mut space.sent_packets); + let path_space = space.for_path(PathId::ZERO); + path_space.time_of_last_ack_eliciting_packet = None; + path_space.loss_time = None; + path_space.in_flight = 0; + let sent_packets = mem::take(&mut path_space.sent_packets); for (pn, packet) in sent_packets.into_iter() { - self.remove_in_flight(pn, &packet); + self.remove_in_flight(PathId::ZERO, pn, &packet); } - self.set_loss_detection_timer(now) + 
self.set_loss_detection_timer(now, PathId::ZERO) } fn handle_coalesced( &mut self, now: Instant, remote: SocketAddr, + path_id: PathId, ecn: Option, data: BytesMut, ) { - self.path.total_recvd = self.path.total_recvd.saturating_add(data.len() as u64); + self.path_data_mut(path_id) + .inc_total_recvd(data.len() as u64); let mut remaining = Some(data); + let cid_len = self + .local_cid_state + .values() + .map(|cid_state| cid_state.cid_len()) + .next() + .expect("one cid_state must exist"); while let Some(data) = remaining { match PartialDecode::new( data, - &FixedLengthConnectionIdParser::new(self.local_cid_state.cid_len()), + &FixedLengthConnectionIdParser::new(cid_len), &[self.version], self.endpoint_config.grease_quic_bit, ) { Ok((partial_decode, rest)) => { remaining = rest; - self.handle_decode(now, remote, ecn, partial_decode); + self.handle_decode(now, remote, path_id, ecn, partial_decode); } Err(e) => { trace!("malformed header: {}", e); @@ -2146,6 +2568,7 @@ impl Connection { &mut self, now: Instant, remote: SocketAddr, + path_id: PathId, ecn: Option, partial_decode: PartialDecode, ) { @@ -2155,7 +2578,14 @@ impl Connection { self.zero_rtt_crypto.as_ref(), self.peer_params.stateless_reset_token, ) { - self.handle_packet(now, remote, ecn, decoded.packet, decoded.stateless_reset); + self.handle_packet( + now, + remote, + path_id, + ecn, + decoded.packet, + decoded.stateless_reset, + ); } } @@ -2163,6 +2593,7 @@ impl Connection { &mut self, now: Instant, remote: SocketAddr, + path_id: PathId, ecn: Option, packet: Option, stateless_reset: bool, @@ -2178,7 +2609,7 @@ impl Connection { ); } - if self.is_handshaking() && remote != self.path.remote { + if self.is_handshaking() && remote != self.path_data(path_id).remote { debug!("discarding packet with unexpected remote during handshake"); return; } @@ -2189,7 +2620,7 @@ impl Connection { let decrypted = match packet { None => Err(None), Some(mut packet) => self - .decrypt_packet(now, &mut packet) + 
.decrypt_packet(now, path_id, &mut packet) .map(move |number| (packet, number)), }; let result = match decrypted { @@ -2224,8 +2655,11 @@ impl Connection { }; let _guard = span.enter(); - let is_duplicate = |n| self.spaces[packet.header.space()].dedup.insert(n); - if number.is_some_and(is_duplicate) { + let dedup = self.spaces[packet.header.space()] + .dedup + .entry(path_id) + .or_default(); + if number.is_some_and(|n| dedup.insert(n)) { debug!("discarding possible duplicate packet"); return; } else if self.state.is_handshake() && packet.header.is_short() { @@ -2253,13 +2687,14 @@ impl Connection { self.on_packet_authenticated( now, packet.header.space(), + path_id, ecn, number, spin, packet.header.is_1rtt(), ); } - self.process_decrypted_packet(now, remote, number, packet) + self.process_decrypted_packet(now, remote, path_id, number, packet) } } }; @@ -2307,7 +2742,7 @@ impl Connection { // Transmit CONNECTION_CLOSE if necessary if let State::Closed(_) = self.state { - self.close = remote == self.path.remote; + self.close = remote == self.path_data(path_id).remote; } } @@ -2315,14 +2750,19 @@ impl Connection { &mut self, now: Instant, remote: SocketAddr, + path_id: PathId, number: Option, packet: Packet, ) -> Result<(), ConnectionError> { let state = match self.state { State::Established => { match packet.header.space() { - SpaceId::Data => self.process_payload(now, remote, number.unwrap(), packet)?, - _ if packet.header.has_frames() => self.process_early_payload(now, packet)?, + SpaceId::Data => { + self.process_payload(now, remote, path_id, number.unwrap(), packet)? + } + _ if packet.header.has_frames() => { + self.process_early_payload(now, path_id, packet)? + } _ => { trace!("discarding unexpected pre-handshake packet"); } @@ -2361,17 +2801,26 @@ impl Connection { Header::Retry { src_cid: rem_cid, .. 
} => { + debug_assert_eq!(path_id, PathId(0)); if self.side.is_server() { return Err(TransportError::PROTOCOL_VIOLATION("client sent Retry").into()); } + let is_valid_retry = self + .rem_cids + .get(&path_id) + .map(|cids| cids.active()) + .map(|orig_dst_cid| { + self.crypto.is_valid_retry( + &orig_dst_cid, + &packet.header_data, + &packet.payload, + ) + }) + .unwrap_or_default(); if self.total_authed_packets > 1 || packet.payload.len() <= 16 // token + 16 byte tag - || !self.crypto.is_valid_retry( - &self.rem_cids.active(), - &packet.header_data, - &packet.payload, - ) + || !is_valid_retry { trace!("discarding invalid Retry"); // - After the client has received and processed an Initial or Retry @@ -2387,33 +2836,38 @@ impl Connection { trace!("retrying with CID {}", rem_cid); let client_hello = state.client_hello.take().unwrap(); self.retry_src_cid = Some(rem_cid); - self.rem_cids.update_initial_cid(rem_cid); + self.rem_cids + .get_mut(&path_id) + .expect("PathId(0) not yet abandoned, is_valid_retry would have been false") + .update_initial_cid(rem_cid); self.rem_handshake_cid = rem_cid; let space = &mut self.spaces[SpaceId::Initial]; - if let Some(info) = space.take(0) { - self.on_packet_acked(now, 0, info); + if let Some(info) = space.for_path(PathId(0)).take(0) { + self.on_packet_acked(now, PathId(0), 0, info); }; - self.discard_space(now, SpaceId::Initial); // Make sure we clean up after any retransmitted Initials - self.spaces[SpaceId::Initial] = PacketSpace { - crypto: Some(self.crypto.initial_keys(&rem_cid, self.side.side())), - next_packet_number: self.spaces[SpaceId::Initial].next_packet_number, - crypto_offset: client_hello.len() as u64, - ..PacketSpace::new(now) - }; - self.spaces[SpaceId::Initial] - .pending - .crypto - .push_back(frame::Crypto { + self.discard_space(now, SpaceId::Initial); // Make sure we clean up after + // any retransmitted Initials + self.spaces[SpaceId::Initial] = { + let mut space = PacketSpace::new(now, SpaceId::Initial, &mut 
self.rng); + space.crypto = Some(self.crypto.initial_keys(&rem_cid, self.side.side())); + space.crypto_offset = client_hello.len() as u64; + space.for_path(path_id).next_packet_number = self.spaces[SpaceId::Initial] + .for_path(path_id) + .next_packet_number; + space.pending.crypto.push_back(frame::Crypto { offset: 0, data: client_hello, }); + space + }; // Retransmit all 0-RTT data - let zero_rtt = mem::take(&mut self.spaces[SpaceId::Data].sent_packets); + let zero_rtt = + mem::take(&mut self.spaces[SpaceId::Data].for_path(PathId(0)).sent_packets); for (pn, info) in zero_rtt { - self.remove_in_flight(pn, &info); + self.remove_in_flight(PathId(0), pn, &info); self.spaces[SpaceId::Data].pending |= info.retransmits; } self.streams.retransmit_all_for_0rtt(); @@ -2435,6 +2889,7 @@ impl Connection { src_cid: rem_cid, .. } => { + debug_assert_eq!(path_id, PathId(0)); if rem_cid != self.rem_handshake_cid { debug!( "discarding packet with mismatched remote CID: {} != {}", @@ -2442,9 +2897,9 @@ impl Connection { ); return Ok(()); } - self.path.validated = true; + self.on_path_validated(path_id); - self.process_early_payload(now, packet)?; + self.process_early_payload(now, path_id, packet)?; if self.state.is_closed() { return Ok(()); } @@ -2476,10 +2931,11 @@ impl Connection { self.spaces[SpaceId::Data].pending = Retransmits::default(); // Discard 0-RTT packets - let sent_packets = - mem::take(&mut self.spaces[SpaceId::Data].sent_packets); + let sent_packets = mem::take( + &mut self.spaces[SpaceId::Data].for_path(path_id).sent_packets, + ); for (pn, packet) in sent_packets { - self.remove_in_flight(pn, &packet); + self.remove_in_flight(path_id, pn, &packet); } } else { self.accepted_0rtt = true; @@ -2487,8 +2943,9 @@ impl Connection { } } if let Some(token) = params.stateless_reset_token { + let remote = self.path_data(path_id).remote; self.endpoint_events - .push_back(EndpointEventInner::ResetToken(self.path.remote, token)); + 
.push_back(EndpointEventInner::ResetToken(remote, token)); } self.handle_peer_params(params)?; self.issue_first_cids(now); @@ -2501,15 +2958,23 @@ impl Connection { self.events.push_back(Event::Connected); self.state = State::Established; trace!("established"); + + // Multipath can only be enabled after the state has reached Established. + // So this can not happen any earlier. + self.issue_first_path_cids(now); Ok(()) } Header::Initial(InitialHeader { src_cid: rem_cid, .. }) => { + debug_assert_eq!(path_id, PathId(0)); if !state.rem_cid_set { trace!("switching remote CID to {}", rem_cid); let mut state = state.clone(); - self.rem_cids.update_initial_cid(rem_cid); + self.rem_cids + .get_mut(&path_id) + .expect("PathId(0) not yet abandoned") + .update_initial_cid(rem_cid); self.rem_handshake_cid = rem_cid; self.orig_rem_cid = rem_cid; state.rem_cid_set = true; @@ -2523,7 +2988,7 @@ impl Connection { } let starting_space = self.highest_space; - self.process_early_payload(now, packet)?; + self.process_early_payload(now, path_id, packet)?; if self.side.is_server() && starting_space == SpaceId::Initial @@ -2547,7 +3012,7 @@ impl Connection { ty: LongType::ZeroRtt, .. } => { - self.process_payload(now, remote, number.unwrap(), packet)?; + self.process_payload(now, remote, path_id, number.unwrap(), packet)?; Ok(()) } Header::VersionNegotiate { .. } => { @@ -2577,9 +3042,11 @@ impl Connection { fn process_early_payload( &mut self, now: Instant, + path_id: PathId, packet: Packet, ) -> Result<(), TransportError> { debug_assert_ne!(packet.header.space(), SpaceId::Data); + debug_assert_eq!(path_id, PathId(0)); let payload_len = packet.payload.len(); let mut ack_eliciting = false; for result in frame::Iter::new(packet.payload.freeze())? 
{ @@ -2603,12 +3070,16 @@ impl Connection { Frame::Ack(ack) => { self.on_ack_received(now, packet.header.space(), ack)?; } + Frame::PathAck(ack) => { + self.on_path_ack_received(now, packet.header.space(), ack)?; + } Frame::Close(reason) => { self.error = Some(reason.into()); self.state = State::Draining; return Ok(()); } _ => { + dbg!(&frame); let mut err = TransportError::PROTOCOL_VIOLATION("illegal frame type in handshake"); err.frame = Some(frame.ty()); @@ -2628,10 +3099,12 @@ impl Connection { Ok(()) } + /// Processes the packet payload, always in the data space. fn process_payload( &mut self, now: Instant, remote: SocketAddr, + path_id: PathId, number: u64, packet: Packet, ) -> Result<(), TransportError> { @@ -2647,7 +3120,7 @@ impl Connection { let frame = result?; let span = match frame { Frame::Padding => continue, - _ => Some(trace_span!("frame", ty = %frame.ty())), + _ => trace_span!("frame", ty = %frame.ty()), }; self.stats.frame_rx.record(&frame); @@ -2668,7 +3141,7 @@ impl Connection { } } - let _guard = span.as_ref().map(|x| x.enter()); + let _guard = span.enter(); if packet.header.is_0rtt() { match frame { Frame::Crypto(_) | Frame::Close(Close::Application(_)) => { @@ -2704,30 +3177,53 @@ impl Connection { Frame::Ack(ack) => { self.on_ack_received(now, SpaceId::Data, ack)?; } + Frame::PathAck(ack) => { + self.on_path_ack_received(now, SpaceId::Data, ack)?; + } Frame::Padding | Frame::Ping => {} Frame::Close(reason) => { close = Some(reason); } Frame::PathChallenge(token) => { - self.path_responses.push(number, token, remote); - if remote == self.path.remote { + // A PATH_CHALLENGE can create a new path. 
+ let path = &mut self + .paths + .entry(path_id) + .or_insert_with(|| { + let peer_max_udp_payload_size = + u16::try_from(self.peer_params.max_udp_payload_size.into_inner()) + .unwrap_or(u16::MAX); + let data = PathData::new( + remote, + self.allow_mtud, + Some(peer_max_udp_payload_size), + now, + &self.config, + ); + PathState { data, prev: None } + }) + .data; + path.path_responses.push(number, token, remote); + if remote == path.remote { // PATH_CHALLENGE on active path, possible off-path packet forwarding // attack. Send a non-probing packet to recover the active path. match self.peer_supports_ack_frequency() { - true => self.immediate_ack(), - false => self.ping(), + true => self.immediate_ack(path_id), + false => self.ping(path_id), } } } Frame::PathResponse(token) => { - if self.path.challenge == Some(token) && remote == self.path.remote { + // TODO(@divma): make an effort to move to path + let path = self.paths.get_mut(&path_id).expect("known path"); + if path.data.challenge == Some(token) && remote == path.data.remote { trace!("new path validated"); - self.timers.stop(Timer::PathValidation); - self.path.challenge = None; - self.path.validated = true; - if let Some((_, ref mut prev_path)) = self.prev_path { - prev_path.challenge = None; - prev_path.challenge_pending = false; + self.timers.stop(Timer::PathValidation(path_id)); + path.data.challenge = None; + path.data.validated = true; + if let Some((_, ref mut prev)) = path.prev { + prev.challenge = None; + prev.challenge_pending = false; } } else { debug!(token, "ignoring invalid PATH_RESPONSE"); @@ -2788,24 +3284,35 @@ impl Connection { } self.streams.received_stop_sending(id, error_code); } - Frame::RetireConnectionId { sequence } => { - let allow_more_cids = self - .local_cid_state - .on_cid_retirement(sequence, self.peer_params.issue_cids_limit())?; - self.endpoint_events - .push_back(EndpointEventInner::RetireConnectionId( - now, - sequence, - allow_more_cids, - )); + 
Frame::RetireConnectionId(frame::RetireConnectionId { path_id, sequence }) => { + match self.local_cid_state.get_mut(&path_id.unwrap_or_default()) { + None => error!(?path_id, "RETIRE_CONNECTION_ID for unknown path"), + Some(cid_state) => { + let allow_more_cids = cid_state + .on_cid_retirement(sequence, self.peer_params.issue_cids_limit())?; + self.endpoint_events + .push_back(EndpointEventInner::RetireConnectionId( + now, + path_id.unwrap_or_default(), + sequence, + allow_more_cids, + )); + } + } } Frame::NewConnectionId(frame) => { trace!( + path_id = ?frame.path_id, sequence = frame.sequence, id = %frame.id, retire_prior_to = frame.retire_prior_to, ); - if self.rem_cids.active().is_empty() { + let path_id = frame.path_id.unwrap_or_default(); + let rem_cids = self + .rem_cids + .entry(path_id) + .or_insert_with(|| CidQueue::new(frame.id)); + if rem_cids.active().is_empty() { return Err(TransportError::PROTOCOL_VIOLATION( "NEW_CONNECTION_ID when CIDs aren't in use", )); @@ -2817,7 +3324,7 @@ impl Connection { } use crate::cid_queue::InsertError; - match self.rem_cids.insert(frame) { + match rem_cids.insert(frame) { Ok(None) => {} Ok(Some((retired, reset_token))) => { let pending_retired = @@ -2835,8 +3342,8 @@ impl Connection { "queued too many retired CIDs", )); } - pending_retired.extend(retired); - self.set_reset_token(reset_token); + pending_retired.extend(retired.map(|seq| (path_id, seq))); + self.set_reset_token(remote, reset_token); } Err(InsertError::ExceedsLimit) => { return Err(TransportError::CONNECTION_ID_LIMIT_ERROR("")); @@ -2849,26 +3356,38 @@ impl Connection { self.spaces[SpaceId::Data] .pending .retire_cids - .push(frame.sequence); + .push((path_id, frame.sequence)); continue; } }; - if self.side.is_server() && self.rem_cids.active_seq() == 0 { + if self.side.is_server() + && path_id == PathId(0) + && self + .rem_cids + .get(&PathId(0)) + .map(|cids| cids.active_seq() == 0) + .unwrap_or_default() + { // We're a server still using the initial 
remote CID for the client, so // let's switch immediately to enable clientside stateless resets. - self.update_rem_cid(); + self.update_rem_cid(PathId(0)); } } - Frame::NewToken { token } => { - if self.side.is_server() { + Frame::NewToken(NewToken { token }) => { + let ConnectionSide::Client { + token_store, + server_name, + .. + } = &self.side + else { return Err(TransportError::PROTOCOL_VIOLATION("client sent NEW_TOKEN")); - } + }; if token.is_empty() { return Err(TransportError::FRAME_ENCODING_ERROR("empty token")); } trace!("got new token"); - // TODO: Cache, or perhaps forward to user? + token_store.insert(server_name, token); } Frame::Datagram(datagram) => { if self @@ -2933,8 +3452,9 @@ impl Connection { )); } - if remote == self.path.remote { - if let Some(updated) = self.path.update_observed_addr_report(observed) { + let path = self.path_data_mut(path_id); + if remote == path.remote { + if let Some(updated) = path.update_observed_addr_report(observed) { self.events.push_back(Event::ObservedAddr(updated)); } } else { @@ -2942,14 +3462,77 @@ impl Connection { migration_observed_addr = Some(observed) } } + Frame::PathAbandon(_) => { + // TODO(@divma): jump ship? 
+ } + Frame::PathAvailable(info) => { + if self.is_multipath_negotiated() { + self.on_path_available(info.path_id, info.is_backup, info.status_seq_no); + } else { + return Err(TransportError::PROTOCOL_VIOLATION( + "received PATH_AVAILABLE frame when not multipath was not negotiated", + )); + } + } + Frame::MaxPathId(path_id) => { + if self.is_multipath_negotiated() { + // frames that do not increase the path id are ignored + self.remote_max_path_id = self.remote_max_path_id.max(path_id); + } else { + return Err(TransportError::PROTOCOL_VIOLATION( + "received MAX_PATH_ID frame when not multipath was not negotiated", + )); + } + } + Frame::PathsBlocked(max_path_id) => { + // Receipt of a value of Maximum Path Identifier or Path Identifier that is higher than the local maximum value MUST + // be treated as a connection error of type PROTOCOL_VIOLATION. + // Ref + if self.is_multipath_negotiated() { + if self.local_max_path_id > max_path_id { + return Err(TransportError::PROTOCOL_VIOLATION( + "PATHS_BLOCKED maximum path identifier was larger than local maximum", + )); + } + debug!("received PATHS_BLOCKED({:?})", max_path_id); + } else { + return Err(TransportError::PROTOCOL_VIOLATION( + "received PATHS_BLOCKED frame when not multipath was not negotiated", + )); + } + } + Frame::PathCidsBlocked(path_id) => { + // Nothing to do. This is recorded in the frame stats, but otherwise we + // always issue all CIDs we're allowed to issue, so either this is an + // impatient peer or a bug on our side. + + // Receipt of a value of Maximum Path Identifier or Path Identifier that is higher than the local maximum value MUST + // be treated as a connection error of type PROTOCOL_VIOLATION. 
+ // Ref + if self.is_multipath_negotiated() { + if self.local_max_path_id > path_id { + return Err(TransportError::PROTOCOL_VIOLATION( + "PATH_CIDS_BLOCKED path identifier was larger than local maximum", + )); + } + debug!("received PATH_CIDS_BLOCKED({:?})", path_id); + } else { + return Err(TransportError::PROTOCOL_VIOLATION( + "received PATH_CIDS_BLOCKED frame when not multipath was not negotiated", + )); + } + } } } let space = &mut self.spaces[SpaceId::Data]; - if space - .pending_acks - .packet_received(now, number, ack_eliciting, &space.dedup) - { + if space.pending_acks.packet_received( + now, + path_id, + number, + ack_eliciting, + space.dedup.entry(path_id).or_default(), + ) { self.timers .set(Timer::MaxAckDelay, now + self.ack_frequency.max_ack_delay); } @@ -2967,9 +3550,9 @@ impl Connection { self.close = true; } - if remote != self.path.remote + if number == self.spaces[SpaceId::Data].rx_packet && !is_probing_packet - && number == self.spaces[SpaceId::Data].rx_packet + && remote != self.path_data(path_id).remote { let ConnectionSide::Server { ref server_config } = self.side else { panic!("packets from unknown remote should be dropped by clients"); @@ -2978,22 +3561,34 @@ impl Connection { server_config.migration, "migration-initiating packets should have been dropped immediately" ); - self.migrate(now, remote, migration_observed_addr); + self.migrate(path_id, now, remote, migration_observed_addr); // Break linkability, if possible - self.update_rem_cid(); + self.update_rem_cid(path_id); self.spin = false; } Ok(()) } - fn migrate(&mut self, now: Instant, remote: SocketAddr, observed_addr: Option) { - trace!(%remote, "migration initiated"); + fn migrate( + &mut self, + path_id: PathId, + now: Instant, + remote: SocketAddr, + observed_addr: Option, + ) { + trace!(%remote, ?path_id, "migration initiated"); + // TODO(@divma): conditions for path migration in multipath are very specific, check them + // again to prevent path migrations that should actually 
create a new path + // Reset rtt/congestion state for new path unless it looks like a NAT rebinding. // Note that the congestion window will not grow until validation terminates. Helps mitigate // amplification attacks performed by spoofing source addresses. - let mut new_path = if remote.is_ipv4() && remote.ip() == self.path.remote.ip() { - PathData::from_previous(remote, &self.path, now) + let prev_pto = self.pto(SpaceId::Data); + let known_path = self.paths.get_mut(&path_id).expect("known path"); + let path = &mut known_path.data; + let mut new_path = if remote.is_ipv4() && remote.ip() == path.remote.ip() { + PathData::from_previous(remote, path, now) } else { let peer_max_udp_payload_size = u16::try_from(self.peer_params.max_udp_payload_size.into_inner()) @@ -3003,88 +3598,128 @@ impl Connection { self.allow_mtud, Some(peer_max_udp_payload_size), now, - false, &self.config, ) }; - new_path.last_observed_addr_report = self.path.last_observed_addr_report.clone(); + new_path.last_observed_addr_report = path.last_observed_addr_report.clone(); if let Some(report) = observed_addr { if let Some(updated) = new_path.update_observed_addr_report(report) { self.events.push_back(Event::ObservedAddr(updated)); } } - new_path.challenge = Some(self.rng.gen()); + new_path.challenge = Some(self.rng.random()); new_path.challenge_pending = true; - let prev_pto = self.pto(SpaceId::Data); - let mut prev = mem::replace(&mut self.path, new_path); + let mut prev = mem::replace(path, new_path); // Don't clobber the original path if the previous one hasn't been validated yet if prev.challenge.is_none() { - prev.challenge = Some(self.rng.gen()); + prev.challenge = Some(self.rng.random()); prev.challenge_pending = true; // We haven't updated the remote CID yet, this captures the remote CID we were using on // the previous path. 
- self.prev_path = Some((self.rem_cids.active(), prev)); + + known_path.prev = Some((self.rem_cids.get(&path_id).unwrap().active(), prev)); } self.timers.set( - Timer::PathValidation, + Timer::PathValidation(path_id), now + 3 * cmp::max(self.pto(SpaceId::Data), prev_pto), ); } /// Handle a change in the local address, i.e. an active migration pub fn local_address_changed(&mut self) { - self.update_rem_cid(); - self.ping(); + // TODO(flub): if multipath is enabled this needs to create a new path entirely. + self.update_rem_cid(PathId(0)); + // TODO(@divma): sending pings to paths that might no longer exist! + self.ping(PathId(0)); } /// Switch to a previously unused remote connection ID, if possible - fn update_rem_cid(&mut self) { - let (reset_token, retired) = match self.rem_cids.next() { - Some(x) => x, - None => return, - }; + fn update_rem_cid(&mut self, path_id: PathId) { + let (reset_token, retired) = + match self.rem_cids.get_mut(&path_id).and_then(|cids| cids.next()) { + Some(x) => x, + None => return, + }; // Retire the current remote CID and any CIDs we had to skip. self.spaces[SpaceId::Data] .pending .retire_cids - .extend(retired); - self.set_reset_token(reset_token); + .extend(retired.map(|seq| (path_id, seq))); + let remote = self.path_data(path_id).remote; + self.set_reset_token(remote, reset_token); } - fn set_reset_token(&mut self, reset_token: ResetToken) { + /// Sends this reset token to the endpoint. + /// + /// The endpoint needs to have reset-tokens for past connections so that it can still + /// use those for stateless resets when the connection state is dropped. See RFC 9000 + /// section 10.3. Stateless Reset. + /// + /// Reset tokens are different for each path, the endpoint identifies paths by peer + /// socket address however, not by path ID. 
+ fn set_reset_token(&mut self, remote: SocketAddr, reset_token: ResetToken) { self.endpoint_events - .push_back(EndpointEventInner::ResetToken( - self.path.remote, - reset_token, - )); + .push_back(EndpointEventInner::ResetToken(remote, reset_token)); self.peer_params.stateless_reset_token = Some(reset_token); } /// Issue an initial set of connection IDs to the peer upon connection fn issue_first_cids(&mut self, now: Instant) { - if self.local_cid_state.cid_len() == 0 { + if self + .local_cid_state + .get(&PathId(0)) + .expect("PathId(0) exists when the connection is created") + .cid_len() + == 0 + { return; } // Subtract 1 to account for the CID we supplied while handshaking let n = self.peer_params.issue_cids_limit() - 1; self.endpoint_events - .push_back(EndpointEventInner::NeedIdentifiers(now, n)); + .push_back(EndpointEventInner::NeedIdentifiers(PathId(0), now, n)); + } + + /// Issues an initial set of CIDs to the peer starting from the next available [`PathId`] in + /// use up to the maximum + fn issue_first_path_cids(&mut self, now: Instant) { + if let Some(PathId(max_path_id)) = self.max_path_id() { + let start_path_id = self.max_path_id_in_use.0 + 1; + for n in start_path_id..=max_path_id { + self.endpoint_events + .push_back(EndpointEventInner::NeedIdentifiers( + PathId(n), + now, + self.peer_params.issue_cids_limit(), + )); + } + } } + /// Populates a packet with frames + /// + /// This tries to fit as many frames as possible into the packet. + /// + /// *path_exclusive_only* means to only build frames which can only be sent on this + /// *path. This is used in multipath for backup paths while there is still an active + /// *path. 
fn populate_packet( &mut self, now: Instant, space_id: SpaceId, - buf: &mut Vec, - max_size: usize, + path_id: PathId, + path_exclusive_only: bool, + buf: &mut impl BufMut, pn: u64, ) -> SentFrames { let mut sent = SentFrames::default(); + let is_multipath_negotiated = self.is_multipath_negotiated(); let space = &mut self.spaces[space_id]; + let path = &mut self.paths.get_mut(&path_id).expect("known path").data; let is_0rtt = space_id == SpaceId::Data && space.crypto.is_none(); space.pending_acks.maybe_ack_non_eliciting(); @@ -3098,54 +3733,29 @@ impl Connection { } // OBSERVED_ADDR - let mut send_observed_address = - |space_id: SpaceId, - buf: &mut Vec, - max_size: usize, - space: &mut PacketSpace, - sent: &mut SentFrames, - stats: &mut ConnectionStats, - skip_sent_check: bool| { - // should only be sent within Data space and only if allowed by extension - // negotiation - // send is also skipped if the path has already sent an observed address - let send_allowed = self - .config - .address_discovery_role - .should_report(&self.peer_params.address_discovery_role); - let send_required = - space.pending.observed_addr || !self.path.observed_addr_sent || skip_sent_check; - if space_id != SpaceId::Data || !send_allowed || !send_required { - return; - } - - let observed = - frame::ObservedAddr::new(self.path.remote, self.next_observed_addr_seq_no); - - if buf.len() + observed.size() < max_size { - observed.write(buf); + if !path_exclusive_only + && space_id == SpaceId::Data + && self + .config + .address_discovery_role + .should_report(&self.peer_params.address_discovery_role) + && (!path.observed_addr_sent || space.pending.observed_addr) + { + let frame = frame::ObservedAddr::new(path.remote, self.next_observed_addr_seq_no); + if buf.remaining_mut() > frame.size() { + frame.write(buf); - self.next_observed_addr_seq_no = - self.next_observed_addr_seq_no.saturating_add(1u8); - self.path.observed_addr_sent = true; + self.next_observed_addr_seq_no = 
self.next_observed_addr_seq_no.saturating_add(1u8); + path.observed_addr_sent = true; - stats.frame_tx.observed_addr += 1; - sent.retransmits.get_or_create().observed_addr = true; - space.pending.observed_addr = false; - } - }; - send_observed_address( - space_id, - buf, - max_size, - space, - &mut sent, - &mut self.stats, - false, - ); + self.stats.frame_tx.observed_addr += 1; + sent.retransmits.get_or_create().observed_addr = true; + space.pending.observed_addr = false; + } + } // PING - if mem::replace(&mut space.ping_pending, false) { + if mem::replace(&mut space.for_path(path_id).ping_pending, false) { trace!("PING"); buf.write(frame::FrameType::PING); sent.non_retransmits = true; @@ -3153,7 +3763,7 @@ impl Connection { } // IMMEDIATE_ACK - if mem::replace(&mut space.immediate_ack_pending, false) { + if mem::replace(&mut space.for_path(path_id).immediate_ack_pending, false) { trace!("IMMEDIATE_ACK"); buf.write(frame::FrameType::IMMEDIATE_ACK); sent.non_retransmits = true; @@ -3161,19 +3771,20 @@ impl Connection { } // ACK - if space.pending_acks.can_send() { + if !path_exclusive_only && space.pending_acks.can_send() { Self::populate_acks( now, self.receiving_ecn, &mut sent, space, + is_multipath_negotiated, buf, &mut self.stats, ); } // ACK_FREQUENCY - if mem::replace(&mut space.pending.ack_frequency, false) { + if !path_exclusive_only && mem::replace(&mut space.pending.ack_frequency, false) { let sequence_number = self.ack_frequency.next_sequence_number(); // Safe to unwrap because this is always provided when ACK frequency is enabled @@ -3181,7 +3792,7 @@ impl Connection { // Ensure the delay is within bounds to avoid a PROTOCOL_VIOLATION error let max_ack_delay = self.ack_frequency.candidate_max_ack_delay( - self.path.rtt.get(), + path.rtt.get(), config, &self.peer_params, ); @@ -3203,32 +3814,45 @@ impl Connection { } // PATH_CHALLENGE - if buf.len() + 9 < max_size && space_id == SpaceId::Data { + if buf.remaining_mut() > 9 && space_id == SpaceId::Data { 
// Transmit challenges with every outgoing frame on an unvalidated path - if let Some(token) = self.path.challenge { + if let Some(token) = path.challenge { // But only send a packet solely for that purpose at most once - self.path.challenge_pending = false; + path.challenge_pending = false; sent.non_retransmits = true; sent.requires_padding = true; trace!("PATH_CHALLENGE {:08x}", token); buf.write(frame::FrameType::PATH_CHALLENGE); buf.write(token); - send_observed_address( - space_id, - buf, - max_size, - space, - &mut sent, - &mut self.stats, - true, - ); + // Always include an OBSERVED_ADDR frame with a PATH_CHALLENGE, regardless + // of whether one has already been sent on this path. + if space_id == SpaceId::Data + && self + .config + .address_discovery_role + .should_report(&self.peer_params.address_discovery_role) + { + let frame = + frame::ObservedAddr::new(path.remote, self.next_observed_addr_seq_no); + if buf.remaining_mut() > frame.size() { + frame.write(buf); + + self.next_observed_addr_seq_no = + self.next_observed_addr_seq_no.saturating_add(1u8); + path.observed_addr_sent = true; + + self.stats.frame_tx.observed_addr += 1; + sent.retransmits.get_or_create().observed_addr = true; + space.pending.observed_addr = false; + } + } } } // PATH_RESPONSE - if buf.len() + 9 < max_size && space_id == SpaceId::Data { - if let Some(token) = self.path_responses.pop_on_path(&self.path.remote) { + if buf.remaining_mut() > 9 && space_id == SpaceId::Data { + if let Some(token) = path.path_responses.pop_on_path(path.remote) { sent.non_retransmits = true; sent.requires_padding = true; trace!("PATH_RESPONSE {:08x}", token); @@ -3239,20 +3863,31 @@ impl Connection { // NOTE: this is technically not required but might be useful to ride the // request/response nature of path challenges to refresh an observation // Since PATH_RESPONSE is a probing frame, this is allowed by the spec. 
- send_observed_address( - space_id, - buf, - max_size, - space, - &mut sent, - &mut self.stats, - true, - ); + if space_id == SpaceId::Data + && self + .config + .address_discovery_role + .should_report(&self.peer_params.address_discovery_role) + { + let frame = + frame::ObservedAddr::new(path.remote, self.next_observed_addr_seq_no); + if buf.remaining_mut() > frame.size() { + frame.write(buf); + + self.next_observed_addr_seq_no = + self.next_observed_addr_seq_no.saturating_add(1u8); + path.observed_addr_sent = true; + + self.stats.frame_tx.observed_addr += 1; + sent.retransmits.get_or_create().observed_addr = true; + space.pending.observed_addr = false; + } + } } } // CRYPTO - while buf.len() + frame::Crypto::SIZE_BOUND < max_size && !is_0rtt { + while !path_exclusive_only && buf.remaining_mut() > frame::Crypto::SIZE_BOUND && !is_0rtt { let mut frame = match space.pending.crypto.pop_front() { Some(x) => x, None => break, @@ -3262,8 +3897,7 @@ impl Connection { // Since the offset is known, we can reserve the exact size required to encode it. // For length we reserve 2bytes which allows to encode up to 2^14, // which is more than what fits into normally sized QUIC frames. 
- let max_crypto_data_size = max_size - - buf.len() + let max_crypto_data_size = buf.remaining_mut() - 1 // Frame Type - VarInt::size(unsafe { VarInt::from_u64_unchecked(frame.offset) }) - 2; // Maximum encoded length for frame size, given we send less than 2^14 bytes @@ -3299,24 +3933,55 @@ impl Connection { &mut space.pending, &mut sent.retransmits, &mut self.stats.frame_tx, - max_size, ); } // NEW_CONNECTION_ID - while buf.len() + 44 < max_size { + let cid_len = self + .local_cid_state + .values() + .map(|cid_state| cid_state.cid_len()) + .max() + .expect("some local CID state must exist"); + let new_cid_size_bound = + frame::NewConnectionId::size_bound(is_multipath_negotiated, cid_len); + while !path_exclusive_only && buf.remaining_mut() > new_cid_size_bound { let issued = match space.pending.new_cids.pop() { Some(x) => x, None => break, }; - trace!( - sequence = issued.sequence, - id = %issued.id, - "NEW_CONNECTION_ID" - ); + let retire_prior_to = self + .local_cid_state + .get(&issued.path_id) + .map(|cid_state| cid_state.retire_prior_to()) + .unwrap_or_else(|| { + error!(path_id = ?issued.path_id, "Missing local CID state"); + 0 + }); + let cid_path_id = match is_multipath_negotiated { + true => { + trace!( + path_id = ?issued.path_id, + sequence = issued.sequence, + id = %issued.id, + "PATH_NEW_CONNECTION_ID", + ); + Some(issued.path_id) + } + false => { + trace!( + sequence = issued.sequence, + id = %issued.id, + "NEW_CONNECTION_ID" + ); + debug_assert_eq!(issued.path_id, PathId(0)); + None + } + }; frame::NewConnectionId { + path_id: cid_path_id, sequence: issued.sequence, - retire_prior_to: self.local_cid_state.retire_prior_to(), + retire_prior_to, id: issued.id, reset_token: issued.reset_token, } @@ -3326,22 +3991,29 @@ impl Connection { } // RETIRE_CONNECTION_ID - while buf.len() + frame::RETIRE_CONNECTION_ID_SIZE_BOUND < max_size { - let seq = match space.pending.retire_cids.pop() { - Some(x) => x, + let retire_cid_bound = 
frame::RetireConnectionId::size_bound(is_multipath_negotiated); + while !path_exclusive_only && buf.remaining_mut() > retire_cid_bound { + let (path_id, sequence) = match space.pending.retire_cids.pop() { + Some((PathId(0), seq)) if !is_multipath_negotiated => (None, seq), + Some((path_id, seq)) => (Some(path_id), seq), None => break, }; - trace!(sequence = seq, "RETIRE_CONNECTION_ID"); - buf.write(frame::FrameType::RETIRE_CONNECTION_ID); - buf.write_var(seq); - sent.retransmits.get_or_create().retire_cids.push(seq); + trace!(?path_id, sequence, "RETIRE_CONNECTION_ID"); + frame::RetireConnectionId { path_id, sequence }.write(buf); + sent.retransmits + .get_or_create() + .retire_cids + .push((path_id.unwrap_or_default(), sequence)); self.stats.frame_tx.retire_connection_id += 1; } // DATAGRAM let mut sent_datagrams = false; - while buf.len() + Datagram::SIZE_BOUND < max_size && space_id == SpaceId::Data { - match self.datagrams.write(buf, max_size) { + while !path_exclusive_only + && buf.remaining_mut() > Datagram::SIZE_BOUND + && space_id == SpaceId::Data + { + match self.datagrams.write(buf) { true => { sent_datagrams = true; sent.non_retransmits = true; @@ -3355,11 +4027,53 @@ impl Connection { self.datagrams.send_blocked = false; } + // NEW_TOKEN + while let Some(remote_addr) = space.pending.new_tokens.pop() { + if path_exclusive_only { + break; + } + debug_assert_eq!(space_id, SpaceId::Data); + let ConnectionSide::Server { server_config } = &self.side else { + panic!("NEW_TOKEN frames should not be enqueued by clients"); + }; + + if remote_addr != path.remote { + // NEW_TOKEN frames contain tokens bound to a client's IP address, and are only + // useful if used from the same IP address. Thus, we abandon enqueued NEW_TOKEN + // frames upon an path change. Instead, when the new path becomes validated, + // NEW_TOKEN frames may be enqueued for the new path instead. 
+ continue; + } + + let token = Token::new( + TokenPayload::Validation { + ip: remote_addr.ip(), + issued: server_config.time_source.now(), + }, + &mut self.rng, + ); + let new_token = NewToken { + token: token.encode(&*server_config.token_key).into(), + }; + + if buf.remaining_mut() < new_token.size() { + space.pending.new_tokens.push(remote_addr); + break; + } + + new_token.encode(buf); + sent.retransmits + .get_or_create() + .new_tokens + .push(remote_addr); + self.stats.frame_tx.new_token += 1; + } + // STREAM - if space_id == SpaceId::Data { - sent.stream_frames = - self.streams - .write_stream_frames(buf, max_size, self.config.send_fairness); + if !path_exclusive_only && space_id == SpaceId::Data { + sent.stream_frames = self + .streams + .write_stream_frames(buf, self.config.send_fairness); self.stats.frame_tx.stream += sent.stream_frames.len() as u64; } @@ -3367,49 +4081,50 @@ impl Connection { } /// Write pending ACKs into a buffer - /// - /// This method assumes ACKs are pending, and should only be called if - /// `!PendingAcks::ranges().is_empty()` returns `true`. fn populate_acks( now: Instant, receiving_ecn: bool, sent: &mut SentFrames, space: &mut PacketSpace, - buf: &mut Vec, + send_path_acks: bool, + buf: &mut impl BufMut, stats: &mut ConnectionStats, ) { - debug_assert!(!space.pending_acks.ranges().is_empty()); - // 0-RTT packets must never carry acks (which would have to be of handshake packets) debug_assert!(space.crypto.is_some(), "tried to send ACK in 0-RTT"); - let ecn = if receiving_ecn { - Some(&space.ecn_counters) - } else { - None - }; - sent.largest_acked = space.pending_acks.ranges().max(); - let delay_micros = space.pending_acks.ack_delay(now).as_micros() as u64; - - // TODO: This should come from `TransportConfig` if that gets configurable. 
- let ack_delay_exp = TransportParameters::default().ack_delay_exponent; - let delay = delay_micros >> ack_delay_exp.into_inner(); - - trace!( - "ACK {:?}, Delay = {}us", - space.pending_acks.ranges(), - delay_micros - ); - - frame::Ack::encode(delay as _, space.pending_acks.ranges(), ecn, buf); - stats.frame_tx.acks += 1; + for (path_id, ranges) in space.pending_acks.ranges() { + if !send_path_acks && *path_id != PathId::ZERO { + continue; + } + let ecn = receiving_ecn + .then_some(&space.number_spaces) + .and_then(|map| map.get(path_id)) + .map(|pns| pns.ecn_counters); + + // TODO(flub): Figure out how this is used and what to do about it. + sent.largest_acked = ranges.max(); + + let delay_micros = space.pending_acks.ack_delay(*path_id, now).as_micros() as u64; + // TODO: This should come from `TransportConfig` if that gets configurable. + let ack_delay_exp = TransportParameters::default().ack_delay_exponent; + let delay = delay_micros >> ack_delay_exp.into_inner(); + + if send_path_acks { + trace!("PATH_ACK {:?}, Delay = {}us", ranges, delay_micros); + frame::PathAck::encode(*path_id, delay as _, ranges, ecn.as_ref(), buf); + stats.frame_tx.path_acks += 1; + } else { + trace!("ACK {:?}, Delay = {}us", ranges, delay_micros); + frame::Ack::encode(delay as _, ranges, ecn.as_ref(), buf); + stats.frame_tx.acks += 1; + } + } } fn close_common(&mut self) { trace!("connection closed"); - for &timer in &Timer::VALUES { - self.timers.stop(timer); - } + self.timers.reset(); } fn set_close_timer(&mut self, now: Instant) { @@ -3419,6 +4134,7 @@ impl Connection { /// Handle transport parameters received from the peer fn handle_peer_params(&mut self, params: TransportParameters) -> Result<(), TransportError> { + // TODO(@divma): check if we need to validate anything regarding multipath here if Some(self.orig_rem_cid) != params.initial_src_cid || (self.side.is_client() && (Some(self.initial_dst_cid) != params.original_dst_cid @@ -3439,28 +4155,52 @@ impl Connection { 
self.idle_timeout = negotiate_max_idle_timeout(self.config.max_idle_timeout, Some(params.max_idle_timeout)); trace!("negotiated max idle timeout {:?}", self.idle_timeout); + let path_id = PathId(0); + if let Some(ref info) = params.preferred_address { - self.rem_cids.insert(frame::NewConnectionId { + // During the handshake PathId(0) exists. + self.rem_cids.get_mut(&path_id).expect("not yet abandoned").insert(frame::NewConnectionId { + path_id: None, sequence: 1, id: info.connection_id, reset_token: info.stateless_reset_token, retire_prior_to: 0, - }).expect("preferred address CID is the first received, and hence is guaranteed to be legal"); + }) + .expect( + "preferred address CID is the first received, and hence is guaranteed to be legal", + ); + let remote = self.path_data(path_id).remote; + self.set_reset_token(remote, info.stateless_reset_token); } self.ack_frequency.peer_max_ack_delay = get_max_ack_delay(¶ms); + + if let (Some(local_max_path_id), Some(remote_max_path_id)) = ( + self.config.get_initial_max_path_id(), + params.initial_max_path_id, + ) { + // multipath is enabled, register the local and remote maximums + self.local_max_path_id = local_max_path_id; + self.remote_max_path_id = remote_max_path_id; + debug!(initial_max_path_id=%local_max_path_id.min(remote_max_path_id), "multipath negotiated"); + } + self.peer_params = params; - self.path.mtud.on_peer_max_udp_payload_size_received( - u16::try_from(self.peer_params.max_udp_payload_size.into_inner()).unwrap_or(u16::MAX), - ); + let peer_max_udp_payload_size = + u16::try_from(self.peer_params.max_udp_payload_size.into_inner()).unwrap_or(u16::MAX); + self.path_data_mut(path_id) + .mtud + .on_peer_max_udp_payload_size_received(peer_max_udp_payload_size); } fn decrypt_packet( &mut self, now: Instant, + path_id: PathId, packet: &mut Packet, ) -> Result, Option> { let result = packet_crypto::decrypt_packet_body( packet, + path_id, &self.spaces, self.zero_rtt_crypto.as_ref(), self.key_phase, @@ -3510,7 
+4250,9 @@ impl Connection { .packet, mem::replace(self.next_crypto.as_mut().unwrap(), new), ); - self.spaces[SpaceId::Data].sent_with_keys = 0; + self.spaces[SpaceId::Data] + .iter_paths_mut() + .for_each(|s| s.sent_with_keys = 0); self.prev_crypto = Some(PrevCrypto { crypto: old, end_packet, @@ -3527,19 +4269,22 @@ impl Connection { /// /// According to the spec, this will result in an error if the remote endpoint does not support /// the Acknowledgement Frequency extension - pub(crate) fn immediate_ack(&mut self) { - self.spaces[self.highest_space].immediate_ack_pending = true; + pub(crate) fn immediate_ack(&mut self, path_id: PathId) { + self.spaces[self.highest_space] + .for_path(path_id) + .immediate_ack_pending = true; } /// Decodes a packet, returning its decrypted payload, so it can be inspected in tests #[cfg(test)] pub(crate) fn decode_packet(&self, event: &ConnectionEvent) -> Option> { - let (first_decode, remaining) = match &event.0 { + let (path_id, first_decode, remaining) = match &event.0 { ConnectionEventInner::Datagram(DatagramConnectionEvent { + path_id, first_decode, remaining, .. - }) => (first_decode, remaining), + }) => (path_id, first_decode, remaining), _ => return None, }; @@ -3557,6 +4302,7 @@ impl Connection { let mut packet = decrypted_header.packet?; packet_crypto::decrypt_packet_body( &mut packet, + *path_id, &self.spaces, self.zero_rtt_crypto.as_ref(), self.key_phase, @@ -3572,27 +4318,33 @@ impl Connection { /// acknowledged or declared lost. #[cfg(test)] pub(crate) fn bytes_in_flight(&self) -> u64 { - self.path.in_flight.bytes + // TODO(@divma): consider including for multipath? 
+ self.path_data(PathId(0)).in_flight.bytes } /// Number of bytes worth of non-ack-only packets that may be sent #[cfg(test)] pub(crate) fn congestion_window(&self) -> u64 { - self.path - .congestion + let path = self.path_data(PathId(0)); + path.congestion .window() - .saturating_sub(self.path.in_flight.bytes) + .saturating_sub(path.in_flight.bytes) } - /// Whether no timers but keepalive, idle, rtt and pushnewcid are running + /// Whether no timers but keepalive, idle, rtt, pushnewcid, and key discard are running #[cfg(test)] pub(crate) fn is_idle(&self) -> bool { - Timer::VALUES - .iter() - .filter(|&&t| t != Timer::KeepAlive && t != Timer::PushNewCid) - .filter_map(|&t| Some((t, self.timers.get(t)?))) - .min_by_key(|&(_, time)| time) - .map_or(true, |(timer, _)| timer == Timer::Idle) + let current_timers = self.timers.values(); + current_timers + .into_iter() + .filter(|entry| { + !matches!( + entry.timer, + Timer::KeepAlive(_) | Timer::PushNewCid | Timer::KeyDiscard + ) + }) + .min_by_key(|entry| entry.time) + .map_or(true, |entry| entry.timer == Timer::Idle) } /// Total number of outgoing packets that have been deemed lost @@ -3604,67 +4356,97 @@ impl Connection { /// Whether explicit congestion notification is in use on outgoing packets. 
#[cfg(test)] pub(crate) fn using_ecn(&self) -> bool { - self.path.sending_ecn + self.path_data(PathId(0)).sending_ecn } /// The number of received bytes in the current path #[cfg(test)] pub(crate) fn total_recvd(&self) -> u64 { - self.path.total_recvd + self.path_data(PathId(0)).total_recvd } #[cfg(test)] pub(crate) fn active_local_cid_seq(&self) -> (u64, u64) { - self.local_cid_state.active_seq() + self.local_cid_state.get(&PathId(0)).unwrap().active_seq() + } + + #[cfg(test)] + #[track_caller] + pub(crate) fn active_local_path_cid_seq(&self, path_id: u32) -> (u64, u64) { + self.local_cid_state + .get(&PathId(path_id)) + .unwrap() + .active_seq() } /// Instruct the peer to replace previously issued CIDs by sending a NEW_CONNECTION_ID frame /// with updated `retire_prior_to` field set to `v` #[cfg(test)] pub(crate) fn rotate_local_cid(&mut self, v: u64, now: Instant) { - let n = self.local_cid_state.assign_retire_seq(v); + let n = self + .local_cid_state + .get_mut(&PathId(0)) + .unwrap() + .assign_retire_seq(v); self.endpoint_events - .push_back(EndpointEventInner::NeedIdentifiers(now, n)); + .push_back(EndpointEventInner::NeedIdentifiers(PathId(0), now, n)); } - /// Check the current active remote CID sequence + /// Check the current active remote CID sequence for `PathId(0)` #[cfg(test)] pub(crate) fn active_rem_cid_seq(&self) -> u64 { - self.rem_cids.active_seq() + self.rem_cids.get(&PathId(0)).unwrap().active_seq() } /// Returns the detected maximum udp payload size for the current path #[cfg(test)] pub(crate) fn path_mtu(&self) -> u16 { - self.path.current_mtu() + self.path_data(PathId(0)).current_mtu() } /// Whether we have 1-RTT data to send /// - /// See also `self.space(SpaceId::Data).can_send()` - fn can_send_1rtt(&self, max_size: usize) -> bool { - self.streams.can_send_stream_data() - || self.path.challenge_pending - || self - .prev_path - .as_ref() - .is_some_and(|(_, x)| x.challenge_pending) - || !self.path_responses.is_empty() + /// This checks for 
frames that can only be sent in the data space (1-RTT): + /// - Pending PATH_CHALLENGE frames on the active and previous path if just migrated. + /// - Pending PATH_RESPONSE frames. + /// - Pending data to send in STREAM frames. + /// - Pending DATAGRAM frames to send. + /// + /// See also [`PacketSpace::can_send`] which keeps track of all other frame types that + /// may need to be sent. + fn can_send_1rtt(&self, path_id: PathId, max_size: usize) -> SendableFrames { + let path_exclusive = self.paths.get(&path_id).is_some_and(|path| { + path.data.challenge_pending + || path + .prev + .as_ref() + .is_some_and(|(_, path)| path.challenge_pending) + || !path.data.path_responses.is_empty() + }); + let other = self.streams.can_send_stream_data() || self .datagrams .outgoing .front() - .is_some_and(|x| x.size(true) <= max_size) + .is_some_and(|x| x.size(true) <= max_size); + SendableFrames { + acks: false, + other, + close: false, + path_exclusive, + } } /// Update counters to account for a packet becoming acknowledged, lost, or abandoned - fn remove_in_flight(&mut self, pn: u64, packet: &SentPacket) { + fn remove_in_flight(&mut self, path_id: PathId, pn: u64, packet: &SentPacket) { + // TODO(@divma): this should be completely moved into path + let path_mig_data = self.paths.get_mut(&path_id).expect("known path"); // Visit known paths from newest to oldest to find the one `pn` was sent on - for path in [&mut self.path] + for path_data in [&mut path_mig_data.data] .into_iter() - .chain(self.prev_path.as_mut().map(|(_, data)| data)) + .chain(path_mig_data.prev.as_mut().map(|(_, data)| data)) { - if path.remove_in_flight(pn, packet) { + if path_data.remove_in_flight(pn, packet) { return; } } @@ -3682,7 +4464,8 @@ impl Connection { /// /// Buffers passed to [`Connection::poll_transmit`] should be at least this large. 
pub fn current_mtu(&self) -> u16 { - self.path.current_mtu() + // TODO(@divma): fix + self.path_data(PathId(0)).current_mtu() } /// Size of non-frame data for a 1-RTT packet @@ -3691,19 +4474,38 @@ impl Connection { /// frames. Changes if the length of the remote connection ID changes, which is expected to be /// rare. If `pn` is specified, may additionally change unpredictably due to variations in /// latency and packet loss. - fn predict_1rtt_overhead(&self, pn: Option) -> usize { - let pn_len = match pn { - Some(pn) => PacketNumber::new( - pn, - self.spaces[SpaceId::Data].largest_acked_packet.unwrap_or(0), - ) - .len(), - // Upper bound - None => 4, - }; + fn predict_1rtt_overhead(&mut self, pn: u64, path: PathId) -> usize { + let pn_len = PacketNumber::new( + pn, + self.spaces[SpaceId::Data] + .for_path(path) + .largest_acked_packet + .unwrap_or(0), + ) + .len(); // 1 byte for flags - 1 + self.rem_cids.active().len() + pn_len + self.tag_len_1rtt() + 1 + self + .rem_cids + .get(&path) + .map(|cids| cids.active().len()) + .unwrap_or(20) // Max CID len in QUIC v1 + + pn_len + + self.tag_len_1rtt() + } + + fn predict_1rtt_overhead_no_pn(&self) -> usize { + let pn_len = 4; + + let cid_len = self + .rem_cids + .values() + .map(|cids| cids.active().len()) + .max() + .unwrap_or(20); // Max CID len in QUIC v1 + + // 1 byte for flags + 1 + cid_len + pn_len + self.tag_len_1rtt() } fn tag_len_1rtt(&self) -> usize { @@ -3716,6 +4518,50 @@ impl Connection { // but that would needlessly prevent sending datagrams during 0-RTT. 
key.map_or(16, |x| x.tag_len()) } + + /// Mark the path as validated, and enqueue NEW_TOKEN frames to be sent as appropriate + fn on_path_validated(&mut self, path_id: PathId) { + self.path_data_mut(path_id).validated = true; + let ConnectionSide::Server { server_config } = &self.side else { + return; + }; + let remote_addr = self.path_data(path_id).remote; + let new_tokens = &mut self.spaces[SpaceId::Data as usize].pending.new_tokens; + new_tokens.clear(); + for _ in 0..server_config.validation_token.sent { + new_tokens.push(remote_addr); + } + } + + /// Handle new path availability information + fn on_path_available(&mut self, path_id: PathId, is_backup: bool, status_seq_no: VarInt) { + if let Some(path_data) = self.paths.get_mut(&path_id) { + let data = &mut path_data.data; + // If a newer sequence no was sent, update the information. + if data.status_seq_no < Some(status_seq_no) { + data.status = if is_backup { + PathStatus::Backup + } else { + PathStatus::Available + }; + data.status_seq_no.replace(status_seq_no); + } + } else { + debug!("PATH_AVAILABLE received unknown path {:?}", path_id); + } + } + + /// Returns the maximum [`PathId`] to be used in this connection. + /// + /// This is calculated as minimum between the local and remote's maximums when multipath is + /// enabled, or `None` when disabled. + fn max_path_id(&self) -> Option { + if self.is_multipath_negotiated() { + Some(self.remote_max_path_id.min(self.local_max_path_id)) + } else { + None + } + } } impl fmt::Debug for Connection { @@ -3726,11 +4572,21 @@ impl fmt::Debug for Connection { } } +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +enum PathBlocked { + No, + AntiAmplification, + Congestion, + Pacing, +} + /// Fields of `Connection` specific to it being client-side or server-side enum ConnectionSide { Client { /// Sent in every outgoing Initial packet. 
Always empty after Initial keys are discarded token: Bytes, + token_store: Arc, + server_name: String, }, Server { server_config: Arc, @@ -3764,8 +4620,13 @@ impl ConnectionSide { impl From for ConnectionSide { fn from(side: SideArgs) -> Self { match side { - SideArgs::Client => Self::Client { - token: Bytes::new(), + SideArgs::Client { + token_store, + server_name, + } => Self::Client { + token: token_store.take(&server_name).unwrap_or_default(), + token_store, + server_name, }, SideArgs::Server { server_config, @@ -3778,7 +4639,10 @@ impl From for ConnectionSide { /// Parameters to `Connection::new` specific to it being client-side or server-side pub(crate) enum SideArgs { - Client, + Client { + token_store: Arc, + server_name: String, + }, Server { server_config: Arc, pref_addr_cid: Option, @@ -3856,7 +4720,7 @@ impl From for ConnectionError { // For compatibility with API consumers impl From for io::Error { fn from(x: ConnectionError) -> Self { - use self::ConnectionError::*; + use ConnectionError::*; let kind = match x { TimedOut => io::ErrorKind::TimedOut, Reset => io::ErrorKind::ConnectionReset, @@ -3953,14 +4817,12 @@ pub enum Event { DatagramsUnblocked, /// Received an observation of our external address from the peer. 
ObservedAddr(SocketAddr), + /// (Multi)Path events + Path(PathEvent), } fn instant_saturating_sub(x: Instant, y: Instant) -> Duration { - if x > y { - x - y - } else { - Duration::new(0, 0) - } + if x > y { x - y } else { Duration::ZERO } } fn get_max_ack_delay(params: &TransportParameters) -> Duration { @@ -3987,13 +4849,6 @@ const MIN_PACKET_SPACE: usize = MAX_HANDSHAKE_OR_0RTT_HEADER_SIZE + 32; const MAX_HANDSHAKE_OR_0RTT_HEADER_SIZE: usize = 1 + 4 + 1 + MAX_CID_SIZE + 1 + MAX_CID_SIZE + VarInt::from_u32(u16::MAX as u32).size() + 4; -/// The maximum amount of datagrams that are sent in a single transmit -/// -/// This can be lower than the maximum platform capabilities, to avoid excessive -/// memory allocations when calling `poll_transmit()`. Benchmarks have shown -/// that numbers around 10 are a good compromise. -const MAX_TRANSMIT_SEGMENTS: usize = 10; - /// Perform key updates this many packets before the AEAD confidentiality limit. /// /// Chosen arbitrarily, intended to be large enough to prevent spurious connection loss. 
diff --git a/quinn-proto/src/connection/mtud.rs b/quinn-proto/src/connection/mtud.rs index a20f15d8e6..b690731b41 100644 --- a/quinn-proto/src/connection/mtud.rs +++ b/quinn-proto/src/connection/mtud.rs @@ -1,4 +1,4 @@ -use crate::{packet::SpaceId, Instant, MtuDiscoveryConfig, MAX_UDP_PAYLOAD}; +use crate::{Instant, MAX_UDP_PAYLOAD, MtuDiscoveryConfig, packet::SpaceId}; use std::cmp; use tracing::trace; @@ -518,9 +518,9 @@ const BLACK_HOLE_THRESHOLD: usize = 3; #[cfg(test)] mod tests { use super::*; - use crate::packet::SpaceId; use crate::Duration; use crate::MAX_UDP_PAYLOAD; + use crate::packet::SpaceId; use assert_matches::assert_matches; fn default_mtud() -> MtuDiscovery { diff --git a/quinn-proto/src/connection/pacing.rs b/quinn-proto/src/connection/pacing.rs index 0e7df523ab..2e469948cb 100644 --- a/quinn-proto/src/connection/pacing.rs +++ b/quinn-proto/src/connection/pacing.rs @@ -73,7 +73,7 @@ impl Pacer { } // we disable pacing for extremely large windows - if window > u32::MAX.into() { + if window > u64::from(u32::MAX) { return None; } @@ -104,7 +104,7 @@ impl Pacer { let unscaled_delay = smoothed_rtt .checked_mul((bytes_to_send.max(self.capacity) - self.tokens) as _) - .unwrap_or_else(|| Duration::new(u64::MAX, 999_999_999)) + .unwrap_or(Duration::MAX) / window; // divisions come before multiplications to prevent overflow @@ -160,15 +160,21 @@ mod tests { let new_instant = old_instant + Duration::from_micros(15); let rtt = Duration::from_micros(400); - assert!(Pacer::new(rtt, 30000, 1500, new_instant) - .delay(Duration::from_micros(0), 0, 1500, 1, old_instant) - .is_none()); - assert!(Pacer::new(rtt, 30000, 1500, new_instant) - .delay(Duration::from_micros(0), 1600, 1500, 1, old_instant) - .is_none()); - assert!(Pacer::new(rtt, 30000, 1500, new_instant) - .delay(Duration::from_micros(0), 1500, 1500, 3000, old_instant) - .is_none()); + assert!( + Pacer::new(rtt, 30000, 1500, new_instant) + .delay(Duration::from_micros(0), 0, 1500, 1, old_instant) + 
.is_none() + ); + assert!( + Pacer::new(rtt, 30000, 1500, new_instant) + .delay(Duration::from_micros(0), 1600, 1500, 1, old_instant) + .is_none() + ); + assert!( + Pacer::new(rtt, 30000, 1500, new_instant) + .delay(Duration::from_micros(0), 1500, 1500, 3000, old_instant) + .is_none() + ); } #[test] diff --git a/quinn-proto/src/connection/packet_builder.rs b/quinn-proto/src/connection/packet_builder.rs index 011420b9da..9fbd4d536c 100644 --- a/quinn-proto/src/connection/packet_builder.rs +++ b/quinn-proto/src/connection/packet_builder.rs @@ -1,18 +1,27 @@ -use bytes::Bytes; +use bytes::{BufMut, Bytes}; use rand::Rng; use tracing::{trace, trace_span}; -use super::{spaces::SentPacket, Connection, SentFrames}; +use super::{Connection, PathId, SentFrames, TransmitBuf, spaces::SentPacket}; use crate::{ + ConnectionId, Instant, MIN_INITIAL_SIZE, TransportError, TransportErrorCode, connection::ConnectionSide, frame::{self, Close}, - packet::{Header, InitialHeader, LongType, PacketNumber, PartialEncode, SpaceId, FIXED_BIT}, - ConnectionId, Instant, TransportError, TransportErrorCode, + packet::{FIXED_BIT, Header, InitialHeader, LongType, PacketNumber, PartialEncode, SpaceId}, }; -pub(super) struct PacketBuilder { - pub(super) datagram_start: usize, +/// QUIC packet builder +/// +/// This allows building QUIC packets: it takes care of writing the header, allows writing +/// frames and on [`PacketBuilder::finish`] (or [`PacketBuilder::finish_and_track`]) it +/// encrypts the packet so it is ready to be sent on the wire. +/// +/// The builder manages the write buffer into which the packet is written, and directly +/// implements [`BufMut`] to write frames into the packet. 
+pub(super) struct PacketBuilder<'a, 'b> { + pub(super) buf: &'a mut TransmitBuf<'b>, pub(super) space: SpaceId, + path: PathId, pub(super) partial_encode: PartialEncode, pub(super) ack_eliciting: bool, pub(super) exact_number: u64, @@ -20,14 +29,11 @@ pub(super) struct PacketBuilder { /// Smallest absolute position in the associated buffer that must be occupied by this packet's /// frames pub(super) min_size: usize, - /// Largest absolute position in the associated buffer that may be occupied by this packet's - /// frames - pub(super) max_size: usize, pub(super) tag_len: usize, pub(super) _span: tracing::span::EnteredSpan, } -impl PacketBuilder { +impl<'a, 'b> PacketBuilder<'a, 'b> { /// Write a new packet header to `buffer` and determine the packet's properties /// /// Marks the connection drained and returns `None` if the confidentiality limit would be @@ -35,19 +41,21 @@ impl PacketBuilder { pub(super) fn new( now: Instant, space_id: SpaceId, + path_id: PathId, dst_cid: ConnectionId, - buffer: &mut Vec, - buffer_capacity: usize, - datagram_start: usize, + buffer: &'a mut TransmitBuf<'b>, ack_eliciting: bool, conn: &mut Connection, - ) -> Option { + ) -> Option + where + 'b: 'a, + { let version = conn.version; // Initiate key update if we're approaching the confidentiality limit - let sent_with_keys = conn.spaces[space_id].sent_with_keys; + let sent_with_keys = conn.spaces[space_id].sent_with_keys(); if space_id == SpaceId::Data { if sent_with_keys >= conn.key_phase_size { - conn.initiate_key_update(); + conn.force_key_update(); } } else { let confidentiality_limit = conn.spaces[space_id] @@ -78,14 +86,13 @@ impl PacketBuilder { } let space = &mut conn.spaces[space_id]; - let exact_number = match space_id { - SpaceId::Data => conn.packet_number_filter.allocate(&mut conn.rng, space), - _ => space.get_tx_number(), - }; - + let exact_number = space.for_path(path_id).get_tx_number(&mut conn.rng); let span = trace_span!("send", space = ?space_id, pn = 
exact_number).entered(); - let number = PacketNumber::new(exact_number, space.largest_acked_packet.unwrap_or(0)); + let number = PacketNumber::new( + exact_number, + space.for_path(path_id).largest_acked_packet.unwrap_or(0), + ); let header = match space_id { SpaceId::Data if space.crypto.is_some() => Header::Short { dst_cid, @@ -93,7 +100,7 @@ impl PacketBuilder { spin: if conn.spin_enabled { conn.spin } else { - conn.rng.gen() + conn.rng.random() }, key_phase: conn.key_phase, }, @@ -123,8 +130,8 @@ impl PacketBuilder { }), }; let partial_encode = header.encode(buffer); - if conn.peer_params.grease_quic_bit && conn.rng.gen() { - buffer[partial_encode.start] ^= FIXED_BIT; + if conn.peer_params.grease_quic_bit && conn.rng.random() { + buffer.as_mut_slice()[partial_encode.start] ^= FIXED_BIT; } let (sample_size, tag_len) = if let Some(ref crypto) = space.crypto { @@ -151,17 +158,17 @@ impl PacketBuilder { buffer.len() + (sample_size + 4).saturating_sub(number.len() + tag_len), partial_encode.start + dst_cid.len() + 6, ); - let max_size = buffer_capacity - tag_len; + let max_size = buffer.datagram_max_offset() - tag_len; debug_assert!(max_size >= min_size); Some(Self { - datagram_start, + buf: buffer, space: space_id, + path: path_id, partial_encode, exact_number, short_header: header.is_short(), min_size, - max_size, tag_len, ack_eliciting, _span: span, @@ -176,25 +183,34 @@ impl PacketBuilder { // already. self.min_size = Ord::max( self.min_size, - self.datagram_start + (min_size as usize) - self.tag_len, + self.buf.datagram_start_offset() + (min_size as usize) - self.tag_len, ); } + /// Returns a writable buffer limited to the remaining frame space + /// + /// The [`BufMut::remaining_mut`] call on the returned buffer indicates the amount of + /// space available to write QUIC frames into. 
+ // In rust 1.82 we can use `-> impl BufMut + use<'_, 'a, 'b>` + pub(super) fn frame_space_mut(&mut self) -> bytes::buf::Limit<&mut TransmitBuf<'b>> { + self.buf.limit(self.frame_space_remaining()) + } + pub(super) fn finish_and_track( - self, + mut self, now: Instant, conn: &mut Connection, - sent: Option, - buffer: &mut Vec, + path_id: PathId, + sent: SentFrames, + pad_datagram: bool, ) { + if pad_datagram { + self.pad_to(MIN_INITIAL_SIZE); + } let ack_eliciting = self.ack_eliciting; let exact_number = self.exact_number; let space_id = self.space; - let (size, padded) = self.finish(conn, buffer); - let sent = match sent { - Some(sent) => sent, - None => return, - }; + let (size, padded) = self.finish(conn); let size = match padded || ack_eliciting { true => size as u16, @@ -210,29 +226,39 @@ impl PacketBuilder { stream_frames: sent.stream_frames, }; - conn.path - .sent(exact_number, packet, &mut conn.spaces[space_id]); - conn.stats.path.sent_packets += 1; - conn.reset_keep_alive(now); + conn.paths.get_mut(&path_id).unwrap().data.sent( + path_id, + exact_number, + packet, + &mut conn.spaces[space_id], + ); + conn.stats.paths.entry(path_id).or_default().sent_packets += 1; + conn.reset_keep_alive(path_id, now); if size != 0 { if ack_eliciting { - conn.spaces[space_id].time_of_last_ack_eliciting_packet = Some(now); + conn.spaces[space_id] + .for_path(path_id) + .time_of_last_ack_eliciting_packet = Some(now); if conn.permit_idle_reset { conn.reset_idle_timeout(now, space_id); } conn.permit_idle_reset = false; } - conn.set_loss_detection_timer(now); - conn.path.pacing.on_transmit(size); + conn.set_loss_detection_timer(now, path_id); + conn.path_data_mut(path_id).pacing.on_transmit(size); } } /// Encrypt packet, returning the length of the packet and whether padding was added - pub(super) fn finish(self, conn: &mut Connection, buffer: &mut Vec) -> (usize, bool) { - let pad = buffer.len() < self.min_size; + pub(super) fn finish(self, conn: &mut Connection) -> (usize, 
bool) { + debug_assert!( + self.buf.len() <= self.buf.datagram_max_offset() - self.tag_len, + "packet exceeds maximum size" + ); + let pad = self.buf.len() < self.min_size; if pad { - trace!("PADDING * {}", self.min_size - buffer.len()); - buffer.resize(self.min_size, 0); + trace!("PADDING * {}", self.min_size - self.buf.len()); + self.buf.put_bytes(0, self.min_size - self.buf.len()); } let space = &conn.spaces[self.space]; @@ -251,15 +277,35 @@ impl PacketBuilder { "Mismatching crypto tag len" ); - buffer.resize(buffer.len() + packet_crypto.tag_len(), 0); + self.buf.put_bytes(0, packet_crypto.tag_len()); let encode_start = self.partial_encode.start; - let packet_buf = &mut buffer[encode_start..]; + let packet_buf = &mut self.buf.as_mut_slice()[encode_start..]; + // for packet protection, PathId(0) and no path are equivalent. self.partial_encode.finish( packet_buf, header_crypto, - Some((self.exact_number, packet_crypto)), + Some((self.exact_number, self.path, packet_crypto)), ); - (buffer.len() - encode_start, pad) + let packet_len = self.buf.len() - encode_start; + trace!(size = %packet_len, short_header = %self.short_header, "wrote packet"); + (packet_len, pad) + } + + /// The number of additional bytes the current packet would take up if it was finished now + /// + /// This will include any padding which is required to make the size large enough to be + /// encrypted correctly. + pub(super) fn predict_packet_end(&self) -> usize { + self.buf.len().max(self.min_size) + self.tag_len - self.buf.len() + } + + /// Returns the remaining space in the packet that can be taken up by QUIC frames + /// + /// This leaves space in the datagram for the cryptographic tag that needs to be written + /// when the packet is finished. 
+ pub(super) fn frame_space_remaining(&self) -> usize { + let max_offset = self.buf.datagram_max_offset() - self.tag_len; + max_offset.saturating_sub(self.buf.len()) } } diff --git a/quinn-proto/src/connection/packet_crypto.rs b/quinn-proto/src/connection/packet_crypto.rs index 0d3063aa91..2001829aa4 100644 --- a/quinn-proto/src/connection/packet_crypto.rs +++ b/quinn-proto/src/connection/packet_crypto.rs @@ -1,11 +1,13 @@ use tracing::{debug, trace}; +use crate::Instant; use crate::connection::spaces::PacketSpace; use crate::crypto::{HeaderKey, KeyPair, PacketKey}; use crate::packet::{Packet, PartialDecode, SpaceId}; use crate::token::ResetToken; -use crate::Instant; -use crate::{TransportError, RESET_TOKEN_SIZE}; +use crate::{RESET_TOKEN_SIZE, TransportError}; + +use super::PathId; /// Removes header protection of a packet, or returns `None` if the packet was dropped pub(super) fn unprotect_header( @@ -68,6 +70,7 @@ pub(super) struct UnprotectHeaderResult { /// Decrypts a packet's body in-place pub(super) fn decrypt_packet_body( packet: &mut Packet, + path_id: PathId, spaces: &[PacketSpace; 3], zero_rtt_crypto: Option<&ZeroRttCrypto>, conn_key_phase: bool, @@ -110,7 +113,7 @@ pub(super) fn decrypt_packet_body( }; crypto - .decrypt(number, &packet.header_data, &mut packet.payload) + .decrypt(path_id, number, &packet.header_data, &mut packet.payload) .map_err(|_| { trace!("decryption failed with packet number {}", number); None diff --git a/quinn-proto/src/connection/paths.rs b/quinn-proto/src/connection/paths.rs index 043e6d10e2..bb5f75e0d5 100644 --- a/quinn-proto/src/connection/paths.rs +++ b/quinn-proto/src/connection/paths.rs @@ -8,10 +8,59 @@ use super::{ spaces::{PacketSpace, SentPacket}, }; use crate::{ - congestion, frame::ObservedAddr, packet::SpaceId, Duration, Instant, TransportConfig, - TIMER_GRANULARITY, + Duration, Instant, TIMER_GRANULARITY, TransportConfig, VarInt, coding, congestion, + frame::ObservedAddr, packet::SpaceId, }; +/// Id representing 
different paths when using multipath extension +// TODO(@divma): improve docs, reconsider access to inner +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Default, Hash)] +pub struct PathId(pub(crate) u32); + +impl coding::Codec for PathId { + fn decode(r: &mut B) -> coding::Result { + let v = VarInt::decode(r)?; + let v = u32::try_from(v.0).map_err(|_| coding::UnexpectedEnd)?; + Ok(Self(v)) + } + + fn encode(&self, w: &mut B) { + VarInt(self.0.into()).encode(w) + } +} + +impl PathId { + /// The maximum path ID allowed. + pub const MAX: Self = Self(u32::MAX); + + /// The 0 path id. + pub const ZERO: Self = Self(0); + + pub(crate) fn size(&self) -> usize { + VarInt(self.0 as u64).size() + } + + /// Saturating integer addition. Computes self + rhs, saturating at the numeric bounds instead + /// of overflowing. + pub fn saturating_add(self, rhs: impl Into) -> Self { + let rhs = rhs.into(); + let inner = self.0.saturating_add(rhs.0); + Self(inner) + } +} + +impl std::fmt::Display for PathId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + +impl> From for PathId { + fn from(source: T) -> Self { + Self(source.into()) + } +} + /// Description of a particular network path pub(super) struct PathData { pub(super) remote: SocketAddr, @@ -24,6 +73,8 @@ pub(super) struct PathData { pub(super) pacing: Pacer, pub(super) challenge: Option, pub(super) challenge_pending: bool, + /// Pending responses to PATH_CHALLENGE frames + pub(super) path_responses: PathResponses, /// Whether we're certain the peer can both send and receive on this address /// /// Initially equal to `use_stateless_retry` for servers, and becomes false again on every @@ -45,11 +96,17 @@ pub(super) struct PathData { pub(super) observed_addr_sent: bool, /// Observed address frame with the largest sequence number received from the peer on this path. 
pub(super) last_observed_addr_report: Option, + /// The QUIC-MULTIPATH path status + pub(super) status: PathStatus, + /// The sequence number of the received PATH_AVAILABLE and PATH_BACKUP frames. + pub(super) status_seq_no: Option, /// Number of the first packet sent on this path /// /// Used to determine whether a packet was sent on an earlier path. Insufficient to determine if /// a packet was sent on a later path. first_packet: Option, + /// The number of times a PTO has been sent without receiving an ack. + pub(super) pto_count: u32, } impl PathData { @@ -58,7 +115,6 @@ impl PathData { allow_mtud: bool, peer_max_udp_payload_size: Option, now: Instant, - validated: bool, config: &TransportConfig, ) -> Self { let congestion = config @@ -78,7 +134,8 @@ impl PathData { congestion, challenge: None, challenge_pending: false, - validated, + path_responses: PathResponses::default(), + validated: false, total_sent: 0, total_recvd: 0, mtud: config @@ -100,7 +157,10 @@ impl PathData { in_flight: InFlight::new(), observed_addr_sent: false, last_observed_addr_report: None, + status: Default::default(), + status_seq_no: None, first_packet: None, + pto_count: 0, } } @@ -118,6 +178,7 @@ impl PathData { congestion, challenge: None, challenge_pending: false, + path_responses: PathResponses::default(), validated: false, total_sent: 0, total_recvd: 0, @@ -126,7 +187,10 @@ impl PathData { in_flight: InFlight::new(), observed_addr_sent: false, last_observed_addr_report: None, + status: prev.status, + status_seq_no: prev.status_seq_no, first_packet: None, + pto_count: 0, } } @@ -154,12 +218,19 @@ impl PathData { } /// Account for transmission of `packet` with number `pn` in `space` - pub(super) fn sent(&mut self, pn: u64, packet: SentPacket, space: &mut PacketSpace) { + pub(super) fn sent( + &mut self, + path: PathId, + pn: u64, + packet: SentPacket, + space: &mut PacketSpace, + ) { self.in_flight.insert(&packet); if self.first_packet.is_none() { self.first_packet = Some(pn); } - 
self.in_flight.bytes -= space.sent(pn, packet); + // TODO(@divma): why is Path receiving a path_id?? + self.in_flight.bytes -= space.for_path(path).sent(pn, packet); } /// Remove `packet` with number `pn` from this path's congestion control counters, or return @@ -172,6 +243,30 @@ impl PathData { true } + /// Increment the total size of sent UDP datagrams + pub(super) fn inc_total_sent(&mut self, inc: u64) { + self.total_sent = self.total_sent.saturating_add(inc); + } + + /// Increment the total size of received UDP datagrams + pub(super) fn inc_total_recvd(&mut self, inc: u64) { + self.total_recvd = self.total_recvd.saturating_add(inc); + } + + /// Return how long we need to wait before sending `bytes_to_send` + /// + /// See [`Pacer::delay`]. + pub(super) fn pacing_delay(&mut self, bytes_to_send: u64, now: Instant) -> Option { + let smoothed_rtt = self.rtt.get(); + self.pacing.delay( + smoothed_rtt, + bytes_to_send, + self.current_mtu(), + self.congestion.window(), + now, + ) + } + /// Updates the last observed address report received on this path. /// /// If the address was updated, it's returned to be informed to the application. 
@@ -307,9 +402,9 @@ impl PathResponses { } } - pub(crate) fn pop_off_path(&mut self, remote: &SocketAddr) -> Option<(u64, SocketAddr)> { + pub(crate) fn pop_off_path(&mut self, remote: SocketAddr) -> Option<(u64, SocketAddr)> { let response = *self.pending.last()?; - if response.remote == *remote { + if response.remote == remote { // We don't bother searching further because we expect that the on-path response will // get drained in the immediate future by a call to `pop_on_path` return None; @@ -318,9 +413,9 @@ impl PathResponses { } - pub(crate) fn pop_on_path(&mut self, remote: &SocketAddr) -> Option { + pub(crate) fn pop_on_path(&mut self, remote: SocketAddr) -> Option { let response = *self.pending.last()?; - if response.remote != *remote { + if response.remote != remote { // We don't bother searching further because we expect that the off-path response will // get drained in the immediate future by a call to `pop_off_path` return None; @@ -338,6 +433,7 @@ impl PathResponses { struct PathResponse { /// The packet number the corresponding PATH_CHALLENGE was received in packet: u64, + /// The token of the PATH_CHALLENGE token: u64, /// The address the corresponding PATH_CHALLENGE was received from remote: SocketAddr, @@ -378,3 +474,57 @@ impl InFlight { self.ack_eliciting -= u64::from(packet.ack_eliciting); } } + +/// The QUIC-MULTIPATH path status +/// +/// See section "3.3 Path Status Management": +/// +#[derive(Debug, Copy, Clone, Default, PartialEq, Eq)] +pub enum PathStatus { + /// Paths marked as available will be used when scheduling packets + /// + /// If multiple paths are available, packets will be scheduled on whichever has + /// capacity. + #[default] + Available, + /// Paths marked as backup will only be used if there are no available paths + /// + /// If the max_idle_timeout is specified the path will be kept alive so that it does not + /// expire.
+ Backup, +} + +/// Application events about paths +#[derive(Debug, PartialEq, Eq)] +pub enum PathEvent { + /// A new path has been opened + Opened { + /// Which path is now open + id: PathId, + }, + /// A path has been closed + Closed { + /// Which path has been closed + id: PathId, + /// Error code supplied by the peer + /// See + /// for a list of known errors. + error_code: VarInt, + }, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_path_id_saturating_add() { + // add within range behaves normally + let large: PathId = u16::MAX.into(); + let next = u32::from(u16::MAX) + 1; + assert_eq!(large.saturating_add(1u8), PathId::from(next)); + + // outside range saturates + assert_eq!(PathId::MAX.saturating_add(1u8), PathId::MAX) + } +} diff --git a/quinn-proto/src/connection/send_buffer.rs b/quinn-proto/src/connection/send_buffer.rs index cd41c3ef78..53a7416efc 100644 --- a/quinn-proto/src/connection/send_buffer.rs +++ b/quinn-proto/src/connection/send_buffer.rs @@ -2,7 +2,7 @@ use std::{collections::VecDeque, ops::Range}; use bytes::{Buf, Bytes}; -use crate::{range_set::RangeSet, VarInt}; +use crate::{VarInt, range_set::RangeSet}; /// Buffer of outgoing retransmittable stream data #[derive(Default, Debug)] @@ -339,7 +339,7 @@ mod tests { buf.ack(4..7); assert_eq!(aggregate_unacked(&buf), &MSG[9..]); buf.ack(0..MSG_LEN); - assert_eq!(aggregate_unacked(&buf), &[]); + assert_eq!(aggregate_unacked(&buf), &[] as &[u8]); } #[test] diff --git a/quinn-proto/src/connection/spaces.rs b/quinn-proto/src/connection/spaces.rs index 0d0edad68d..ef7bea7c27 100644 --- a/quinn-proto/src/connection/spaces.rs +++ b/quinn-proto/src/connection/spaces.rs @@ -6,18 +6,18 @@ use std::{ }; use rand::Rng; -use rustc_hash::FxHashSet; -use tracing::trace; +use rustc_hash::{FxHashMap, FxHashSet}; +use tracing::{error, trace}; -use super::assembler::Assembler; +use super::{PathId, assembler::Assembler}; use crate::{ - connection::StreamsState, crypto::Keys, frame, 
packet::SpaceId, range_set::ArrayRangeSet, - shared::IssuedCid, Dir, Duration, Instant, StreamId, TransportError, VarInt, + Dir, Duration, Instant, SocketAddr, StreamId, TransportError, VarInt, connection::StreamsState, + crypto::Keys, frame, packet::SpaceId, range_set::ArrayRangeSet, shared::IssuedCid, }; pub(super) struct PacketSpace { pub(super) crypto: Option, - pub(super) dedup: Dedup, + pub(super) dedup: FxHashMap, /// Highest received packet number pub(super) rx_packet: u64, @@ -26,84 +26,71 @@ pub(super) struct PacketSpace { /// Packet numbers to acknowledge pub(super) pending_acks: PendingAcks, - /// The packet number of the next packet that will be sent, if any. In the Data space, the - /// packet number stored here is sometimes skipped by [`PacketNumberFilter`] logic. - pub(super) next_packet_number: u64, - /// The largest packet number the remote peer acknowledged in an ACK frame. - pub(super) largest_acked_packet: Option, - pub(super) largest_acked_packet_sent: Instant, - /// The highest-numbered ACK-eliciting packet we've sent - pub(super) largest_ack_eliciting_sent: u64, - /// Number of packets in `sent_packets` with numbers above `largest_ack_eliciting_sent` - pub(super) unacked_non_ack_eliciting_tail: u64, - /// Transmitted but not acked - // We use a BTreeMap here so we can efficiently query by range on ACK and for loss detection - pub(super) sent_packets: BTreeMap, - /// Number of explicit congestion notification codepoints seen on incoming packets - pub(super) ecn_counters: frame::EcnCounts, - /// Recent ECN counters sent by the peer in ACK frames - /// - /// Updated (and inspected) whenever we receive an ACK with a new highest acked packet - /// number. Stored per-space to simplify verification, which would otherwise have difficulty - /// distinguishing between ECN bleaching and counts having been updated by a near-simultaneous - /// ACK already processed in another space. 
- pub(super) ecn_feedback: frame::EcnCounts, - /// Incoming cryptographic handshake stream pub(super) crypto_stream: Assembler, /// Current offset of outgoing cryptographic handshake stream pub(super) crypto_offset: u64, - /// The time the most recently sent retransmittable packet was sent. - pub(super) time_of_last_ack_eliciting_packet: Option, - /// The time at which the earliest sent packet in this space will be considered lost based on - /// exceeding the reordering window in time. Only set for packets numbered prior to a packet - /// that has been acknowledged. - pub(super) loss_time: Option, - /// Number of tail loss probes to send - pub(super) loss_probes: u32, - pub(super) ping_pending: bool, - pub(super) immediate_ack_pending: bool, - /// Number of congestion control "in flight" bytes - pub(super) in_flight: u64, - /// Number of packets sent in the current key phase - pub(super) sent_with_keys: u64, + /// Multipath packet number spaces + /// + /// Each [`PathId`] has its own [`PacketNumberSpace`]. Only the [`SpaceId::Data`] can + have multiple packet number spaces, the other spaces only have a number space for + `PathId(0)`, which is populated at creation.
+ pub(super) number_spaces: BTreeMap, } impl PacketSpace { - pub(super) fn new(now: Instant) -> Self { + pub(super) fn new(now: Instant, space: SpaceId, rng: &mut (impl Rng + ?Sized)) -> Self { + let number_space_0 = PacketNumberSpace::new(now, space, rng); Self { crypto: None, - dedup: Dedup::new(), + dedup: Default::default(), rx_packet: 0, - pending: Retransmits::default(), pending_acks: PendingAcks::new(), - - next_packet_number: 0, - largest_acked_packet: None, - largest_acked_packet_sent: now, - largest_ack_eliciting_sent: 0, - unacked_non_ack_eliciting_tail: 0, - sent_packets: BTreeMap::new(), - ecn_counters: frame::EcnCounts::ZERO, - ecn_feedback: frame::EcnCounts::ZERO, - crypto_stream: Assembler::new(), crypto_offset: 0, + number_spaces: BTreeMap::from([(PathId(0), number_space_0)]), + } + } - time_of_last_ack_eliciting_packet: None, - loss_time: None, - loss_probes: 0, - ping_pending: false, - immediate_ack_pending: false, - in_flight: 0, - sent_with_keys: 0, + #[cfg(test)] + pub(super) fn new_deterministic(now: Instant, space: SpaceId) -> Self { + let number_space_0 = PacketNumberSpace::new_deterministic(now, space); + Self { + crypto: None, + dedup: Default::default(), + rx_packet: 0, + pending: Retransmits::default(), + pending_acks: PendingAcks::new(), + crypto_stream: Assembler::new(), + crypto_offset: 0, + number_spaces: BTreeMap::from([(PathId(0), number_space_0)]), } } + /// Returns the [`PacketNumberSpace`] for a path + /// + /// When multipath is disabled use `PathId(0)`. + // TODO(flub): Note that this only exists as `&mut self` because it creates a new + // [`PacketNumberSpace`] if one is not yet available for a path. This forces a few + // more `&mut` references to users than strictly needed. An alternative would be to + // return an Option but that would need to be handled for all callers. This could be + // worth exploring once we have all the main multipath bits fitted. 
+ pub(super) fn for_path(&mut self, path: PathId) -> &mut PacketNumberSpace { + self.number_spaces + .entry(path) + .or_insert_with(PacketNumberSpace::new_default) + } + + pub(super) fn iter_paths_mut(&mut self) -> impl Iterator { + self.number_spaces.values_mut() + } + /// Queue data for a tail loss probe (or anti-amplification deadlock prevention) packet /// + /// Does nothing if no tail loss probe needs to be sent. + /// /// Probes are sent similarly to normal packets when an expected ACK has not arrived. We never /// deem a packet lost until we receive an ACK that should have included it, but if a trailing /// run of packets (or their ACKs) are lost, this might not happen in a timely fashion. We send @@ -116,17 +103,18 @@ impl PacketSpace { /// anti-amplification deadlock and we just make something up. pub(super) fn maybe_queue_probe( &mut self, + path_id: PathId, request_immediate_ack: bool, streams: &StreamsState, ) { - if self.loss_probes == 0 { + if self.for_path(path_id).loss_probes == 0 { return; } if request_immediate_ack { // The probe should be ACKed without delay (should only be used in the Data space and // when the peer supports the acknowledgement frequency extension) - self.immediate_ack_pending = true; + self.for_path(path_id).immediate_ack_pending = true; } // Retransmit the data of the oldest in-flight packet @@ -135,7 +123,12 @@ impl PacketSpace { return; } - for packet in self.sent_packets.values_mut() { + // We use retransmits from any path. + for packet in self + .number_spaces + .values_mut() + .flat_map(|s| s.sent_packets.values_mut()) + { if !packet.retransmits.is_empty(streams) { // Remove retransmitted data from the old packet so we don't end up retransmitting // it *again* even if the copy we're sending now gets acknowledged. @@ -147,28 +140,231 @@ impl PacketSpace { // Nothing new to send and nothing to retransmit, so fall back on a ping. 
This should only // happen in rare cases during the handshake when the server becomes blocked by // anti-amplification. - self.ping_pending = true; + if !self.for_path(path_id).immediate_ack_pending { + self.for_path(path_id).ping_pending = true; + } + } + + /// Whether there is anything to send in this space + /// + /// For the data space [`Connection::can_send_1rtt`] also needs to be consulted. + /// + /// [`Connection::can_send_1rtt`]: super::Connection::can_send_1rtt + pub(super) fn can_send(&self, path_id: PathId, streams: &StreamsState) -> SendableFrames { + let acks = self.pending_acks.can_send(); + let path_exclusive = self + .number_spaces + .get(&path_id) + .is_some_and(|s| s.ping_pending || s.immediate_ack_pending); + let other = !self.pending.is_empty(streams) || path_exclusive; + SendableFrames { + acks, + other, + close: false, + path_exclusive, + } + } + + /// The number of packets sent with the current crypto keys + /// + /// Used to know if a key update is needed. + pub(super) fn sent_with_keys(&self) -> u64 { + self.number_spaces.values().map(|s| s.sent_with_keys).sum() + } +} + +impl Index for [PacketSpace; 3] { + type Output = PacketSpace; + fn index(&self, space: SpaceId) -> &PacketSpace { + &self.as_ref()[space as usize] + } +} + +impl IndexMut for [PacketSpace; 3] { + fn index_mut(&mut self, space: SpaceId) -> &mut PacketSpace { + &mut self.as_mut()[space as usize] + } +} + +/// The per-path packet number space to support multipath. +/// +/// This contains the data specific to a per-path packet number space. You should access +/// this via [`PacketSpace::for_path`]. +pub(super) struct PacketNumberSpace { + /// The packet number of the next packet that will be sent, if any. In the Data space, the + /// packet number stored here is sometimes skipped by [`PacketNumberFilter`] logic. + pub(super) next_packet_number: u64, + /// The largest packet number the remote peer acknowledged in an ACK frame. 
+ pub(super) largest_acked_packet: Option, + pub(super) largest_acked_packet_sent: Instant, + /// The highest-numbered ACK-eliciting packet we've sent + pub(super) largest_ack_eliciting_sent: u64, + /// Number of packets in `sent_packets` with numbers above `largest_ack_eliciting_sent` + pub(super) unacked_non_ack_eliciting_tail: u64, + /// Transmitted but not acked + // We use a BTreeMap here so we can efficiently query by range on ACK and for loss detection + pub(super) sent_packets: BTreeMap, + /// Number of explicit congestion notification codepoints seen on incoming packets + pub(super) ecn_counters: frame::EcnCounts, + /// Recent ECN counters sent by the peer in ACK frames + /// + /// Updated (and inspected) whenever we receive an ACK with a new highest acked packet + /// number. Stored per-space to simplify verification, which would otherwise have difficulty + /// distinguishing between ECN bleaching and counts having been updated by a near-simultaneous + /// ACK already processed in another space. + pub(super) ecn_feedback: frame::EcnCounts, + /// Number of congestion control "in flight" bytes + pub(super) in_flight: u64, + /// Number of packets sent in the current key phase + pub(super) sent_with_keys: u64, + /// A PING frame needs to be sent on this path + pub(super) ping_pending: bool, + /// An IMMEDIATE_ACK (draft-ietf-quic-ack-frequency) frame needs to be sent on this path + pub(super) immediate_ack_pending: bool, + + // + // Loss Detection + // + /// The time the most recently sent retransmittable packet was sent. + pub(super) time_of_last_ack_eliciting_packet: Option, + /// Earliest time when we might declare a packet lost. + /// + /// The time at which the earliest sent packet in this space will be considered lost + /// based on exceeding the reordering window in time. Only set for packets numbered + /// prior to a packet that has been acknowledged. 
+ pub(super) loss_time: Option, + /// Number of tail loss probes to send + pub(super) loss_probes: u32, + + /// Packet numbers to skip, only used in the data packet space. + pn_filter: Option, +} + +impl PacketNumberSpace { + fn new(now: Instant, space: SpaceId, rng: &mut (impl Rng + ?Sized)) -> Self { + let pn_filter = match space { + SpaceId::Initial | SpaceId::Handshake => None, + SpaceId::Data => Some(PacketNumberFilter::new(rng)), + }; + Self { + next_packet_number: 0, + largest_acked_packet: None, + largest_acked_packet_sent: now, + largest_ack_eliciting_sent: 0, + unacked_non_ack_eliciting_tail: 0, + sent_packets: BTreeMap::new(), + ecn_counters: frame::EcnCounts::ZERO, + ecn_feedback: frame::EcnCounts::ZERO, + in_flight: 0, + sent_with_keys: 0, + ping_pending: false, + immediate_ack_pending: false, + time_of_last_ack_eliciting_packet: None, + loss_time: None, + loss_probes: 0, + pn_filter, + } + } + + #[cfg(test)] + fn new_deterministic(now: Instant, space: SpaceId) -> Self { + let pn_filter = match space { + SpaceId::Initial | SpaceId::Handshake => None, + SpaceId::Data => Some(PacketNumberFilter::disabled()), + }; + Self { + next_packet_number: 0, + largest_acked_packet: None, + largest_acked_packet_sent: now, + largest_ack_eliciting_sent: 0, + unacked_non_ack_eliciting_tail: 0, + sent_packets: BTreeMap::new(), + ecn_counters: frame::EcnCounts::ZERO, + ecn_feedback: frame::EcnCounts::ZERO, + in_flight: 0, + sent_with_keys: 0, + ping_pending: false, + immediate_ack_pending: false, + time_of_last_ack_eliciting_packet: None, + loss_time: None, + loss_probes: 0, + pn_filter, + } + } + + /// Creates a default PacketNumberSpace + /// + /// This allows us to be type-safe about always being able to access a + /// PacketNumberSpace. While the space will work it will not skip packet numbers to + /// protect against eager ack attacks.
+ fn new_default() -> Self { + error!("PacketNumberSpace created by default"); + Self { + next_packet_number: 0, + largest_acked_packet: None, + largest_acked_packet_sent: Instant::now(), + largest_ack_eliciting_sent: 0, + unacked_non_ack_eliciting_tail: 0, + sent_packets: BTreeMap::new(), + ecn_counters: frame::EcnCounts::ZERO, + ecn_feedback: frame::EcnCounts::ZERO, + in_flight: 0, + sent_with_keys: 0, + ping_pending: false, + immediate_ack_pending: false, + time_of_last_ack_eliciting_packet: None, + loss_time: None, + loss_probes: 0, + pn_filter: None, + } } /// Get the next outgoing packet number in this space /// /// In the Data space, the connection's [`PacketNumberFilter`] must be used rather than calling /// this directly. - pub(super) fn get_tx_number(&mut self) -> u64 { + pub(super) fn get_tx_number(&mut self, rng: &mut (impl Rng + ?Sized)) -> u64 { // TODO: Handle packet number overflow gracefully assert!(self.next_packet_number < 2u64.pow(62)); - let x = self.next_packet_number; + let mut pn = self.next_packet_number; self.next_packet_number += 1; self.sent_with_keys += 1; - x + + // Skip this number if the filter says so, only enabled in the data space + if let Some(ref mut filter) = self.pn_filter { + if filter.skip_pn(pn, rng) { + pn = self.next_packet_number; + self.next_packet_number += 1; + self.sent_with_keys += 1; + } + } + pn } - pub(super) fn can_send(&self, streams: &StreamsState) -> SendableFrames { - let acks = self.pending_acks.can_send(); - let other = - !self.pending.is_empty(streams) || self.ping_pending || self.immediate_ack_pending; + pub(super) fn peek_tx_number(&mut self) -> u64 { + let pn = self.next_packet_number; + if let Some(ref filter) = self.pn_filter { + if pn == filter.next_skipped_packet_number { + return pn + 1; + } + } + pn + } - SendableFrames { acks, other } + /// Checks whether a skipped packet number was ACKed. 
+ pub(super) fn check_ack( + &self, + range: std::ops::RangeInclusive, + ) -> Result<(), TransportError> { + if let Some(ref filter) = self.pn_filter { + if filter + .prev_skipped_packet_number + .is_some_and(|pn| range.contains(&pn)) + { + return Err(TransportError::PROTOCOL_VIOLATION("unsent packet acked")); + } + } + Ok(()) } /// Verifies sanity of an ECN block and returns whether congestion was encountered. @@ -259,19 +455,6 @@ impl PacketSpace { } } -impl Index for [PacketSpace; 3] { - type Output = PacketSpace; - fn index(&self, space: SpaceId) -> &PacketSpace { - &self.as_ref()[space as usize] - } -} - -impl IndexMut for [PacketSpace; 3] { - fn index_mut(&mut self, space: SpaceId) -> &mut PacketSpace { - &mut self.as_mut()[space as usize] - } -} - /// Represents one or more packets subject to retransmission #[derive(Debug, Clone)] pub(super) struct SentPacket { @@ -306,10 +489,27 @@ pub struct Retransmits { pub(super) max_stream_data: FxHashSet, pub(super) crypto: VecDeque, pub(super) new_cids: Vec, - pub(super) retire_cids: Vec, + pub(super) retire_cids: Vec<(PathId, u64)>, pub(super) ack_frequency: bool, pub(super) handshake_done: bool, pub(super) observed_addr: bool, + /// For each enqueued NEW_TOKEN frame, a copy of the path's remote address + /// + /// There are 2 reasons this is unusual: + /// + /// - If the path changes, NEW_TOKEN frames bound for the old path are not retransmitted on the + /// new path. That is why this field stores the remote address: so that ones for old paths + /// can be filtered out. + /// - If a token is lost, a new randomly generated token is re-transmitted, rather than the + /// original. This is so that if both transmissions are received, the client won't risk + /// sending the same token twice. That is why this field does _not_ store any actual token. + /// + /// It is true that a QUIC endpoint will only want to effectively have NEW_TOKEN frames + /// enqueued for its current path at a given point in time. 
Based on that, we could conceivably + /// change this from a vector to an `Option<(SocketAddr, usize)>` or just a `usize` or + /// something. However, due to the architecture of Quinn, it is considerably simpler to not do + /// that; consider what such a change would mean for implementing `BitOrAssign` on Self. + pub(super) new_tokens: Vec, } impl Retransmits { @@ -328,6 +528,7 @@ impl Retransmits { && !self.ack_frequency && !self.handshake_done && !self.observed_addr + && self.new_tokens.is_empty() } } @@ -350,6 +551,7 @@ impl ::std::ops::BitOrAssign for Retransmits { self.ack_frequency |= rhs.ack_frequency; self.handshake_done |= rhs.handshake_done; self.observed_addr |= rhs.observed_addr; + self.new_tokens.extend_from_slice(&rhs.new_tokens); } } @@ -416,6 +618,7 @@ impl ThinRetransmits { /// ^ ^ ^ /// window highest next /// ``` +#[derive(Debug, Default)] pub(super) struct Dedup { window: Window, /// Lowest packet number higher than all yet authenticated. @@ -433,6 +636,7 @@ const WINDOW_SIZE: u64 = 1 + mem::size_of::() as u64 * 8; impl Dedup { /// Construct an empty window positioned at the start. 
+ #[cfg(test)] pub(super) fn new() -> Self { Self { window: 0, next: 0 } } @@ -448,7 +652,7 @@ impl Dedup { pub(super) fn insert(&mut self, packet: u64) -> bool { if let Some(diff) = packet.checked_sub(self.next) { // Right of window - self.window = (self.window << 1 | 1) + self.window = ((self.window << 1) | 1) .checked_shl(cmp::min(diff, u64::from(u32::MAX)) as u32) .unwrap_or(0); self.next = packet + 1; @@ -536,8 +740,18 @@ impl Dedup { /// Indicates which data is available for sending #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub(super) struct SendableFrames { + /// Whether there ACK frames to send, these are not ack-eliciting pub(super) acks: bool, + /// Whether there are any other frames to send, these are ack-eliciting pub(super) other: bool, + /// Whether there is a CONNECTION_CLOSE to send, this is not ack-eliciting + pub(super) close: bool, + /// Whether there are frames to send, which can only be sent on the path queried + /// + /// These are ack-eliciting, and a subset of [`SendableFrames::other`]. This is useful + /// for QUIC-MULTIPATH, which may desire not to send any frames on a backup path, which + /// can also be sent on an active path. 
+ pub(super) path_exclusive: bool, } impl SendableFrames { @@ -546,12 +760,23 @@ impl SendableFrames { Self { acks: false, other: false, + close: false, + path_exclusive: false, } } /// Whether no data is sendable pub(super) fn is_empty(&self) -> bool { - !self.acks && !self.other + !self.acks && !self.other && !self.close && !self.path_exclusive + } +} + +impl ::std::ops::BitOrAssign for SendableFrames { + fn bitor_assign(&mut self, rhs: Self) { + self.acks |= rhs.acks; + self.other |= rhs.other; + self.close |= rhs.close; + self.path_exclusive |= rhs.path_exclusive; } } @@ -579,16 +804,20 @@ pub(super) struct PendingAcks { /// The earliest ack-eliciting packet since the last ACK was sent, used to calculate the moment /// upon which `max_ack_delay` elapses earliest_ack_eliciting_since_last_ack_sent: Option, - /// The packet number ranges of ack-eliciting packets the peer hasn't confirmed receipt of ACKs - /// for - ranges: ArrayRangeSet, - /// The packet with the largest packet number, and the time upon which it was received (used to - /// calculate ACK delay in [`PendingAcks::ack_delay`]) - largest_packet: Option<(u64, Instant)>, + /// Packet number ranges for which to still send acknowledgements. + /// + /// These are packet number ranges of ack-eliciting packets the peer has sent and which + /// need to be acknowledged. Packet numbers are only removed from here once the peer has + /// acknowledged the ACKs for them. + ranges: FxHashMap, + /// The largest packet number received and the time it was received + /// + /// Used to calculate ACK delay in [`PendingAcks::ack_delay`]. 
+ largest_packet: FxHashMap, /// The ack-eliciting packet we have received with the largest packet number - largest_ack_eliciting_packet: Option, + largest_ack_eliciting_packet: FxHashMap, /// The largest acknowledged packet number sent in an ACK frame - largest_acked: Option, + largest_acked: FxHashMap, } impl PendingAcks { @@ -600,10 +829,10 @@ impl PendingAcks { ack_eliciting_threshold: 1, reordering_threshold: 1, earliest_ack_eliciting_since_last_ack_sent: None, - ranges: ArrayRangeSet::default(), - largest_packet: None, - largest_ack_eliciting_packet: None, - largest_acked: None, + ranges: Default::default(), + largest_packet: Default::default(), + largest_ack_eliciting_packet: Default::default(), + largest_acked: Default::default(), } } @@ -627,13 +856,16 @@ impl PendingAcks { /// Whether any ACK frames can be sent pub(super) fn can_send(&self) -> bool { - self.immediate_ack_required && !self.ranges.is_empty() + // This always checks all the paths. If any other path is present then multipath is + // assumed to be enabled. 
+ self.immediate_ack_required && self.ranges.values().any(|ranges| !ranges.is_empty()) } /// Returns the delay since the packet with the largest packet number was received - pub(super) fn ack_delay(&self, now: Instant) -> Duration { + pub(super) fn ack_delay(&self, path_id: PathId, now: Instant) -> Duration { self.largest_packet - .map_or(Duration::default(), |(_, received)| now - received) + .get(&path_id) + .map_or(Duration::default(), |(_, received)| now - *received) } /// Handle receipt of a new packet @@ -642,6 +874,7 @@ impl PendingAcks { pub(super) fn packet_received( &mut self, now: Instant, + path_id: PathId, packet_number: u64, ack_eliciting: bool, dedup: &Dedup, @@ -651,13 +884,19 @@ impl PendingAcks { return false; } - let prev_largest_ack_eliciting = self.largest_ack_eliciting_packet.unwrap_or(0); + let prev_largest_ack_eliciting = self + .largest_ack_eliciting_packet + .get(&path_id) + .copied() + .unwrap_or(0); // Track largest ack-eliciting packet - self.largest_ack_eliciting_packet = self - .largest_ack_eliciting_packet - .map(|pn| pn.max(packet_number)) - .or(Some(packet_number)); + self.largest_ack_eliciting_packet + .entry(path_id) + .and_modify(|pn| { + *pn = (*pn).max(packet_number); + }) + .or_insert(packet_number); // Handle ack_eliciting_threshold self.ack_eliciting_since_last_ack_sent += 1; @@ -666,7 +905,7 @@ impl PendingAcks { // Handle out-of-order packets self.immediate_ack_required |= - self.is_out_of_order(packet_number, prev_largest_ack_eliciting, dedup); + self.is_out_of_order(path_id, packet_number, prev_largest_ack_eliciting, dedup); // Arm max_ack_delay timer if necessary if self.earliest_ack_eliciting_since_last_ack_sent.is_none() && !self.can_send() { @@ -679,6 +918,7 @@ impl PendingAcks { fn is_out_of_order( &self, + path_id: PathId, packet_number: u64, prev_largest_ack_eliciting: u64, dedup: &Dedup, @@ -693,8 +933,10 @@ impl PendingAcks { _ => { // From acknowledgement frequency draft, section 6.1: send an ACK immediately if 
// doing so would cause the sender to detect a new packet loss - let Some((largest_acked, largest_unacked)) = - self.largest_acked.zip(self.largest_ack_eliciting_packet) + let Some((&largest_acked, &largest_unacked)) = self + .largest_acked + .get(&path_id) + .zip(self.largest_ack_eliciting_packet.get(&path_id)) else { return false; }; @@ -731,29 +973,36 @@ impl PendingAcks { self.ack_eliciting_since_last_ack_sent = 0; self.non_ack_eliciting_since_last_ack_sent = 0; self.earliest_ack_eliciting_since_last_ack_sent = None; - self.largest_acked = self.largest_ack_eliciting_packet; + self.largest_acked.clear(); + self.largest_acked + .extend(&self.largest_ack_eliciting_packet); } /// Insert one packet that needs to be acknowledged - pub(super) fn insert_one(&mut self, packet: u64, now: Instant) { - self.ranges.insert_one(packet); + pub(super) fn insert_one(&mut self, path_id: PathId, packet: u64, now: Instant) { + let ranges = self.ranges.entry(path_id).or_default(); + ranges.insert_one(packet); - if self.largest_packet.map_or(true, |(pn, _)| packet > pn) { - self.largest_packet = Some((packet, now)); + if ranges.len() > MAX_ACK_BLOCKS { + ranges.pop_min(); } - if self.ranges.len() > MAX_ACK_BLOCKS { - self.ranges.pop_min(); + let largest_packet = self.largest_packet.entry(path_id).or_insert((packet, now)); + if packet > largest_packet.0 { + largest_packet.0 = packet; + largest_packet.1 = now; } } /// Remove ACKs of packets numbered at or below `max` from the set of pending ACKs - pub(super) fn subtract_below(&mut self, max: u64) { - self.ranges.remove(0..(max + 1)); + pub(super) fn subtract_below(&mut self, path_id: PathId, max: u64) { + self.ranges + .get_mut(&path_id) + .map(|ranges| ranges.remove(0..(max + 1))); } /// Returns the set of currently pending ACK ranges - pub(super) fn ranges(&self) -> &ArrayRangeSet { + pub(super) fn ranges(&self) -> &FxHashMap { &self.ranges } @@ -804,7 +1053,7 @@ impl PacketNumberFilter { // First skipped PN is in 0..64 let exponent 
= 6; Self { - next_skipped_packet_number: rng.gen_range(0..2u64.saturating_pow(exponent)), + next_skipped_packet_number: rng.random_range(0..2u64.saturating_pow(exponent)), prev_skipped_packet_number: None, exponent, } @@ -819,48 +1068,20 @@ impl PacketNumberFilter { } } - pub(super) fn peek(&self, space: &PacketSpace) -> u64 { - let n = space.next_packet_number; + /// Whether to use the provided packet number (false) or to skip it (true) + pub(super) fn skip_pn(&mut self, n: u64, rng: &mut (impl Rng + ?Sized)) -> bool { if n != self.next_skipped_packet_number { - return n; - } - n + 1 - } - - pub(super) fn allocate( - &mut self, - rng: &mut (impl Rng + ?Sized), - space: &mut PacketSpace, - ) -> u64 { - let n = space.get_tx_number(); - if n != self.next_skipped_packet_number { - return n; + return false; } trace!("skipping pn {n}"); // Skip this packet number, and choose the next one to skip self.prev_skipped_packet_number = Some(self.next_skipped_packet_number); let next_exponent = self.exponent.saturating_add(1); - self.next_skipped_packet_number = - rng.gen_range(2u64.saturating_pow(self.exponent)..2u64.saturating_pow(next_exponent)); + self.next_skipped_packet_number = rng + .random_range(2u64.saturating_pow(self.exponent)..2u64.saturating_pow(next_exponent)); self.exponent = next_exponent; - - space.get_tx_number() - } - - pub(super) fn check_ack( - &self, - space_id: SpaceId, - range: std::ops::RangeInclusive, - ) -> Result<(), TransportError> { - if space_id == SpaceId::Data - && self - .prev_skipped_packet_number - .is_some_and(|x| range.contains(&x)) - { - return Err(TransportError::PROTOCOL_VIOLATION("unsent packet acked")); - } - Ok(()) + true } } @@ -998,7 +1219,7 @@ mod test { let mut acks = PendingAcks::new(); let mut dedup = Dedup::new(); dedup.insert(0); - acks.packet_received(Instant::now(), 0, true, &dedup); + acks.packet_received(Instant::now(), PathId::ZERO, 0, true, &dedup); assert!(!acks.immediate_ack_required); } @@ -1010,8 +1231,8 @@ mod 
test { // Receive ack-eliciting packet dedup.insert(0); let now = Instant::now(); - acks.insert_one(0, now); - acks.packet_received(now, 0, true, &dedup); + acks.insert_one(PathId(0), 0, now); + acks.packet_received(now, PathId::ZERO, 0, true, &dedup); // Sanity check assert!(!acks.ranges.is_empty()); @@ -1030,30 +1251,30 @@ mod test { let t1 = Instant::now(); let t2 = t1 + Duration::from_millis(2); let t3 = t2 + Duration::from_millis(5); - assert_eq!(acks.ack_delay(t1), Duration::from_millis(0)); - assert_eq!(acks.ack_delay(t2), Duration::from_millis(0)); - assert_eq!(acks.ack_delay(t3), Duration::from_millis(0)); + assert_eq!(acks.ack_delay(PathId::ZERO, t1), Duration::from_millis(0)); + assert_eq!(acks.ack_delay(PathId::ZERO, t2), Duration::from_millis(0)); + assert_eq!(acks.ack_delay(PathId::ZERO, t3), Duration::from_millis(0)); // In-order packet dedup.insert(0); - acks.insert_one(0, t1); - acks.packet_received(t1, 0, true, &dedup); - assert_eq!(acks.ack_delay(t1), Duration::from_millis(0)); - assert_eq!(acks.ack_delay(t2), Duration::from_millis(2)); - assert_eq!(acks.ack_delay(t3), Duration::from_millis(7)); + acks.insert_one(PathId::ZERO, 0, t1); + acks.packet_received(t1, PathId::ZERO, 0, true, &dedup); + assert_eq!(acks.ack_delay(PathId::ZERO, t1), Duration::from_millis(0)); + assert_eq!(acks.ack_delay(PathId::ZERO, t2), Duration::from_millis(2)); + assert_eq!(acks.ack_delay(PathId::ZERO, t3), Duration::from_millis(7)); // Out of order (higher than expected) dedup.insert(3); - acks.insert_one(3, t2); - acks.packet_received(t2, 3, true, &dedup); - assert_eq!(acks.ack_delay(t2), Duration::from_millis(0)); - assert_eq!(acks.ack_delay(t3), Duration::from_millis(5)); + acks.insert_one(PathId::ZERO, 3, t2); + acks.packet_received(t2, PathId::ZERO, 3, true, &dedup); + assert_eq!(acks.ack_delay(PathId::ZERO, t2), Duration::from_millis(0)); + assert_eq!(acks.ack_delay(PathId::ZERO, t3), Duration::from_millis(5)); // Out of order (lower than expected, so previous 
instant is kept) dedup.insert(2); - acks.insert_one(2, t3); - acks.packet_received(t3, 2, true, &dedup); - assert_eq!(acks.ack_delay(t3), Duration::from_millis(5)); + acks.insert_one(PathId::ZERO, 2, t3); + acks.packet_received(t3, PathId::ZERO, 2, true, &dedup); + assert_eq!(acks.ack_delay(PathId::ZERO, t3), Duration::from_millis(5)); } #[test] diff --git a/quinn-proto/src/connection/stats.rs b/quinn-proto/src/connection/stats.rs index 31f5f1d142..c681672605 100644 --- a/quinn-proto/src/connection/stats.rs +++ b/quinn-proto/src/connection/stats.rs @@ -1,6 +1,10 @@ //! Connection statistics -use crate::{frame::Frame, Dir, Duration}; +use rustc_hash::FxHashMap; + +use crate::{Dir, Duration, frame::Frame}; + +use super::PathId; /// Statistics about UDP datagrams transmitted or received on a connection #[derive(Default, Debug, Copy, Clone)] @@ -30,6 +34,7 @@ impl UdpStats { #[allow(missing_docs)] pub struct FrameStats { pub acks: u64, + pub path_acks: u64, pub ack_frequency: u64, pub crypto: u64, pub connection_close: u64, @@ -54,6 +59,11 @@ pub struct FrameStats { pub stop_sending: u64, pub stream: u64, pub observed_addr: u64, + pub path_abandon: u64, + pub path_available: u64, + pub max_path_id: u64, + pub paths_blocked: u64, + pub path_cids_blocked: u64, } impl FrameStats { @@ -62,11 +72,12 @@ impl FrameStats { Frame::Padding => {} Frame::Ping => self.ping += 1, Frame::Ack(_) => self.acks += 1, + Frame::PathAck(_) => self.path_acks += 1, Frame::ResetStream(_) => self.reset_stream += 1, Frame::StopSending(_) => self.stop_sending += 1, Frame::Crypto(_) => self.crypto += 1, Frame::Datagram(_) => self.datagram += 1, - Frame::NewToken { .. } => self.new_token += 1, + Frame::NewToken(_) => self.new_token += 1, Frame::MaxData(_) => self.max_data += 1, Frame::MaxStreamData { .. } => self.max_stream_data += 1, Frame::MaxStreams { dir, .. } => { @@ -87,6 +98,7 @@ impl FrameStats { } } Frame::NewConnectionId(_) => self.new_connection_id += 1, + // TODO(@divma): split stats? 
Frame::RetireConnectionId { .. } => self.retire_connection_id += 1, Frame::PathChallenge(_) => self.path_challenge += 1, Frame::PathResponse(_) => self.path_response += 1, @@ -95,6 +107,14 @@ impl FrameStats { Frame::ImmediateAck => self.immediate_ack += 1, Frame::HandshakeDone => self.handshake_done = self.handshake_done.saturating_add(1), Frame::ObservedAddr(_) => self.observed_addr += 1, + Frame::PathAbandon(_) => self.path_abandon = self.path_abandon.saturating_add(1), + // TODO(@divma): split stats? + Frame::PathAvailable(_) => self.path_available = self.path_available.saturating_add(1), + Frame::MaxPathId(_) => self.max_path_id = self.max_path_id.saturating_add(1), + Frame::PathsBlocked(_) => self.paths_blocked = self.paths_blocked.saturating_add(1), + Frame::PathCidsBlocked(_) => { + self.path_cids_blocked = self.path_cids_blocked.saturating_add(1) + } } } } @@ -104,6 +124,7 @@ impl std::fmt::Debug for FrameStats { f.debug_struct("FrameStats") .field("ACK", &self.acks) .field("ACK_FREQUENCY", &self.ack_frequency) + .field("PATH_ACK", &self.path_acks) .field("CONNECTION_CLOSE", &self.connection_close) .field("CRYPTO", &self.crypto) .field("DATA_BLOCKED", &self.data_blocked) @@ -158,7 +179,7 @@ pub struct PathStats { } /// Connection statistics -#[derive(Debug, Default, Copy, Clone)] +#[derive(Debug, Default, Clone)] #[non_exhaustive] pub struct ConnectionStats { /// Statistics about UDP datagrams transmitted on a connection @@ -170,5 +191,5 @@ pub struct ConnectionStats { /// Statistics about frames received on a connection pub frame_rx: FrameStats, /// Statistics related to the current transmission path - pub path: PathStats, + pub paths: FxHashMap, } diff --git a/quinn-proto/src/connection/streams/mod.rs b/quinn-proto/src/connection/streams/mod.rs index 9a4b62c3a5..53e42815ee 100644 --- a/quinn-proto/src/connection/streams/mod.rs +++ b/quinn-proto/src/connection/streams/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::{hash_map, BinaryHeap}, + 
collections::{BinaryHeap, hash_map}, io, }; @@ -9,8 +9,9 @@ use tracing::trace; use super::spaces::{Retransmits, ThinRetransmits}; use crate::{ + Dir, StreamId, VarInt, connection::streams::state::{get_or_insert_recv, get_or_insert_send}, - frame, Dir, StreamId, VarInt, + frame, }; mod recv; @@ -19,8 +20,8 @@ pub use recv::{Chunks, ReadError, ReadableError}; mod send; pub(crate) use send::{ByteSlice, BytesArray}; -pub use send::{BytesSource, FinishError, WriteError, Written}; -use send::{Send, SendState}; +use send::{BytesSource, Send, SendState}; +pub use send::{FinishError, WriteError, Written}; mod state; #[allow(unreachable_pub)] // fuzzing only @@ -508,19 +509,12 @@ impl ShouldTransmit { } /// Error indicating that a stream has not been opened or has already been finished or reset -#[derive(Debug, Error, Clone, PartialEq, Eq)] +#[derive(Debug, Default, Error, Clone, PartialEq, Eq)] #[error("closed stream")] pub struct ClosedStream { _private: (), } -impl ClosedStream { - #[doc(hidden)] // For use in quinn only - pub fn new() -> Self { - Self { _private: () } - } -} - impl From for io::Error { fn from(x: ClosedStream) -> Self { Self::new(io::ErrorKind::NotConnected, x) diff --git a/quinn-proto/src/connection/streams/recv.rs b/quinn-proto/src/connection/streams/recv.rs index a8dc79bbc6..1aee535439 100644 --- a/quinn-proto/src/connection/streams/recv.rs +++ b/quinn-proto/src/connection/streams/recv.rs @@ -8,7 +8,7 @@ use super::state::get_or_insert_recv; use super::{ClosedStream, Retransmits, ShouldTransmit, StreamId, StreamsState}; use crate::connection::assembler::{Assembler, Chunk, IllegalOrderedRead}; use crate::connection::streams::state::StreamRecv; -use crate::{frame, TransportError, VarInt}; +use crate::{TransportError, VarInt, frame}; #[derive(Debug, Default)] pub(super) struct Recv { @@ -176,7 +176,7 @@ impl Recv { if offset != final_offset.into_inner() { return Err(TransportError::FINAL_SIZE_ERROR("inconsistent value")); } - } else if self.end > 
final_offset.into() { + } else if self.end > u64::from(final_offset) { return Err(TransportError::FINAL_SIZE_ERROR( "lower than high water mark", )); diff --git a/quinn-proto/src/connection/streams/send.rs b/quinn-proto/src/connection/streams/send.rs index f3800fad8c..7b3db809a1 100644 --- a/quinn-proto/src/connection/streams/send.rs +++ b/quinn-proto/src/connection/streams/send.rs @@ -1,7 +1,7 @@ use bytes::Bytes; use thiserror::Error; -use crate::{connection::send_buffer::SendBuffer, frame, VarInt}; +use crate::{VarInt, connection::send_buffer::SendBuffer, frame}; #[derive(Debug)] pub(super) struct Send { @@ -32,7 +32,7 @@ impl Send { /// Whether the stream has been reset pub(super) fn is_reset(&self) -> bool { - matches!(self.state, SendState::ResetSent { .. }) + matches!(self.state, SendState::ResetSent) } pub(super) fn finish(&mut self) -> Result<(), FinishError> { @@ -227,7 +227,7 @@ impl BytesSource for ByteSlice<'_> { /// /// The purpose of this data type is to defer conversion as long as possible, /// so that no heap allocation is required in case no data is writable. -pub trait BytesSource { +pub(super) trait BytesSource { /// Returns the next chunk from the source of owned chunks. /// /// This method will consume parts of the source. 
diff --git a/quinn-proto/src/connection/streams/state.rs b/quinn-proto/src/connection/streams/state.rs index 90b853b7be..fe30e1bd2b 100644 --- a/quinn-proto/src/connection/streams/state.rs +++ b/quinn-proto/src/connection/streams/state.rs @@ -1,5 +1,5 @@ use std::{ - collections::{hash_map, VecDeque}, + collections::{VecDeque, hash_map}, convert::TryFrom, mem, }; @@ -13,11 +13,11 @@ use super::{ StreamHalf, ThinRetransmits, }; use crate::{ + Dir, MAX_STREAM_COUNT, Side, StreamId, TransportError, VarInt, coding::BufMutExt, connection::stats::FrameStats, frame::{self, FrameStruct, StreamMetaVec}, transport_parameters::TransportParameters, - Dir, Side, StreamId, TransportError, VarInt, MAX_STREAM_COUNT, }; /// Wrapper around `Recv` that facilitates reusing `Recv` instances @@ -411,14 +411,13 @@ impl StreamsState { pub(in crate::connection) fn write_control_frames( &mut self, - buf: &mut Vec, + buf: &mut impl BufMut, pending: &mut Retransmits, retransmits: &mut ThinRetransmits, stats: &mut FrameStats, - max_size: usize, ) { // RESET_STREAM - while buf.len() + frame::ResetStream::SIZE_BOUND < max_size { + while buf.remaining_mut() > frame::ResetStream::SIZE_BOUND { let (id, error_code) = match pending.reset_stream.pop() { Some(x) => x, None => break, @@ -442,7 +441,7 @@ impl StreamsState { } // STOP_SENDING - while buf.len() + frame::StopSending::SIZE_BOUND < max_size { + while buf.remaining_mut() > frame::StopSending::SIZE_BOUND { let frame = match pending.stop_sending.pop() { Some(x) => x, None => break, @@ -461,7 +460,7 @@ impl StreamsState { } // MAX_DATA - if pending.max_data && buf.len() + 9 < max_size { + if pending.max_data && buf.remaining_mut() > 9 { pending.max_data = false; // `local_max_data` can grow bigger than `VarInt`. 
@@ -484,7 +483,7 @@ impl StreamsState { } // MAX_STREAM_DATA - while buf.len() + 17 < max_size { + while buf.remaining_mut() > 17 { let id = match pending.max_stream_data.iter().next() { Some(x) => *x, None => break, @@ -516,7 +515,7 @@ impl StreamsState { // MAX_STREAMS for dir in Dir::iter() { - if !pending.max_stream_id[dir as usize] || buf.len() + 9 >= max_size { + if !pending.max_stream_id[dir as usize] || buf.remaining_mut() <= 9 { continue; } @@ -525,8 +524,7 @@ impl StreamsState { self.sent_max_remote[dir as usize] = self.max_remote[dir as usize]; trace!( value = self.max_remote[dir as usize], - "MAX_STREAMS ({:?})", - dir + "MAX_STREAMS ({:?})", dir ); buf.write(match dir { Dir::Uni => frame::FrameType::MAX_STREAMS_UNI, @@ -542,21 +540,14 @@ impl StreamsState { pub(crate) fn write_stream_frames( &mut self, - buf: &mut Vec, - max_buf_size: usize, + buf: &mut impl BufMut, fair: bool, ) -> StreamMetaVec { let mut stream_frames = StreamMetaVec::new(); - while buf.len() + frame::Stream::SIZE_BOUND < max_buf_size { - if max_buf_size - .checked_sub(buf.len() + frame::Stream::SIZE_BOUND) - .is_none() - { - break; - } - - // Pop the stream of the highest priority that currently has pending data - // If the stream still has some pending data left after writing, it will be reinserted, otherwise not + while buf.remaining_mut() > frame::Stream::SIZE_BOUND { + // Pop the stream of the highest priority that currently has pending data. If + // the stream still has some pending data left after writing, it will be + // reinserted, otherwise not let Some(stream) = self.pending.pop() else { break; }; @@ -578,7 +569,7 @@ impl StreamsState { // Now that we know the `StreamId`, we can better account for how many bytes // are required to encode it. 
- let max_buf_size = max_buf_size - buf.len() - 1 - VarInt::size(id.into()); + let max_buf_size = buf.remaining_mut() - 1 - VarInt::size(id.into()); let (offsets, encode_length) = stream.pending.poll_transmit(max_buf_size); let fin = offsets.end == stream.pending.offset() && matches!(stream.state, SendState::DataSent { .. }); @@ -980,8 +971,8 @@ pub(super) fn get_or_insert_recv( mod tests { use super::*; use crate::{ - connection::State as ConnState, connection::Streams, ReadableError, RecvStream, SendStream, - TransportErrorCode, WriteError, + ReadableError, RecvStream, SendStream, TransportErrorCode, WriteError, + connection::State as ConnState, connection::Streams, }; use bytes::Bytes; @@ -1300,10 +1291,12 @@ mod tests { let error_code = 0u32.into(); stream.state.received_stop_sending(id, error_code); - assert!(stream - .state - .events - .contains(&StreamEvent::Stopped { id, error_code })); + assert!( + stream + .state + .events + .contains(&StreamEvent::Stopped { id, error_code }) + ); stream.state.events.clear(); assert_eq!(stream.write(&[]), Err(WriteError::Stopped(error_code))); @@ -1379,7 +1372,7 @@ mod tests { high.write(b"high").unwrap(); let mut buf = Vec::with_capacity(40); - let meta = server.write_stream_frames(&mut buf, 40, true); + let meta = server.write_stream_frames(&mut buf, true); assert_eq!(meta[0].id, id_high); assert_eq!(meta[1].id, id_mid); assert_eq!(meta[2].id, id_low); @@ -1437,16 +1430,18 @@ mod tests { }; high.set_priority(-1).unwrap(); - let mut buf = Vec::with_capacity(1000); - let meta = server.write_stream_frames(&mut buf, 40, true); + let mut buf = Vec::with_capacity(1000).limit(40); + let meta = server.write_stream_frames(&mut buf, true); assert_eq!(meta.len(), 1); assert_eq!(meta[0].id, id_high); // After requeuing we should end up with 2 priorities - not 3 assert_eq!(server.pending.len(), 2); + let mut buf = buf.into_inner(); + // Send the remaining data. 
The initial mid priority one should go first now - let meta = server.write_stream_frames(&mut buf, 1000, true); + let meta = server.write_stream_frames(&mut buf, true); assert_eq!(meta.len(), 2); assert_eq!(meta[0].id, id_mid); assert_eq!(meta[1].id, id_high); @@ -1506,12 +1501,13 @@ mod tests { // loop until all the streams are written loop { - let buf_len = buf.len(); - let meta = server.write_stream_frames(&mut buf, buf_len + 40, fair); + let mut chunk_buf = buf.limit(40); + let meta = server.write_stream_frames(&mut chunk_buf, fair); if meta.is_empty() { break; } metas.extend(meta); + buf = chunk_buf.into_inner(); } assert!(!server.can_send_stream_data()); @@ -1574,11 +1570,12 @@ mod tests { stream_b.write(&[b'b'; 100]).unwrap(); let mut metas = vec![]; - let mut buf = Vec::with_capacity(1024); + let buf = Vec::with_capacity(1024); // Write the first chunk of stream_a - let buf_len = buf.len(); - let meta = server.write_stream_frames(&mut buf, buf_len + 40, false); + let mut chunk_buf = buf.limit(40); + let meta = server.write_stream_frames(&mut chunk_buf, false); + let mut buf = chunk_buf.into_inner(); assert!(!meta.is_empty()); metas.extend(meta); @@ -1594,8 +1591,9 @@ mod tests { // loop until all the streams are written loop { - let buf_len = buf.len(); - let meta = server.write_stream_frames(&mut buf, buf_len + 40, false); + let mut chunk_buf = buf.limit(40); + let meta = server.write_stream_frames(&mut chunk_buf, false); + buf = chunk_buf.into_inner(); if meta.is_empty() { break; } diff --git a/quinn-proto/src/connection/timer.rs b/quinn-proto/src/connection/timer.rs index 566652d0da..e645ed4d3a 100644 --- a/quinn-proto/src/connection/timer.rs +++ b/quinn-proto/src/connection/timer.rs @@ -1,65 +1,145 @@ +use std::collections::{BinaryHeap, binary_heap::PeekMut}; + +use rustc_hash::FxHashMap; + use crate::Instant; -#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] +use super::PathId; + +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, 
PartialEq, Hash)] pub(crate) enum Timer { /// When to send an ack-eliciting probe packet or declare unacked packets lost - LossDetection = 0, + LossDetection(PathId), /// When to close the connection after no activity - Idle = 1, + Idle, /// When the close timer expires, the connection has been gracefully terminated. - Close = 2, + Close, /// When keys are discarded because they should not be needed anymore - KeyDiscard = 3, + KeyDiscard, /// When to give up on validating a new path to the peer - PathValidation = 4, + PathValidation(PathId), /// When to send a `PING` frame to keep the connection alive - KeepAlive = 5, + KeepAlive(PathId), /// When pacing will allow us to send a packet - Pacing = 6, + Pacing(PathId), /// When to invalidate old CID and proactively push new one via NEW_CONNECTION_ID frame - PushNewCid = 7, + PushNewCid, /// When to send an immediate ACK if there are unacked ack-eliciting packets of the peer - MaxAckDelay = 8, + MaxAckDelay, } -impl Timer { - pub(crate) const VALUES: [Self; 9] = [ - Self::LossDetection, - Self::Idle, - Self::Close, - Self::KeyDiscard, - Self::PathValidation, - Self::KeepAlive, - Self::Pacing, - Self::PushNewCid, - Self::MaxAckDelay, - ]; +/// Keeps track of the nearest timeout for each `Timer` +/// +/// The [`TimerTable`] is advanced with [`TimerTable::expire_before`]. 
+#[derive(Debug, Clone, Default)] +pub(crate) struct TimerTable { + most_recent_timeout: FxHashMap, + timeout_queue: BinaryHeap, } -/// A table of data associated with each distinct kind of `Timer` -#[derive(Debug, Copy, Clone, Default)] -pub(crate) struct TimerTable { - data: [Option; 10], +#[derive(Debug, Clone, PartialEq, Eq)] +pub(super) struct TimerEntry { + pub(super) time: Instant, + pub(super) timer: Timer, +} + +impl Ord for TimerEntry { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + // `timeout_queue` is a max heap so we need to reverse the order to efficiently pop the + // next timeout + self.time + .cmp(&other.time) + .then_with(|| self.timer.cmp(&other.timer)) + .reverse() + } +} + +impl PartialOrd for TimerEntry { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } } impl TimerTable { + /// Sets the timer unconditionally pub(super) fn set(&mut self, timer: Timer, time: Instant) { - self.data[timer as usize] = Some(time); + self.most_recent_timeout.insert(timer, time); + self.timeout_queue.push(TimerEntry { time, timer }); } pub(super) fn get(&self, timer: Timer) -> Option { - self.data[timer as usize] + self.most_recent_timeout.get(&timer).copied() } pub(super) fn stop(&mut self, timer: Timer) { - self.data[timer as usize] = None; + self.most_recent_timeout.remove(&timer); } - pub(super) fn next_timeout(&self) -> Option { - self.data.iter().filter_map(|&x| x).min() + /// Get the next queued timeout + /// + /// Obsolete timers will be purged. 
+ pub(super) fn peek(&mut self) -> Option { + while let Some(timer_entry) = self.timeout_queue.peek_mut() { + if self.most_recent_timeout.get(&timer_entry.timer) != Some(&timer_entry.time) { + // obsolete timeout + PeekMut::pop(timer_entry); + continue; + } + return Some(timer_entry.clone()); + } + + None } - pub(super) fn is_expired(&self, timer: Timer, after: Instant) -> bool { - self.data[timer as usize].is_some_and(|x| x <= after) + /// Remove the next timer up until `now`, including it + pub(super) fn expire_before(&mut self, now: Instant) -> Option { + let TimerEntry { time, timer } = self.peek()?; + if time <= now { + self.most_recent_timeout.remove(&timer); + self.timeout_queue.pop(); + return Some(timer); + } + + None + } + + pub(super) fn reset(&mut self) { + self.most_recent_timeout.clear(); + self.timeout_queue.clear(); + } + + #[cfg(test)] + pub(super) fn values(&self) -> Vec { + let mut values = self.timeout_queue.clone().into_sorted_vec(); + values.retain(|entry| self.most_recent_timeout.get(&entry.timer) == Some(&entry.time)); + values + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use super::*; + + #[test] + fn timer_table() { + let mut timers = TimerTable::default(); + let sec = Duration::from_secs(1); + let now = Instant::now() + Duration::from_secs(10); + timers.set(Timer::Idle, now - 3 * sec); + timers.set(Timer::Close, now - 2 * sec); + timers.set(Timer::Idle, now); + + assert_eq!( + timers.peek(), + Some(TimerEntry { + timer: Timer::Close, + time: now - 2 * sec + }) + ); + assert_eq!(timers.expire_before(now), Some(Timer::Close)); + assert_eq!(timers.expire_before(now), Some(Timer::Idle)); + assert_eq!(timers.expire_before(now), None); } } diff --git a/quinn-proto/src/connection/transmit_buf.rs b/quinn-proto/src/connection/transmit_buf.rs new file mode 100644 index 0000000000..5a5f1f4a2d --- /dev/null +++ b/quinn-proto/src/connection/transmit_buf.rs @@ -0,0 +1,242 @@ +use bytes::BufMut; +use tracing::trace; + +use 
crate::packet::BufLen; + /// The buffer in which to write datagrams for [`Connection::poll_transmit`] + /// + /// The `poll_transmit` function writes zero or more datagrams to a buffer. Multiple + /// datagrams are possible in case GSO (Generic Segmentation Offload) is supported. + /// + /// This buffer tracks datagrams being written to it. There is always a "current" datagram, + /// which is started by calling [`TransmitBuf::start_new_datagram`]. Writing to the buffer + /// is done through the [`BufMut`] interface. + /// + /// Usually a datagram contains one QUIC packet, though QUIC-TRANSPORT 12.2 Coalescing + /// Packets allows for placing multiple packets into a single datagram provided all but the + /// last packet uses long headers. This is normally used during connection setup where often + /// the initial, handshake and sometimes even a 1-RTT packet can be coalesced into a single + /// datagram. + /// + /// Inside a single packet multiple QUIC frames are written. + /// + /// The buffer managed here is passed straight to the OS' `sendmsg` call (or variant) once + /// `poll_transmit` returns. So it needs to contain the datagrams as they are sent on the + /// wire. + /// + /// [`Connection::poll_transmit`]: super::Connection::poll_transmit +#[derive(Debug)] +pub(super) struct TransmitBuf<'a> { + /// The buffer itself, packets are written to this buffer + buf: &'a mut Vec, + /// Offset into the buffer at which the current datagram starts + /// + /// Note that when coalescing packets this might be before the start of the current + /// packet. + datagram_start: usize, + /// The maximum offset allowed to be used for the current datagram in the buffer + /// + /// The first and last datagram in a batch are allowed to be smaller than the maximum + /// size. All datagrams in between need to be exactly this size. 
+ buf_capacity: usize, + /// The maximum number of datagrams allowed to write into [`TransmitBuf::buf`] + max_datagrams: usize, + /// The number of datagrams already (partially) written into the buffer + /// + /// Incremented by a call to [`TransmitBuf::start_new_datagram`]. + pub(super) num_datagrams: usize, + /// The segment size of this GSO batch + /// + /// The segment size is the size of each datagram in the GSO batch, only the last + /// datagram in the batch may be smaller. + /// + /// For the first datagram this is set to the maximum size a datagram is allowed to be: + /// the current path MTU. After the first datagram is finished this is reduced to the + /// size of the first datagram and can no longer change. + segment_size: usize, +} + +impl<'a> TransmitBuf<'a> { + pub(super) fn new(buf: &'a mut Vec, max_datagrams: usize, mtu: usize) -> Self { + Self { + buf, + datagram_start: 0, + buf_capacity: 0, + max_datagrams, + num_datagrams: 0, + segment_size: mtu, + } + } + + pub(super) fn set_segment_size(&mut self, mtu: usize) { + debug_assert!( + self.datagram_start == 0 && self.buf_capacity == 0 && self.num_datagrams == 0, + "can only change the segment size if nothing has been written yet" + ); + + self.segment_size = mtu; + } + + /// Starts a datagram with a custom datagram size + /// + /// This is a specialized version of [`TransmitBuf::start_new_datagram`] which sets the + /// datagram size. Useful for e.g. PATH_CHALLENGE, tail-loss probes or MTU probes. + /// + /// After the first datagram you can never increase the segment size. If you decrease + /// the size of a datagram in a batch, it must be the last datagram of the batch. + pub(super) fn start_new_datagram_with_size(&mut self, datagram_size: usize) { + // Only reserve space for this datagram, usually it is the last one in the batch. 
+ let max_capacity_hint = datagram_size; + self.new_datagram_inner(datagram_size, max_capacity_hint) + } + + /// Starts a new datagram in the transmit buffer + /// + /// If this starts the second datagram the segment size will be set to the size of the + /// first datagram. + /// + /// If the underlying buffer does not have enough capacity yet this will allocate enough + /// capacity for all the datagrams allowed in a single batch. Use + /// [`TransmitBuf::start_new_datagram_with_size`] if you know you will need less. + pub(super) fn start_new_datagram(&mut self) { + // We reserve the maximum space for sending `max_datagrams` upfront to avoid any + // reallocations if more datagrams have to be appended later on. Benchmarks have + // shown a 5-10% throughput improvement compared to continuously resizing the + // datagram buffer. While this will lead to over-allocation for small transmits + // (e.g. purely containing ACKs), modern memory allocators (e.g. mimalloc and + // jemalloc) will pool certain allocation sizes and therefore this is still rather + // efficient. + let max_capacity_hint = self.max_datagrams * self.segment_size; + self.new_datagram_inner(self.segment_size, max_capacity_hint) + } + + fn new_datagram_inner(&mut self, datagram_size: usize, max_capacity_hint: usize) { + debug_assert!(self.num_datagrams < self.max_datagrams); + if self.num_datagrams == 1 { + // Set the segment size to the size of the first datagram. + self.segment_size = self.buf.len(); + } + if self.num_datagrams >= 1 { + debug_assert!(datagram_size <= self.segment_size); + if datagram_size < self.segment_size { + // If this is a GSO batch and this datagram is smaller than the segment + // size, this must be the last datagram in the batch. 
+ self.max_datagrams = self.num_datagrams + 1; + } + } + self.datagram_start = self.buf.len(); + debug_assert_eq!( + self.datagram_start % self.segment_size, + 0, + "datagrams in a GSO batch must be aligned to the segment size" + ); + self.buf_capacity = self.datagram_start + datagram_size; + if self.buf_capacity > self.buf.capacity() { + self.buf + .reserve_exact(max_capacity_hint.saturating_sub(self.buf.capacity())); + } + self.num_datagrams += 1; + } + + /// Clips the datagram size to the current size + /// + /// Only valid for the first datagram, when the datagram might be smaller than the + /// segment size. Needed before estimating the available space in the next datagram + /// based on [`TransmitBuf::segment_size`]. + /// + /// Use [`TransmitBuf::start_new_datagram_with_size`] if you need to reduce the size of + /// the last datagram in a batch. + pub(super) fn clip_datagram_size(&mut self) { + debug_assert_eq!(self.num_datagrams, 1); + if self.buf.len() < self.segment_size { + trace!( + segment_size = self.buf.len(), + prev_segment_size = self.segment_size, + "clipped datagram size" + ); + } + self.segment_size = self.buf.len(); + self.buf_capacity = self.buf.len(); + } + + /// Returns the GSO segment size + /// + /// This is also the maximum size datagrams are allowed to be. The first and last + /// datagram in a batch are allowed to be smaller however. After the first datagram the + /// segment size is clipped to the size of the first datagram. + /// + /// If the last datagram was created using [`TransmitBuf::start_new_datagram_with_size`] + /// the segment size will be greater than the current datagram is allowed to be. + /// Thus [`TransmitBuf::datagram_remaining_mut`] should be used if you need to know the + /// amount of data that can be written into the datagram. 
+ pub(super) fn segment_size(&self) -> usize { + self.segment_size + } + + /// Returns the number of datagrams written into the buffer + /// + /// The last datagram is not necessarily finished yet. + pub(super) fn num_datagrams(&self) -> usize { + self.num_datagrams + } + + /// Returns the maximum number of datagrams allowed to be written into the buffer + pub(super) fn max_datagrams(&self) -> usize { + self.max_datagrams + } + + /// Returns the start offset of the current datagram in the buffer + /// + /// In other words, this offset contains the first byte of the current datagram. + pub(super) fn datagram_start_offset(&self) -> usize { + self.datagram_start + } + + /// Returns the maximum offset in the buffer allowed for the current datagram + /// + /// The first and last datagram in a batch are allowed to be smaller than the maximum + /// size. All datagrams in between need to be exactly this size. + pub(super) fn datagram_max_offset(&self) -> usize { + self.buf_capacity + } + + /// Returns the number of bytes that may still be written into this datagram + pub(super) fn datagram_remaining_mut(&self) -> usize { + self.buf_capacity.saturating_sub(self.buf.len()) + } + + /// Returns `true` if the buffer did not have anything written into it + pub(super) fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// The number of bytes written into the buffer so far + pub(super) fn len(&self) -> usize { + self.buf.len() + } + + /// Returns the already written bytes in the buffer + pub(super) fn as_mut_slice(&mut self) -> &mut [u8] { + self.buf.as_mut_slice() + } +} + +unsafe impl BufMut for TransmitBuf<'_> { + fn remaining_mut(&self) -> usize { + self.buf.remaining_mut() + } + + unsafe fn advance_mut(&mut self, cnt: usize) { + self.buf.advance_mut(cnt); + } + + fn chunk_mut(&mut self) -> &mut bytes::buf::UninitSlice { + self.buf.chunk_mut() + } +} + +impl BufLen for TransmitBuf<'_> { + fn len(&self) -> usize { + self.len() + } +} diff --git a/quinn-proto/src/crypto.rs 
b/quinn-proto/src/crypto.rs index f15d740c08..8d79abc2ba 100644 --- a/quinn-proto/src/crypto.rs +++ b/quinn-proto/src/crypto.rs @@ -13,8 +13,8 @@ use std::{any::Any, str, sync::Arc}; use bytes::BytesMut; use crate::{ - shared::ConnectionId, transport_parameters::TransportParameters, ConnectError, Side, - TransportError, + ConnectError, PathId, Side, TransportError, shared::ConnectionId, + transport_parameters::TransportParameters, }; /// Cryptography interface based on *ring* @@ -147,10 +147,11 @@ pub trait ServerConfig: Send + Sync { /// Keys used to protect packet payloads pub trait PacketKey: Send + Sync { /// Encrypt the packet payload with the given packet number - fn encrypt(&self, packet: u64, buf: &mut [u8], header_len: usize); + fn encrypt(&self, path_id: PathId, packet: u64, buf: &mut [u8], header_len: usize); /// Decrypt the packet payload with the given packet number fn decrypt( &self, + path_id: PathId, packet: u64, header: &[u8], payload: &mut BytesMut, diff --git a/quinn-proto/src/crypto/rustls.rs b/quinn-proto/src/crypto/rustls.rs index d60740bb30..1669e8ef05 100644 --- a/quinn-proto/src/crypto/rustls.rs +++ b/quinn-proto/src/crypto/rustls.rs @@ -7,19 +7,18 @@ use bytes::BytesMut; use ring::aead; pub use rustls::Error; use rustls::{ - self, + self, CipherSuite, client::danger::ServerCertVerifier, pki_types::{CertificateDer, PrivateKeyDer, ServerName}, quic::{Connection, HeaderProtectionKey, KeyChange, PacketKey, Secrets, Suite, Version}, - CipherSuite, }; use crate::{ + ConnectError, ConnectionId, PathId, Side, TransportError, TransportErrorCode, crypto::{ self, CryptoError, ExportKeyingMaterialError, HeaderKey, KeyPair, Keys, UnsupportedVersion, }, transport_parameters::TransportParameters, - ConnectError, ConnectionId, Side, TransportError, TransportErrorCode, }; impl From for rustls::Side { @@ -51,7 +50,7 @@ impl TlsSession { impl crypto::Session for TlsSession { fn initial_keys(&self, dst_cid: &ConnectionId, side: Side) -> Keys { - 
initial_keys(self.version, dst_cid, side, &self.suite) + initial_keys(self.version, *dst_cid, side, &self.suite) } fn handshake_data(&self) -> Option> { @@ -504,7 +503,7 @@ impl crypto::ServerConfig for QuicServerConfig { dst_cid: &ConnectionId, ) -> Result { let version = interpret_version(version)?; - Ok(initial_keys(version, dst_cid, Side::Server, &self.initial)) + Ok(initial_keys(version, *dst_cid, Side::Server, &self.initial)) } fn retry_tag(&self, version: u32, orig_dst_cid: &ConnectionId, packet: &[u8]) -> [u8; 16] { @@ -564,11 +563,11 @@ fn to_vec(params: &TransportParameters) -> Vec { pub(crate) fn initial_keys( version: Version, - dst_cid: &ConnectionId, + dst_cid: ConnectionId, side: Side, suite: &Suite, ) -> Keys { - let keys = suite.keys(dst_cid, side.into(), version); + let keys = suite.keys(&dst_cid, side.into(), version); Keys { header: KeyPair { local: Box::new(keys.local.header), @@ -582,21 +581,24 @@ pub(crate) fn initial_keys( } impl crypto::PacketKey for Box { - fn encrypt(&self, packet: u64, buf: &mut [u8], header_len: usize) { + fn encrypt(&self, PathId(path_id): PathId, packet: u64, buf: &mut [u8], header_len: usize) { let (header, payload_tag) = buf.split_at_mut(header_len); let (payload, tag_storage) = payload_tag.split_at_mut(payload_tag.len() - self.tag_len()); - let tag = self.encrypt_in_place(packet, &*header, payload).unwrap(); + let tag = self + .encrypt_in_place_for_path(path_id, packet, &*header, payload) + .unwrap(); tag_storage.copy_from_slice(tag.as_ref()); } fn decrypt( &self, + PathId(path_id): PathId, packet: u64, header: &[u8], payload: &mut BytesMut, ) -> Result<(), CryptoError> { let plain = self - .decrypt_in_place(packet, header, payload.as_mut()) + .decrypt_in_place_for_path(path_id, packet, header, payload.as_mut()) .map_err(|_| CryptoError)?; let plain_len = plain.len(); payload.truncate(plain_len); diff --git a/quinn-proto/src/endpoint.rs b/quinn-proto/src/endpoint.rs index 7c8a329156..9861e68847 100644 --- 
a/quinn-proto/src/endpoint.rs +++ b/quinn-proto/src/endpoint.rs @@ -1,5 +1,5 @@ use std::{ - collections::{hash_map, HashMap}, + collections::{HashMap, hash_map}, convert::TryFrom, fmt, mem, net::{IpAddr, SocketAddr}, @@ -8,13 +8,15 @@ use std::{ }; use bytes::{BufMut, Bytes, BytesMut}; -use rand::{rngs::StdRng, Rng, RngCore, SeedableRng}; +use rand::{Rng, RngCore, SeedableRng, rngs::StdRng}; use rustc_hash::FxHashMap; use slab::Slab; use thiserror::Error; use tracing::{debug, error, trace, warn}; use crate::{ + Duration, INITIAL_MTU, Instant, MAX_CID_SIZE, MIN_INITIAL_SIZE, PathId, RESET_TOKEN_SIZE, + ResetToken, Side, Transmit, TransportConfig, TransportError, cid_generator::ConnectionIdGenerator, coding::BufMutExt, config::{ClientConfig, EndpointConfig, ServerConfig}, @@ -22,17 +24,15 @@ use crate::{ crypto::{self, Keys, UnsupportedVersion}, frame, packet::{ - FixedLengthConnectionIdParser, Header, InitialHeader, InitialPacket, Packet, - PacketDecodeError, PacketNumber, PartialDecode, ProtectedInitialHeader, + FixedLengthConnectionIdParser, Header, InitialHeader, InitialPacket, PacketDecodeError, + PacketNumber, PartialDecode, ProtectedInitialHeader, }, shared::{ ConnectionEvent, ConnectionEventInner, ConnectionId, DatagramConnectionEvent, EcnCodepoint, EndpointEvent, EndpointEventInner, IssuedCid, }, - token, + token::{IncomingToken, InvalidRetryTokenError, Token, TokenPayload}, transport_parameters::{PreferredAddress, TransportParameters}, - Duration, Instant, ResetToken, RetryToken, Side, Transmit, TransportConfig, TransportError, - INITIAL_MTU, MAX_CID_SIZE, MIN_INITIAL_SIZE, RESET_TOKEN_SIZE, }; /// The main entry point to the library @@ -74,7 +74,7 @@ impl Endpoint { ) -> Self { let rng_seed = rng_seed.or(config.rng_seed); Self { - rng: rng_seed.map_or(StdRng::from_entropy(), StdRng::from_seed), + rng: rng_seed.map_or(StdRng::from_os_rng(), StdRng::from_seed), index: ConnectionIndex::default(), connections: Slab::new(), local_cid_generator: 
(config.connection_id_generator_factory.as_ref())(), @@ -102,8 +102,8 @@ impl Endpoint { ) -> Option { use EndpointEventInner::*; match event.0 { - NeedIdentifiers(now, n) => { - return Some(self.send_new_identifiers(now, ch, n)); + NeedIdentifiers(path_id, now, n) => { + return Some(self.send_new_identifiers(path_id, now, ch, n)); } ResetToken(remote, token) => { if let Some(old) = self.connections[ch].reset_token.replace((remote, token)) { @@ -113,12 +113,16 @@ impl Endpoint { warn!("duplicate reset token"); } } - RetireConnectionId(now, seq, allow_more_cids) => { - if let Some(cid) = self.connections[ch].loc_cids.remove(&seq) { + RetireConnectionId(now, path_id, seq, allow_more_cids) => { + if let Some(cid) = self.connections[ch] + .loc_cids + .get_mut(&path_id) + .and_then(|pcid| pcid.cids.remove(&seq)) + { trace!("peer retired CID {}: {}", seq, cid); - self.index.retire(&cid); + self.index.retire(cid); if allow_more_cids { - return Some(self.send_new_identifiers(now, ch, 1)); + return Some(self.send_new_identifiers(path_id, now, ch, 1)); } } } @@ -146,14 +150,22 @@ impl Endpoint { data: BytesMut, buf: &mut Vec, ) -> Option { + // Partially decode packet or short-circuit if unable let datagram_len = data.len(); - let (first_decode, remaining) = match PartialDecode::new( + let mut event = match PartialDecode::new( data, &FixedLengthConnectionIdParser::new(self.local_cid_generator.cid_len()), &self.config.supported_versions, self.config.grease_quic_bit, ) { - Ok(x) => x, + Ok((first_decode, remaining)) => DatagramConnectionEvent { + now, + remote, + path_id: PathId(0), // Corrected later for existing paths + ecn, + first_decode, + remaining, + }, Err(PacketDecodeError::UnsupportedVersion { src_cid, dst_cid, @@ -166,17 +178,16 @@ impl Endpoint { trace!("sending version negotiation"); // Negotiate versions Header::VersionNegotiate { - random: self.rng.gen::() | 0x40, + random: self.rng.random::() | 0x40, src_cid: dst_cid, dst_cid: src_cid, } .encode(buf); // Grease 
with a reserved version - if version != 0x0a1a_2a3a { - buf.write::(0x0a1a_2a3a); - } else { - buf.write::(0x0a1a_2a4a); - } + buf.write::(match version { + 0x0a1a_2a3a => 0x0a1a_2a4a, + _ => 0x0a1a_2a3a, + }); for &version in &self.config.supported_versions { buf.write(version); } @@ -194,18 +205,13 @@ impl Endpoint { } }; - // - // Handle packet on existing connection, if any - // - let addresses = FourTuple { remote, local_ip }; - if let Some(route_to) = self.index.get(&addresses, &first_decode) { - let event = DatagramConnectionEvent { - now, - remote: addresses.remote, - ecn, - first_decode, - remaining, + let dst_cid = event.first_decode.dst_cid(); + + if let Some(route_to) = self.index.get(&addresses, &event.first_decode) { + event.path_id = match route_to { + RouteDatagramTo::Incoming(_) => PathId(0), + RouteDatagramTo::Connection(_, path_id) => path_id, }; match route_to { RouteDatagramTo::Incoming(incoming_idx) => { @@ -226,102 +232,38 @@ impl Endpoint { self.all_incoming_buffers_total_bytes += datagram_len as u64; } - return None; - } - RouteDatagramTo::Connection(ch) => { - return Some(DatagramEvent::ConnectionEvent( - ch, - ConnectionEvent(ConnectionEventInner::Datagram(event)), - )) - } - } - } - - // - // Potentially create a new connection - // - - let dst_cid = first_decode.dst_cid(); - let server_config = match &self.server_config { - Some(config) => config, - None => { - debug!("packet for unrecognized connection {}", dst_cid); - return self - .stateless_reset(now, datagram_len, addresses, dst_cid, buf) - .map(DatagramEvent::Response); - } - }; - - if let Some(header) = first_decode.initial_header() { - if datagram_len < MIN_INITIAL_SIZE as usize { - debug!("ignoring short initial for connection {}", dst_cid); - return None; - } - - let crypto = match server_config.crypto.initial_keys(header.version, dst_cid) { - Ok(keys) => keys, - Err(UnsupportedVersion) => { - // This probably indicates that the user set supported_versions incorrectly in - // 
`EndpointConfig`. - debug!( - "ignoring initial packet version {:#x} unsupported by cryptographic layer", - header.version - ); - return None; + None } - }; - - if let Err(reason) = self.early_validate_first_packet(header) { - return Some(DatagramEvent::Response(self.initial_close( - header.version, - addresses, - &crypto, - &header.src_cid, - reason, - buf, - ))); + RouteDatagramTo::Connection(ch, _path_id) => Some(DatagramEvent::ConnectionEvent( + ch, + ConnectionEvent(ConnectionEventInner::Datagram(event)), + )), } + } else if event.first_decode.initial_header().is_some() { + // Potentially create a new connection - return match first_decode.finish(Some(&*crypto.header.remote)) { - Ok(packet) => { - self.handle_first_packet(addresses, ecn, packet, remaining, crypto, buf, now) - } - Err(e) => { - trace!("unable to decode initial packet: {}", e); - None - } - }; - } else if first_decode.has_long_header() { + self.handle_first_packet(datagram_len, event, addresses, buf) + } else if event.first_decode.has_long_header() { debug!( "ignoring non-initial packet for unknown connection {}", dst_cid ); - return None; - } - - // - // If we got this far, we're a server receiving a seemingly valid packet for an unknown - // connection. Send a stateless reset if possible. - // - - if !first_decode.is_initial() - && self - .local_cid_generator - .validate(first_decode.dst_cid()) - .is_err() + None + } else if !event.first_decode.is_initial() + && self.local_cid_generator.validate(dst_cid).is_err() { - debug!("dropping packet with invalid CID"); - return None; - } + // If we got this far, we're receiving a seemingly valid packet for an unknown + // connection. Send a stateless reset if possible. 
- if !dst_cid.is_empty() { - return self - .stateless_reset(now, datagram_len, addresses, dst_cid, buf) - .map(DatagramEvent::Response); + debug!("dropping packet with invalid CID"); + None + } else if dst_cid.is_empty() { + trace!("dropping unrecognized short packet without ID"); + None + } else { + self.stateless_reset(now, datagram_len, addresses, *dst_cid, buf) + .map(DatagramEvent::Response) } - - trace!("dropping unrecognized short packet without ID"); - None } fn stateless_reset( @@ -329,7 +271,7 @@ impl Endpoint { now: Instant, inciting_dgram_len: usize, addresses: FourTuple, - dst_cid: &ConnectionId, + dst_cid: ConnectionId, buf: &mut Vec, ) -> Option { if self @@ -348,7 +290,10 @@ impl Endpoint { let max_padding_len = match inciting_dgram_len.checked_sub(RESET_TOKEN_SIZE) { Some(headroom) if headroom > MIN_PADDING_LEN => headroom - 1, _ => { - debug!("ignoring unexpected {} byte packet: not larger than minimum stateless reset size", inciting_dgram_len); + debug!( + "ignoring unexpected {} byte packet: not larger than minimum stateless reset size", + inciting_dgram_len + ); return None; } }; @@ -363,12 +308,13 @@ impl Endpoint { let padding_len = if max_padding_len <= IDEAL_MIN_PADDING_LEN { max_padding_len } else { - self.rng.gen_range(IDEAL_MIN_PADDING_LEN..max_padding_len) + self.rng + .random_range(IDEAL_MIN_PADDING_LEN..max_padding_len) }; buf.reserve(padding_len + RESET_TOKEN_SIZE); buf.resize(padding_len, 0); self.rng.fill_bytes(&mut buf[0..padding_len]); - buf[0] = 0b0100_0000 | buf[0] >> 2; + buf[0] = 0b0100_0000 | (buf[0] >> 2); buf.extend_from_slice(&ResetToken::new(&*self.config.reset_key, dst_cid)); debug_assert!(buf.len() < inciting_dgram_len); @@ -404,7 +350,7 @@ impl Endpoint { trace!(initial_dcid = %remote_id); let ch = ConnectionHandle(self.connections.vacant_key()); - let loc_cid = self.new_cid(ch); + let loc_cid = self.new_cid(ch, PathId(0)); let params = TransportParameters::new( &config.transport, &self.config, @@ -430,44 +376,55 @@ 
impl Endpoint { now, tls, config.transport, - SideArgs::Client, + SideArgs::Client { + token_store: config.token_store, + server_name: server_name.into(), + }, ); Ok((ch, conn)) } + /// Generates new CIDs and creates message to send to the connection state fn send_new_identifiers( &mut self, + path_id: PathId, now: Instant, ch: ConnectionHandle, num: u64, ) -> ConnectionEvent { let mut ids = vec![]; for _ in 0..num { - let id = self.new_cid(ch); - let meta = &mut self.connections[ch]; - let sequence = meta.cids_issued; - meta.cids_issued += 1; - meta.loc_cids.insert(sequence, id); + let id = self.new_cid(ch, path_id); + let cid_meta = self.connections[ch].loc_cids.entry(path_id).or_default(); + let sequence = cid_meta.issued; + cid_meta.issued += 1; + cid_meta.cids.insert(sequence, id); ids.push(IssuedCid { + path_id, sequence, id, - reset_token: ResetToken::new(&*self.config.reset_key, &id), + reset_token: ResetToken::new(&*self.config.reset_key, id), }); } - ConnectionEvent(ConnectionEventInner::NewIdentifiers(ids, now)) + ConnectionEvent(ConnectionEventInner::NewIdentifiers( + ids, + now, + self.local_cid_generator.cid_len(), + self.local_cid_generator.cid_lifetime(), + )) } /// Generate a connection ID for `ch` - fn new_cid(&mut self, ch: ConnectionHandle) -> ConnectionId { + fn new_cid(&mut self, ch: ConnectionHandle, path_id: PathId) -> ConnectionId { loop { let cid = self.local_cid_generator.generate_cid(); - if cid.len() == 0 { + if cid.is_empty() { // Zero-length CID; nothing to track debug_assert_eq!(self.local_cid_generator.cid_len(), 0); return cid; } if let hash_map::Entry::Vacant(e) = self.index.connection_ids.entry(cid) { - e.insert(ch); + e.insert((ch, path_id)); break cid; } } @@ -475,14 +432,58 @@ impl Endpoint { fn handle_first_packet( &mut self, + datagram_len: usize, + event: DatagramConnectionEvent, addresses: FourTuple, - ecn: Option, - packet: Packet, - rest: Option, - crypto: Keys, buf: &mut Vec, - now: Instant, ) -> Option { + let dst_cid 
= event.first_decode.dst_cid(); + let header = event.first_decode.initial_header().unwrap(); + + let Some(server_config) = &self.server_config else { + debug!("packet for unrecognized connection {}", dst_cid); + return self + .stateless_reset(event.now, datagram_len, addresses, *dst_cid, buf) + .map(DatagramEvent::Response); + }; + + if datagram_len < MIN_INITIAL_SIZE as usize { + debug!("ignoring short initial for connection {}", dst_cid); + return None; + } + + let crypto = match server_config.crypto.initial_keys(header.version, dst_cid) { + Ok(keys) => keys, + Err(UnsupportedVersion) => { + // This probably indicates that the user set supported_versions incorrectly in + // `EndpointConfig`. + debug!( + "ignoring initial packet version {:#x} unsupported by cryptographic layer", + header.version + ); + return None; + } + }; + + if let Err(reason) = self.early_validate_first_packet(header) { + return Some(DatagramEvent::Response(self.initial_close( + header.version, + addresses, + &crypto, + &header.src_cid, + reason, + buf, + ))); + } + + let packet = match event.first_decode.finish(Some(&*crypto.header.remote)) { + Ok(packet) => packet, + Err(e) => { + trace!("unable to decode initial packet: {}", e); + return None; + } + }; + if !packet.reserved_bits_valid() { debug!("dropping connection attempt with invalid reserved bits"); return None; @@ -494,33 +495,18 @@ impl Endpoint { let server_config = self.server_config.as_ref().unwrap().clone(); - let (retry_src_cid, orig_dst_cid) = if header.token.is_empty() { - (None, header.dst_cid) - } else { - match RetryToken::from_bytes( - &*server_config.token_key, - &addresses.remote, - &header.dst_cid, - &header.token, - ) { - Ok(token) - if token.issued + server_config.retry_token_lifetime - > server_config.time_source.now() => - { - (Some(header.dst_cid), token.orig_dst_cid) - } - Err(token::ValidationError::Unusable) => (None, header.dst_cid), - _ => { - debug!("rejecting invalid stateless retry token"); - return 
Some(DatagramEvent::Response(self.initial_close( - header.version, - addresses, - &crypto, - &header.src_cid, - TransportError::INVALID_TOKEN(""), - buf, - ))); - } + let token = match IncomingToken::from_header(&header, &server_config, addresses.remote) { + Ok(token) => token, + Err(InvalidRetryTokenError) => { + debug!("rejecting invalid retry token"); + return Some(DatagramEvent::Response(self.initial_close( + header.version, + addresses, + &crypto, + &header.src_cid, + TransportError::INVALID_TOKEN(""), + buf, + ))); } }; @@ -529,24 +515,25 @@ impl Endpoint { .insert_initial_incoming(header.dst_cid, incoming_idx); Some(DatagramEvent::NewConnection(Incoming { - received_at: now, + received_at: event.now, addresses, - ecn, + ecn: event.ecn, packet: InitialPacket { header, header_data: packet.header_data, payload: packet.payload, }, - rest, + rest: event.remaining, crypto, - retry_src_cid, - orig_dst_cid, + token, incoming_idx, improper_drop_warner: IncomingImproperDropWarner, })) } /// Attempt to accept this incoming connection (an error may still occur) + // AcceptError cannot be made smaller without semver breakage + #[allow(clippy::result_large_err)] pub fn accept( &mut self, mut incoming: Incoming, @@ -605,6 +592,7 @@ impl Endpoint { .packet .remote .decrypt( + PathId(0), packet_number, &incoming.packet.header_data, &mut incoming.packet.payload, @@ -620,7 +608,7 @@ impl Endpoint { }; let ch = ConnectionHandle(self.connections.vacant_key()); - let loc_cid = self.new_cid(ch); + let loc_cid = self.new_cid(ch, PathId(0)); let mut params = TransportParameters::new( &server_config.transport, &self.config, @@ -629,20 +617,22 @@ impl Endpoint { Some(&server_config), &mut self.rng, ); - params.stateless_reset_token = Some(ResetToken::new(&*self.config.reset_key, &loc_cid)); - params.original_dst_cid = Some(incoming.orig_dst_cid); - params.retry_src_cid = incoming.retry_src_cid; + params.stateless_reset_token = Some(ResetToken::new(&*self.config.reset_key, loc_cid)); + 
params.original_dst_cid = Some(incoming.token.orig_dst_cid); + params.retry_src_cid = incoming.token.retry_src_cid; let mut pref_addr_cid = None; if server_config.preferred_address_v4.is_some() || server_config.preferred_address_v6.is_some() { - let cid = self.new_cid(ch); + // QUIC-MULTIPATH 1.2: Use of the "preferred address" is considered as a + // migration event that does not change the Path ID. + let cid = self.new_cid(ch, PathId(0)); pref_addr_cid = Some(cid); params.preferred_address = Some(PreferredAddress { address_v4: server_config.preferred_address_v4, address_v6: server_config.preferred_address_v6, connection_id: cid, - stateless_reset_token: ResetToken::new(&*self.config.reset_key, &cid), + stateless_reset_token: ResetToken::new(&*self.config.reset_key, cid), }); } @@ -749,10 +739,10 @@ impl Endpoint { /// Respond with a retry packet, requiring the client to retry with address validation /// - /// Errors if `incoming.remote_address_validated()` is true. + /// Errors if `incoming.may_retry()` is false. pub fn retry(&mut self, incoming: Incoming, buf: &mut Vec) -> Result { - if incoming.remote_address_validated() { - return Err(RetryError(incoming)); + if !incoming.may_retry() { + return Err(RetryError(Box::new(incoming))); } self.clean_up_incoming(&incoming); @@ -768,15 +758,12 @@ impl Endpoint { // retried by the application layer. 
let loc_cid = self.local_cid_generator.generate_cid(); - let token = RetryToken { + let payload = TokenPayload::Retry { + address: incoming.addresses.remote, orig_dst_cid: incoming.packet.header.dst_cid, issued: server_config.time_source.now(), - } - .encode( - &*server_config.token_key, - &incoming.addresses.remote, - &loc_cid, - ); + }; + let token = Token::new(payload, &mut self.rng).encode(&*server_config.token_key); let header = Header::Retry { src_cid: loc_cid, @@ -852,22 +839,19 @@ impl Endpoint { side_args, ); - let mut cids_issued = 0; - let mut loc_cids = FxHashMap::default(); - - loc_cids.insert(cids_issued, loc_cid); - cids_issued += 1; + let mut path_cids = PathLocalCids::default(); + path_cids.cids.insert(path_cids.issued, loc_cid); + path_cids.issued += 1; if let Some(cid) = pref_addr_cid { - debug_assert_eq!(cids_issued, 1, "preferred address cid seq must be 1"); - loc_cids.insert(cids_issued, cid); - cids_issued += 1; + debug_assert_eq!(path_cids.issued, 1, "preferred address cid seq must be 1"); + path_cids.cids.insert(path_cids.issued, cid); + path_cids.issued += 1; } let id = self.connections.insert(ConnectionMeta { init_cid, - cids_issued, - loc_cids, + loc_cids: FxHashMap::from_iter([(PathId(0), path_cids)]), addresses, side, reset_token: None, @@ -906,7 +890,11 @@ impl Endpoint { INITIAL_MTU as usize - partial_encode.header_len - crypto.packet.local.tag_len(); frame::Close::from(reason).encode(buf, max_len); buf.resize(buf.len() + crypto.packet.local.tag_len(), 0); - partial_encode.finish(buf, &*crypto.header.local, Some((0, &*crypto.packet.local))); + partial_encode.finish( + buf, + &*crypto.header.local, + Some((0, Default::default(), &*crypto.packet.local)), + ); Transmit { destination: addresses.remote, ecn: None, @@ -991,7 +979,7 @@ struct IncomingBuffer { #[derive(Copy, Clone, Debug)] enum RouteDatagramTo { Incoming(usize), - Connection(ConnectionHandle), + Connection(ConnectionHandle, PathId), } /// Maps packets to existing connections 
@@ -1006,7 +994,7 @@ struct ConnectionIndex { /// Identifies connections based on locally created CIDs /// /// Uses a cheaper hash function since keys are locally created - connection_ids: FxHashMap, + connection_ids: FxHashMap, /// Identifies incoming connections with zero-length CIDs /// /// Uses a standard `HashMap` to protect against hash collision attacks. @@ -1030,7 +1018,7 @@ struct ConnectionIndex { impl ConnectionIndex { /// Associate an incoming connection with its initial destination CID fn insert_initial_incoming(&mut self, dst_cid: ConnectionId, incoming_key: usize) { - if dst_cid.len() == 0 { + if dst_cid.is_empty() { return; } self.connection_ids_initial @@ -1039,7 +1027,7 @@ impl ConnectionIndex { /// Remove an association with an initial destination CID fn remove_initial(&mut self, dst_cid: ConnectionId) { - if dst_cid.len() == 0 { + if dst_cid.is_empty() { return; } let removed = self.connection_ids_initial.remove(&dst_cid); @@ -1048,11 +1036,11 @@ impl ConnectionIndex { /// Associate a connection with its initial destination CID fn insert_initial(&mut self, dst_cid: ConnectionId, connection: ConnectionHandle) { - if dst_cid.len() == 0 { + if dst_cid.is_empty() { return; } self.connection_ids_initial - .insert(dst_cid, RouteDatagramTo::Connection(connection)); + .insert(dst_cid, RouteDatagramTo::Connection(connection, PathId(0))); } /// Associate a connection with its first locally-chosen destination CID if used, or otherwise @@ -1076,14 +1064,14 @@ impl ConnectionIndex { } }, _ => { - self.connection_ids.insert(dst_cid, connection); + self.connection_ids.insert(dst_cid, (connection, PathId(0))); } } } /// Discard a connection ID - fn retire(&mut self, dst_cid: &ConnectionId) { - self.connection_ids.remove(dst_cid); + fn retire(&mut self, dst_cid: ConnectionId) { + self.connection_ids.remove(&dst_cid); } /// Remove all references to a connection @@ -1091,7 +1079,7 @@ impl ConnectionIndex { if conn.side.is_server() { 
self.remove_initial(conn.init_cid); } - for cid in conn.loc_cids.values() { + for cid in conn.loc_cids.values().flat_map(|pcids| pcids.cids.values()) { self.connection_ids.remove(cid); } self.incoming_connection_remotes.remove(&conn.addresses); @@ -1104,9 +1092,9 @@ impl ConnectionIndex { /// Find the existing connection that `datagram` should be routed to, if any fn get(&self, addresses: &FourTuple, datagram: &PartialDecode) -> Option { - if datagram.dst_cid().len() != 0 { - if let Some(&ch) = self.connection_ids.get(datagram.dst_cid()) { - return Some(RouteDatagramTo::Connection(ch)); + if !datagram.dst_cid().is_empty() { + if let Some(&(ch, path_id)) = self.connection_ids.get(datagram.dst_cid()) { + return Some(RouteDatagramTo::Connection(ch, path_id)); } } if datagram.is_initial() || datagram.is_0rtt() { @@ -1114,31 +1102,35 @@ impl ConnectionIndex { return Some(ch); } } - if datagram.dst_cid().len() == 0 { + if datagram.dst_cid().is_empty() { if let Some(&ch) = self.incoming_connection_remotes.get(addresses) { - return Some(RouteDatagramTo::Connection(ch)); + // Never multipath because QUIC-MULTIPATH 1.1 mandates the use of non-zero + // length CIDs. So this is always PathId(0). + return Some(RouteDatagramTo::Connection(ch, PathId(0))); } if let Some(&ch) = self.outgoing_connection_remotes.get(&addresses.remote) { - return Some(RouteDatagramTo::Connection(ch)); + // Like above, QUIC-MULTIPATH 1.1 mandates the use of non-zero length CIDs. + return Some(RouteDatagramTo::Connection(ch, PathId(0))); } } let data = datagram.data(); if data.len() < RESET_TOKEN_SIZE { return None; } + // For stateless resets the PathId is meaningless since it closes the entire + // connection regarldess of path. So use PathId(0). 
self.connection_reset_tokens .get(addresses.remote, &data[data.len() - RESET_TOKEN_SIZE..]) .cloned() - .map(RouteDatagramTo::Connection) + .map(|ch| RouteDatagramTo::Connection(ch, PathId(0))) } } #[derive(Debug)] pub(crate) struct ConnectionMeta { init_cid: ConnectionId, - /// Number of local connection IDs that have been issued in NEW_CONNECTION_ID frames. - cids_issued: u64, - loc_cids: FxHashMap, + /// Locally issues CIDs for each path + loc_cids: FxHashMap, /// Remote/local addresses the connection began with /// /// Only needed to support connections with zero-length CIDs, which cannot migrate, so we don't @@ -1150,6 +1142,17 @@ pub(crate) struct ConnectionMeta { reset_token: Option<(SocketAddr, ResetToken)>, } +/// Local connection IDs for a single path +#[derive(Debug, Default)] +struct PathLocalCids { + /// Number of connection IDs that have been issued in (PATH_)NEW_CONNECTION_ID frames + /// + /// Another way of saying this is that this is the next sequence number to be issued. + issued: u64, + /// Issues CIDs indexed by their sequence number. 
+ cids: FxHashMap, +} + /// Internal identifier for a `Connection` currently associated with an endpoint #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] pub struct ConnectionHandle(pub usize); @@ -1174,7 +1177,6 @@ impl IndexMut for Slab { } /// Event resulting from processing a single datagram -#[allow(clippy::large_enum_variant)] // Not passed around extensively pub enum DatagramEvent { /// The datagram is redirected to its `Connection` ConnectionEvent(ConnectionHandle, ConnectionEvent), @@ -1192,8 +1194,7 @@ pub struct Incoming { packet: InitialPacket, rest: Option, crypto: Keys, - retry_src_cid: Option, - orig_dst_cid: ConnectionId, + token: IncomingToken, incoming_idx: usize, improper_drop_warner: IncomingImproperDropWarner, } @@ -1215,13 +1216,24 @@ impl Incoming { /// /// This means that the sender of the initial packet has proved that they can receive traffic /// sent to `self.remote_address()`. + /// + /// If `self.remote_address_validated()` is false, `self.may_retry()` is guaranteed to be true. + /// The inverse is not guaranteed. pub fn remote_address_validated(&self) -> bool { - self.retry_src_cid.is_some() + self.token.validated + } + + /// Whether it is legal to respond with a retry packet + /// + /// If `self.remote_address_validated()` is false, `self.may_retry()` is guaranteed to be true. + /// The inverse is not guaranteed. 
+ pub fn may_retry(&self) -> bool { + self.token.retry_src_cid.is_none() } /// The original destination connection ID sent by the client pub fn orig_dst_cid(&self) -> &ConnectionId { - &self.orig_dst_cid + &self.token.orig_dst_cid } } @@ -1232,8 +1244,7 @@ impl fmt::Debug for Incoming { .field("ecn", &self.ecn) // packet doesn't implement debug // rest is too big and not meaningful enough - .field("retry_src_cid", &self.retry_src_cid) - .field("orig_dst_cid", &self.orig_dst_cid) + .field("token", &self.token) .field("incoming_idx", &self.incoming_idx) // improper drop warner contains no information .finish_non_exhaustive() @@ -1250,8 +1261,10 @@ impl IncomingImproperDropWarner { impl Drop for IncomingImproperDropWarner { fn drop(&mut self) { - warn!("quinn_proto::Incoming dropped without passing to Endpoint::accept/refuse/retry/ignore \ - (may cause memory leak and eventual inability to accept new connections)"); + warn!( + "quinn_proto::Incoming dropped without passing to Endpoint::accept/refuse/retry/ignore \ + (may cause memory leak and eventual inability to accept new connections)" + ); } } @@ -1300,12 +1313,12 @@ pub struct AcceptError { /// Error for attempting to retry an [`Incoming`] which already bears a token from a previous retry #[derive(Debug, Error)] #[error("retry() with validated Incoming")] -pub struct RetryError(Incoming); +pub struct RetryError(Box); impl RetryError { /// Get the [`Incoming`] pub fn into_incoming(self) -> Incoming { - self.0 + *self.0 } } diff --git a/quinn-proto/src/frame.rs b/quinn-proto/src/frame.rs index 915721c7f9..93350dab2f 100644 --- a/quinn-proto/src/frame.rs +++ b/quinn-proto/src/frame.rs @@ -1,6 +1,6 @@ use std::{ fmt::{self, Write}, - io, mem, + mem, net::{IpAddr, SocketAddr}, ops::{Range, RangeInclusive}, }; @@ -9,11 +9,12 @@ use bytes::{Buf, BufMut, Bytes}; use tinyvec::TinyVec; use crate::{ + Dir, MAX_CID_SIZE, RESET_TOKEN_SIZE, ResetToken, StreamId, TransportError, TransportErrorCode, + VarInt, coding::{self, 
BufExt, BufMutExt, UnexpectedEnd}, + connection::PathId, range_set::ArrayRangeSet, shared::{ConnectionId, EcnCodepoint}, - Dir, ResetToken, StreamId, TransportError, TransportErrorCode, VarInt, MAX_CID_SIZE, - RESET_TOKEN_SIZE, }; #[cfg(feature = "arbitrary")] @@ -138,6 +139,17 @@ frame_types! { // ADDRESS DISCOVERY REPORT OBSERVED_IPV4_ADDR = 0x9f81a6, OBSERVED_IPV6_ADDR = 0x9f81a7, + // Multipath + PATH_ACK = 0x15228c00, + PATH_ACK_ECN = 0x15228c01, + PATH_ABANDON = 0x15228c05, + PATH_BACKUP = 0x15228c07, + PATH_AVAILABLE = 0x15228c08, + PATH_NEW_CONNECTION_ID = 0x15228c09, + PATH_RETIRE_CONNECTION_ID = 0x15228c0a, + MAX_PATH_ID = 0x15228c0c, + PATHS_BLOCKED = 0x15228c0d, + PATH_CIDS_BLOCKED = 0x15228c0e, } const STREAM_TYS: RangeInclusive = RangeInclusive::new(0x08, 0x0f); @@ -148,19 +160,34 @@ pub(crate) enum Frame { Padding, Ping, Ack(Ack), + PathAck(PathAck), ResetStream(ResetStream), StopSending(StopSending), Crypto(Crypto), - NewToken { token: Bytes }, + NewToken(NewToken), Stream(Stream), MaxData(VarInt), - MaxStreamData { id: StreamId, offset: u64 }, - MaxStreams { dir: Dir, count: u64 }, - DataBlocked { offset: u64 }, - StreamDataBlocked { id: StreamId, offset: u64 }, - StreamsBlocked { dir: Dir, limit: u64 }, + MaxStreamData { + id: StreamId, + offset: u64, + }, + MaxStreams { + dir: Dir, + count: u64, + }, + DataBlocked { + offset: u64, + }, + StreamDataBlocked { + id: StreamId, + offset: u64, + }, + StreamsBlocked { + dir: Dir, + limit: u64, + }, NewConnectionId(NewConnectionId), - RetireConnectionId { sequence: u64 }, + RetireConnectionId(RetireConnectionId), PathChallenge(u64), PathResponse(u64), Close(Close), @@ -169,11 +196,21 @@ pub(crate) enum Frame { ImmediateAck, HandshakeDone, ObservedAddr(ObservedAddr), + #[allow(dead_code)] // TODO(flub) + PathAbandon(PathAbandon), + PathAvailable(PathAvailable), + #[allow(dead_code)] // TODO(flub) + MaxPathId(PathId), + PathsBlocked(PathId), + // TODO(flub): We should send this to be spec-compliant, but 
for ourselves we don't + // really care because we always issue CIDs. Perhaps we can get this frame removed + // again from the spec: https://github.com/quicwg/multipath/issues/500 + PathCidsBlocked(PathId), } impl Frame { pub(crate) fn ty(&self) -> FrameType { - use self::Frame::*; + use Frame::*; match *self { Padding => FrameType::PADDING, ResetStream(_) => FrameType::RESET_STREAM, @@ -191,6 +228,7 @@ impl Frame { StopSending { .. } => FrameType::STOP_SENDING, RetireConnectionId { .. } => FrameType::RETIRE_CONNECTION_ID, Ack(_) => FrameType::ACK, + PathAck(_) => FrameType::PATH_ACK, Stream(ref x) => { let mut ty = *STREAM_TYS.start(); if x.fin { @@ -203,14 +241,19 @@ impl Frame { } PathChallenge(_) => FrameType::PATH_CHALLENGE, PathResponse(_) => FrameType::PATH_RESPONSE, - NewConnectionId { .. } => FrameType::NEW_CONNECTION_ID, + NewConnectionId(cid) => cid.get_type(), Crypto(_) => FrameType::CRYPTO, - NewToken { .. } => FrameType::NEW_TOKEN, + NewToken(_) => FrameType::NEW_TOKEN, Datagram(_) => FrameType(*DATAGRAM_TYS.start()), AckFrequency(_) => FrameType::ACK_FREQUENCY, ImmediateAck => FrameType::IMMEDIATE_ACK, HandshakeDone => FrameType::HANDSHAKE_DONE, ObservedAddr(ref observed) => observed.get_type(), + PathAbandon(_) => FrameType::PATH_ABANDON, + PathAvailable(ref path_avaiable) => path_avaiable.get_type(), + MaxPathId(_) => FrameType::MAX_PATH_ID, + PathsBlocked(_) => FrameType::PATHS_BLOCKED, + PathCidsBlocked(_) => FrameType::PATH_CIDS_BLOCKED, } } @@ -219,6 +262,58 @@ impl Frame { } } +#[derive(Debug, PartialEq, Eq)] +pub(crate) struct RetireConnectionId { + pub(crate) path_id: Option, + pub(crate) sequence: u64, +} + +impl RetireConnectionId { + // TODO(@divma): docs + pub(crate) fn write(&self, buf: &mut W) { + buf.write(self.get_type()); + if let Some(id) = self.path_id { + buf.write(id); + } + buf.write(self.sequence); + } + + // TODO(@divma): docs + // should only be called after the frame type has been verified + pub(crate) fn read(bytes: &mut 
R, read_path: bool) -> coding::Result { + Ok(Self { + path_id: if read_path { Some(bytes.get()?) } else { None }, + sequence: bytes.get()?, + }) + } + + fn get_type(&self) -> FrameType { + if self.path_id.is_some() { + FrameType::PATH_RETIRE_CONNECTION_ID + } else { + FrameType::RETIRE_CONNECTION_ID + } + } + + /// Returns the maximum encoded size on the wire. + /// + /// This is a rough upper estimate, does not squeeze every last byte out. + // TODO(flub): This might be overkill and maybe we should just use a const + pub(crate) fn size_bound(path_retire_cid: bool) -> usize { + let type_id = match path_retire_cid { + true => FrameType::PATH_RETIRE_CONNECTION_ID.0, + false => FrameType::RETIRE_CONNECTION_ID.0, + }; + let type_len = VarInt::try_from(type_id).unwrap().size(); + let path_id_len = match path_retire_cid { + true => VarInt::from(u32::MAX).size(), + false => 0, + }; + let seq_max_len = 8usize; + type_len + path_id_len + seq_max_len + } +} + #[derive(Clone, Debug)] pub enum Close { Connection(ConnectionClose), @@ -344,6 +439,93 @@ impl ApplicationClose { } } +#[derive(Clone, Eq, PartialEq)] +pub struct PathAck { + pub path_id: PathId, + pub largest: u64, + pub delay: u64, + pub additional: Bytes, + pub ecn: Option, +} + +impl fmt::Debug for PathAck { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut ranges = "[".to_string(); + let mut first = true; + for range in self.into_iter() { + if !first { + ranges.push(','); + } + write!(ranges, "{range:?}").unwrap(); + first = false; + } + ranges.push(']'); + + f.debug_struct("PathAck") + .field("path_id", &self.path_id) + .field("largest", &self.largest) + .field("delay", &self.delay) + .field("ecn", &self.ecn) + .field("ranges", &ranges) + .finish() + } +} + +impl<'a> IntoIterator for &'a PathAck { + type Item = RangeInclusive; + type IntoIter = AckIter<'a>; + + fn into_iter(self) -> AckIter<'a> { + AckIter::new(self.largest, &self.additional[..]) + } +} + +impl PathAck { + pub fn encode( + 
path_id: PathId, + delay: u64, + ranges: &ArrayRangeSet, + ecn: Option<&EcnCounts>, + buf: &mut W, + ) { + let mut rest = ranges.iter().rev(); + let first = rest.next().unwrap(); + let largest = first.end - 1; + let first_size = first.end - first.start; + let kind = match ecn.is_some() { + true => FrameType::PATH_ACK_ECN, + false => FrameType::PATH_ACK, + }; + buf.write(kind); + buf.write(path_id); + buf.write_var(largest); + buf.write_var(delay); + buf.write_var(ranges.len() as u64 - 1); + buf.write_var(first_size - 1); + let mut prev = first.start; + for block in rest { + let size = block.end - block.start; + buf.write_var(prev - block.end - 1); + buf.write_var(size - 1); + prev = block.start; + } + if let Some(x) = ecn { + x.encode(buf) + } + } + + pub fn into_ack(self) -> (Ack, PathId) { + let ack = Ack { + largest: self.largest, + delay: self.delay, + additional: self.additional, + ecn: self.ecn, + }; + + (ack, self.path_id) + } +} + #[derive(Clone, Eq, PartialEq)] pub struct Ack { pub largest: u64, @@ -394,11 +576,11 @@ impl Ack { let first = rest.next().unwrap(); let largest = first.end - 1; let first_size = first.end - first.start; - buf.write(if ecn.is_some() { - FrameType::ACK_ECN - } else { - FrameType::ACK - }); + let kind = match ecn.is_some() { + true => FrameType::ACK_ECN, + false => FrameType::ACK, + }; + buf.write(kind); buf.write_var(largest); buf.write_var(delay); buf.write_var(ranges.len() as u64 - 1); @@ -531,9 +713,25 @@ impl Crypto { } } +#[derive(Debug, Clone)] +pub(crate) struct NewToken { + pub(crate) token: Bytes, +} + +impl NewToken { + pub(crate) fn encode(&self, out: &mut W) { + out.write(FrameType::NEW_TOKEN); + out.write_var(self.token.len() as u64); + out.put_slice(&self.token); + } + + pub(crate) fn size(&self) -> usize { + 1 + VarInt::from_u64(self.token.len() as u64).unwrap().size() + self.token.len() + } +} + pub(crate) struct Iter { - // TODO: ditch io::Cursor after bytes 0.5 - bytes: io::Cursor, + bytes: Bytes, last_ty: 
Option, } @@ -549,7 +747,7 @@ impl Iter { } Ok(Self { - bytes: io::Cursor::new(payload), + bytes: payload, last_ty: None, }) } @@ -559,11 +757,10 @@ impl Iter { if len > self.bytes.remaining() as u64 { return Err(UnexpectedEnd); } - let start = self.bytes.position() as usize; - self.bytes.advance(len as usize); - Ok(self.bytes.get_ref().slice(start..(start + len as usize))) + Ok(self.bytes.split_to(len as usize)) } + #[track_caller] fn try_next(&mut self) -> Result { let ty = self.bytes.get::()?; self.last_ty = Some(ty); @@ -578,11 +775,7 @@ impl Iter { error_code: self.bytes.get()?, frame_type: { let x = self.bytes.get_var()?; - if x == 0 { - None - } else { - Some(FrameType(x)) - } + if x == 0 { None } else { Some(FrameType(x)) } }, reason: self.take_len()?, })), @@ -623,21 +816,44 @@ impl Iter { id: self.bytes.get()?, error_code: self.bytes.get()?, }), - FrameType::RETIRE_CONNECTION_ID => Frame::RetireConnectionId { - sequence: self.bytes.get_var()?, - }, + FrameType::RETIRE_CONNECTION_ID | FrameType::PATH_RETIRE_CONNECTION_ID => { + Frame::RetireConnectionId(RetireConnectionId::read( + &mut self.bytes, + ty == FrameType::PATH_RETIRE_CONNECTION_ID, + )?) + } FrameType::ACK | FrameType::ACK_ECN => { let largest = self.bytes.get_var()?; let delay = self.bytes.get_var()?; let extra_blocks = self.bytes.get_var()? 
as usize; - let start = self.bytes.position() as usize; - scan_ack_blocks(&mut self.bytes, largest, extra_blocks)?; - let end = self.bytes.position() as usize; + let n = scan_ack_blocks(&self.bytes, largest, extra_blocks)?; Frame::Ack(Ack { delay, largest, - additional: self.bytes.get_ref().slice(start..end), - ecn: if ty != FrameType::ACK_ECN { + additional: self.bytes.split_to(n), + ecn: if ty != FrameType::ACK_ECN && ty != FrameType::PATH_ACK_ECN { + None + } else { + Some(EcnCounts { + ect0: self.bytes.get_var()?, + ect1: self.bytes.get_var()?, + ce: self.bytes.get_var()?, + }) + }, + }) + } + FrameType::PATH_ACK | FrameType::PATH_ACK_ECN => { + let path_id = self.bytes.get()?; + let largest = self.bytes.get_var()?; + let delay = self.bytes.get_var()?; + let extra_blocks = self.bytes.get_var()? as usize; + let n = scan_ack_blocks(&self.bytes, largest, extra_blocks)?; + Frame::PathAck(PathAck { + path_id, + delay, + largest, + additional: self.bytes.split_to(n), + ecn: if ty != FrameType::ACK_ECN && ty != FrameType::PATH_ACK_ECN { None } else { Some(EcnCounts { @@ -650,41 +866,17 @@ impl Iter { } FrameType::PATH_CHALLENGE => Frame::PathChallenge(self.bytes.get()?), FrameType::PATH_RESPONSE => Frame::PathResponse(self.bytes.get()?), - FrameType::NEW_CONNECTION_ID => { - let sequence = self.bytes.get_var()?; - let retire_prior_to = self.bytes.get_var()?; - if retire_prior_to > sequence { - return Err(IterErr::Malformed); - } - let length = self.bytes.get::()? 
as usize; - if length > MAX_CID_SIZE || length == 0 { - return Err(IterErr::Malformed); - } - if length > self.bytes.remaining() { - return Err(IterErr::UnexpectedEnd); - } - let mut stage = [0; MAX_CID_SIZE]; - self.bytes.copy_to_slice(&mut stage[0..length]); - let id = ConnectionId::new(&stage[..length]); - if self.bytes.remaining() < 16 { - return Err(IterErr::UnexpectedEnd); - } - let mut reset_token = [0; RESET_TOKEN_SIZE]; - self.bytes.copy_to_slice(&mut reset_token); - Frame::NewConnectionId(NewConnectionId { - sequence, - retire_prior_to, - id, - reset_token: reset_token.into(), - }) + FrameType::NEW_CONNECTION_ID | FrameType::PATH_NEW_CONNECTION_ID => { + let read_path = ty == FrameType::PATH_NEW_CONNECTION_ID; + Frame::NewConnectionId(NewConnectionId::read(&mut self.bytes, read_path)?) } FrameType::CRYPTO => Frame::Crypto(Crypto { offset: self.bytes.get_var()?, data: self.take_len()?, }), - FrameType::NEW_TOKEN => Frame::NewToken { + FrameType::NEW_TOKEN => Frame::NewToken(NewToken { token: self.take_len()?, - }, + }), FrameType::HANDSHAKE_DONE => Frame::HandshakeDone, FrameType::ACK_FREQUENCY => Frame::AckFrequency(AckFrequency { sequence: self.bytes.get()?, @@ -698,6 +890,14 @@ impl Iter { let observed = ObservedAddr::read(&mut self.bytes, is_ipv6)?; Frame::ObservedAddr(observed) } + FrameType::PATH_ABANDON => Frame::PathAbandon(PathAbandon::read(&mut self.bytes)?), + FrameType::PATH_BACKUP | FrameType::PATH_AVAILABLE => { + let is_backup = ty == FrameType::PATH_BACKUP; + Frame::PathAvailable(PathAvailable::read(&mut self.bytes, is_backup)?) 
+ } + FrameType::MAX_PATH_ID => Frame::MaxPathId(self.bytes.get()?), + FrameType::PATHS_BLOCKED => Frame::PathsBlocked(self.bytes.get()?), + FrameType::PATH_CIDS_BLOCKED => Frame::PathCidsBlocked(self.bytes.get()?), _ => { if let Some(s) = ty.stream() { Frame::Stream(Stream { @@ -726,10 +926,7 @@ impl Iter { } fn take_remaining(&mut self) -> Bytes { - let mut x = mem::replace(self.bytes.get_mut(), Bytes::new()); - x.advance(self.bytes.position() as usize); - self.bytes.set_position(0); - x + mem::take(&mut self.bytes) } } @@ -743,7 +940,7 @@ impl Iterator for Iter { Ok(x) => Some(Ok(x)), Err(e) => { // Corrupt frame, skip it and everything that follows - self.bytes = io::Cursor::new(Bytes::new()); + self.bytes.clear(); Some(Err(InvalidFrame { ty: self.last_ty, reason: e.reason(), @@ -767,7 +964,9 @@ impl From for TransportError { } } -fn scan_ack_blocks(buf: &mut io::Cursor, largest: u64, n: usize) -> Result<(), IterErr> { +/// Validate exactly `n` ACK ranges in `buf` and return the number of bytes they cover +fn scan_ack_blocks(mut buf: &[u8], largest: u64, n: usize) -> Result { + let total_len = buf.remaining(); let first_block = buf.get_var()?; let mut smallest = largest.checked_sub(first_block).ok_or(IterErr::Malformed)?; for _ in 0..n { @@ -776,9 +975,10 @@ fn scan_ack_blocks(buf: &mut io::Cursor, largest: u64, n: usize) -> Resul let block = buf.get_var()?; smallest = smallest.checked_sub(block).ok_or(IterErr::Malformed)?; } - Ok(()) + Ok(total_len - buf.remaining()) } +#[derive(Debug)] enum IterErr { UnexpectedEnd, InvalidFrameId, @@ -787,7 +987,7 @@ enum IterErr { impl IterErr { fn reason(&self) -> &'static str { - use self::IterErr::*; + use IterErr::*; match *self { UnexpectedEnd => "unexpected end", InvalidFrameId => "invalid frame ID", @@ -805,12 +1005,11 @@ impl From for IterErr { #[derive(Debug, Clone)] pub struct AckIter<'a> { largest: u64, - data: io::Cursor<&'a [u8]>, + data: &'a [u8], } impl<'a> AckIter<'a> { - fn new(largest: u64, payload: &'a 
[u8]) -> Self { - let data = io::Cursor::new(payload); + fn new(largest: u64, data: &'a [u8]) -> Self { Self { largest, data } } } @@ -870,8 +1069,9 @@ impl StopSending { } } -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, PartialEq, Eq)] pub(crate) struct NewConnectionId { + pub(crate) path_id: Option, pub(crate) sequence: u64, pub(crate) retire_prior_to: u64, pub(crate) id: ConnectionId, @@ -880,17 +1080,77 @@ pub(crate) struct NewConnectionId { impl NewConnectionId { pub(crate) fn encode(&self, out: &mut W) { - out.write(FrameType::NEW_CONNECTION_ID); + out.write(self.get_type()); + if let Some(id) = self.path_id { + out.write(id); + } out.write_var(self.sequence); out.write_var(self.retire_prior_to); out.write(self.id.len() as u8); out.put_slice(&self.id); out.put_slice(&self.reset_token); } -} -/// Smallest number of bytes this type of frame is guaranteed to fit within. -pub(crate) const RETIRE_CONNECTION_ID_SIZE_BOUND: usize = 9; + pub(crate) fn get_type(&self) -> FrameType { + if self.path_id.is_some() { + FrameType::PATH_NEW_CONNECTION_ID + } else { + FrameType::NEW_CONNECTION_ID + } + } + + /// Returns the maximum encoded size on the wire. + /// + /// This is a rough upper estimate, does not squeeze every last byte out. 
+ // TODO(flub): This might be overkill and maybe we should just use a const + pub(crate) fn size_bound(path_new_cid: bool, cid_len: usize) -> usize { + let type_id = match path_new_cid { + true => FrameType::PATH_NEW_CONNECTION_ID.0, + false => FrameType::NEW_CONNECTION_ID.0, + }; + let type_len = VarInt::try_from(type_id).unwrap().size(); + let path_id_len = match path_new_cid { + true => VarInt::from(u32::MAX).size(), + false => 0, + }; + let seq_max_len = 8usize; + let retire_prior_to_max_len = 8usize; + let cid_len = 1 + cid_len; + let reset_token_len = 16; + type_len + path_id_len + seq_max_len + retire_prior_to_max_len + cid_len + reset_token_len + } + + fn read(bytes: &mut R, read_path: bool) -> Result { + let path_id = if read_path { Some(bytes.get()?) } else { None }; + let sequence = bytes.get_var()?; + let retire_prior_to = bytes.get_var()?; + if retire_prior_to > sequence { + return Err(IterErr::Malformed); + } + let length = bytes.get::()? as usize; + if length > MAX_CID_SIZE || length == 0 { + return Err(IterErr::Malformed); + } + if length > bytes.remaining() { + return Err(IterErr::UnexpectedEnd); + } + let mut stage = [0; MAX_CID_SIZE]; + bytes.copy_to_slice(&mut stage[0..length]); + let id = ConnectionId::new(&stage[..length]); + if bytes.remaining() < 16 { + return Err(IterErr::UnexpectedEnd); + } + let mut reset_token = [0; RESET_TOKEN_SIZE]; + bytes.copy_to_slice(&mut reset_token); + Ok(Self { + path_id, + sequence, + retire_prior_to, + id, + reset_token: reset_token.into(), + }) + } +} /// An unreliable datagram #[derive(Debug, Clone)] @@ -904,13 +1164,13 @@ impl FrameStruct for Datagram { } impl Datagram { - pub(crate) fn encode(&self, length: bool, out: &mut Vec) { + pub(crate) fn encode(&self, length: bool, out: &mut impl BufMut) { out.write(FrameType(*DATAGRAM_TYS.start() | u64::from(length))); // 1 byte if length { // Safe to unwrap because we check length sanity before queueing datagrams out.write(VarInt::from_u64(self.data.len() as 
u64).unwrap()); // <= 8 bytes } - out.extend_from_slice(&self.data); + out.put_slice(&self.data); } pub(crate) fn size(&self, length: bool) -> usize { @@ -1017,13 +1277,78 @@ impl ObservedAddr { } } +/* Multipath */ + +// TODO(@divma): AbandonPath? PathAbandon is the name in the spec.... +#[derive(Debug, PartialEq, Eq)] +pub(crate) struct PathAbandon { + path_id: PathId, + // TODO(@divma): this is TransportErrorCode plus two new errors + error_code: TransportErrorCode, +} + +#[allow(dead_code)] // TODO(flub) +impl PathAbandon { + // TODO(@divma): docs + pub(crate) fn write(&self, buf: &mut W) { + buf.write(FrameType::PATH_ABANDON); + buf.write(self.path_id); + buf.write(self.error_code); + } + + // TODO(@divma): docs + // should only be called after the frame type has been verified + pub(crate) fn read(bytes: &mut R) -> coding::Result { + Ok(Self { + path_id: bytes.get()?, + error_code: bytes.get()?, + }) + } +} + +// TODO(@divma): split? +#[derive(Debug, PartialEq, Eq)] +pub(crate) struct PathAvailable { + pub(crate) is_backup: bool, + pub(crate) path_id: PathId, + pub(crate) status_seq_no: VarInt, +} + +#[allow(dead_code)] // TODO(flub) +impl PathAvailable { + // TODO(@divma): docs + pub(crate) fn write(&self, buf: &mut W) { + buf.write(self.get_type()); + buf.write(self.path_id); + buf.write(self.status_seq_no); + } + + // TODO(@divma): docs + // should only be called after the frame type has been verified + pub(crate) fn read(bytes: &mut R, is_backup: bool) -> coding::Result { + Ok(Self { + is_backup, + path_id: bytes.get()?, + status_seq_no: bytes.get()?, + }) + } + + fn get_type(&self) -> FrameType { + if self.is_backup { + FrameType::PATH_BACKUP + } else { + FrameType::PATH_AVAILABLE + } + } +} + #[cfg(test)] mod test { - use super::*; use crate::coding::Codec; use assert_matches::assert_matches; + #[track_caller] fn frames(buf: Vec) -> Vec { Iter::new(Bytes::from(buf)) .unwrap() @@ -1032,7 +1357,6 @@ mod test { } #[test] - #[allow(clippy::range_plus_one)] 
fn ack_coding() { const PACKETS: &[u64] = &[1, 2, 3, 5, 10, 11, 14]; let mut ranges = ArrayRangeSet::new(); @@ -1059,6 +1383,36 @@ mod test { } } + #[test] + #[allow(clippy::range_plus_one)] + fn path_ack_coding() { + const PACKETS: &[u64] = &[1, 2, 3, 5, 10, 11, 14]; + let mut ranges = ArrayRangeSet::new(); + for &packet in PACKETS { + ranges.insert(packet..packet + 1); + } + let mut buf = Vec::new(); + const ECN: EcnCounts = EcnCounts { + ect0: 42, + ect1: 24, + ce: 12, + }; + const PATH_ID: PathId = PathId(u32::MAX); + PathAck::encode(PATH_ID, 42, &ranges, Some(&ECN), &mut buf); + let frames = frames(buf); + assert_eq!(frames.len(), 1); + match frames[0] { + Frame::PathAck(ref ack) => { + assert_eq!(ack.path_id, PATH_ID); + let mut packets = ack.into_iter().flatten().collect::>(); + packets.sort_unstable(); + assert_eq!(&packets[..], PACKETS); + assert_eq!(ack.ecn, Some(ECN)); + } + ref x => panic!("incorrect frame {x:?}"), + } + } + #[test] fn ack_frequency_coding() { let mut buf = Vec::new(); @@ -1110,4 +1464,76 @@ mod test { x => panic!("incorrect frame {x:?}"), } } + + #[test] + fn test_path_abandon_roundtrip() { + let abandon = PathAbandon { + path_id: PathId(42), + error_code: TransportErrorCode::NO_ERROR, + }; + let mut buf = Vec::new(); + abandon.write(&mut buf); + + let mut decoded = frames(buf); + assert_eq!(decoded.len(), 1); + match decoded.pop().expect("non empty") { + Frame::PathAbandon(decoded) => assert_eq!(decoded, abandon), + x => panic!("incorrect frame {x:?}"), + } + } + + #[test] + fn test_path_available_roundtrip() { + let path_avaiable = PathAvailable { + is_backup: true, + path_id: PathId(42), + status_seq_no: VarInt(73), + }; + let mut buf = Vec::new(); + path_avaiable.write(&mut buf); + + let mut decoded = frames(buf); + assert_eq!(decoded.len(), 1); + match decoded.pop().expect("non empty") { + Frame::PathAvailable(decoded) => assert_eq!(decoded, path_avaiable), + x => panic!("incorrect frame {x:?}"), + } + } + + #[test] + fn 
test_path_new_connection_id_roundtrip() { + let cid = NewConnectionId { + path_id: Some(PathId(22)), + sequence: 31, + retire_prior_to: 13, + id: ConnectionId::new(&[0xAB; 8]), + reset_token: ResetToken::from([0xCD; crate::RESET_TOKEN_SIZE]), + }; + let mut buf = Vec::new(); + cid.encode(&mut buf); + + let mut decoded = frames(buf); + assert_eq!(decoded.len(), 1); + match decoded.pop().expect("non empty") { + Frame::NewConnectionId(decoded) => assert_eq!(decoded, cid), + x => panic!("incorrect frame {x:?}"), + } + } + + #[test] + fn test_path_retire_connection_id_roundtrip() { + let retire_cid = RetireConnectionId { + path_id: Some(PathId(22)), + sequence: 31, + }; + let mut buf = Vec::new(); + retire_cid.write(&mut buf); + + let mut decoded = frames(buf); + assert_eq!(decoded.len(), 1); + match decoded.pop().expect("non empty") { + Frame::RetireConnectionId(decoded) => assert_eq!(decoded, retire_cid), + x => panic!("incorrect frame {x:?}"), + } + } } diff --git a/quinn-proto/src/lib.rs b/quinn-proto/src/lib.rs index 76b550a97a..b0c9b90df0 100644 --- a/quinn-proto/src/lib.rs +++ b/quinn-proto/src/lib.rs @@ -15,9 +15,8 @@ #![cfg_attr(not(fuzzing), warn(missing_docs))] #![cfg_attr(test, allow(dead_code))] // Fixes welcome: -#![warn(unreachable_pub)] -#![allow(clippy::cognitive_complexity)] #![allow(clippy::too_many_arguments)] +#![warn(unreachable_pub)] #![warn(clippy::use_self)] use std::{ @@ -37,12 +36,17 @@ mod varint; pub use varint::{VarInt, VarIntBoundsExceeded}; +#[cfg(feature = "bloom")] +mod bloom_token_log; +#[cfg(feature = "bloom")] +pub use bloom_token_log::BloomTokenLog; + mod connection; pub use crate::connection::{ - BytesSource, Chunk, Chunks, ClosedStream, Connection, ConnectionError, ConnectionStats, - Datagrams, Event, FinishError, FrameStats, PathStats, ReadError, ReadableError, RecvStream, - RttEstimator, SendDatagramError, SendStream, ShouldTransmit, StreamEvent, Streams, UdpStats, - WriteError, Written, + Chunk, Chunks, ClosedStream, 
Connection, ConnectionError, ConnectionStats, Datagrams, Event, + FinishError, FrameStats, PathEvent, PathId, PathStats, PathStatus, ReadError, ReadableError, + RecvStream, RttEstimator, SendDatagramError, SendStream, ShouldTransmit, StreamEvent, Streams, + UdpStats, WriteError, Written, }; #[cfg(feature = "rustls")] @@ -51,7 +55,7 @@ pub use rustls; mod config; pub use config::{ AckFrequencyConfig, ClientConfig, ConfigError, EndpointConfig, IdleTimeout, MtuDiscoveryConfig, - ServerConfig, StdSystemTime, TimeSource, TransportConfig, + ServerConfig, StdSystemTime, TimeSource, TransportConfig, ValidationTokenConfig, }; pub mod crypto; @@ -85,10 +89,14 @@ pub use crate::cid_generator::{ }; mod token; -use token::{ResetToken, RetryToken}; +use token::ResetToken; +pub use token::{NoneTokenLog, NoneTokenStore, TokenLog, TokenReuseError, TokenStore}; mod address_discovery; +mod token_memory_cache; +pub use token_memory_cache::TokenMemoryCache; + #[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; @@ -98,7 +106,6 @@ pub(crate) use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; #[cfg(all(target_family = "wasm", target_os = "unknown"))] pub(crate) use web_time::{Duration, Instant, SystemTime, UNIX_EPOCH}; -#[doc(hidden)] #[cfg(fuzzing)] pub mod fuzzing { pub use crate::connection::{Retransmits, State as ConnectionState, StreamsState}; @@ -209,7 +216,7 @@ impl Dir { impl fmt::Display for Dir { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - use self::Dir::*; + use Dir::*; f.pad(match *self { Bi => "bidirectional", Uni => "unidirectional", @@ -220,7 +227,7 @@ impl fmt::Display for Dir { /// Identifier for a stream within a particular connection #[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub struct StreamId(#[doc(hidden)] pub u64); +pub struct StreamId(u64); impl fmt::Display for StreamId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -245,7 +252,7 @@ impl 
fmt::Display for StreamId { impl StreamId { /// Create a new StreamId pub fn new(initiator: Side, dir: Dir, index: u64) -> Self { - Self(index << 2 | (dir as u64) << 1 | initiator as u64) + Self((index << 2) | ((dir as u64) << 1) | initiator as u64) } /// Which side of a connection initiated the stream pub fn initiator(self) -> Side { @@ -257,11 +264,7 @@ impl StreamId { } /// Which directions data flows in pub fn dir(self) -> Dir { - if self.0 & 0x2 == 0 { - Dir::Bi - } else { - Dir::Uni - } + if self.0 & 0x2 == 0 { Dir::Bi } else { Dir::Uni } } /// Distinguishes streams of the same initiator and directionality pub fn index(self) -> u64 { @@ -281,6 +284,12 @@ impl From for StreamId { } } +impl From for u64 { + fn from(x: StreamId) -> Self { + x.0 + } +} + impl coding::Codec for StreamId { fn decode(buf: &mut B) -> coding::Result { VarInt::decode(buf).map(|x| Self(x.into_inner())) @@ -311,7 +320,7 @@ pub struct Transmit { // Useful internal constants // -/// The maximum number of CIDs we bother to issue per connection +/// The maximum number of CIDs we bother to issue per path const LOC_CID_COUNT: u64 = 8; const RESET_TOKEN_SIZE: usize = 16; const MAX_CID_SIZE: usize = 20; diff --git a/quinn-proto/src/packet.rs b/quinn-proto/src/packet.rs index 1970b4679e..1d6dba25b5 100644 --- a/quinn-proto/src/packet.rs +++ b/quinn-proto/src/packet.rs @@ -4,8 +4,9 @@ use bytes::{Buf, BufMut, Bytes, BytesMut}; use thiserror::Error; use crate::{ + ConnectionId, PathId, coding::{self, BufExt, BufMutExt}, - crypto, ConnectionId, + crypto, }; /// Decodes a QUIC packet's invariant header @@ -74,7 +75,7 @@ impl PartialDecode { } pub(crate) fn space(&self) -> Option { - use self::ProtectedHeader::*; + use ProtectedHeader::*; match self.plain_header { Initial { .. 
} => Some(SpaceId::Initial), Long { @@ -112,7 +113,7 @@ impl PartialDecode { self, header_crypto: Option<&dyn crypto::HeaderKey>, ) -> Result { - use self::ProtectedHeader::*; + use ProtectedHeader::*; let Self { plain_header, mut buf, @@ -218,6 +219,23 @@ impl PartialDecode { } } +/// A buffer that can tell how much has been written to it already +/// +/// This is commonly used for when a buffer is passed and the user may not write past a +/// given size. It allows the user of such a buffer to know the current cursor position in +/// the buffer. The maximum write size is usually passed in the same unit as +/// [`BufLen::len`]: bytes since the buffer start. +pub(crate) trait BufLen { + /// Returns the number of bytes written into the buffer so far + fn len(&self) -> usize; +} + +impl BufLen for Vec { + fn len(&self) -> usize { + self.len() + } +} + pub(crate) struct Packet { pub(crate) header: Header, pub(crate) header_data: Bytes, @@ -280,8 +298,8 @@ pub(crate) enum Header { } impl Header { - pub(crate) fn encode(&self, w: &mut Vec) -> PartialEncode { - use self::Header::*; + pub(crate) fn encode(&self, w: &mut (impl BufMut + BufLen)) -> PartialEncode { + use Header::*; let start = w.len(); match *self { Initial(InitialHeader { @@ -383,7 +401,7 @@ impl Header { } pub(crate) fn number(&self) -> Option { - use self::Header::*; + use Header::*; Some(match *self { Initial(InitialHeader { number, .. }) => number, Long { number, .. } => number, @@ -395,7 +413,7 @@ impl Header { } pub(crate) fn space(&self) -> SpaceId { - use self::Header::*; + use Header::*; match *self { Short { .. } => SpaceId::Data, Long { @@ -435,14 +453,14 @@ impl Header { ) } - pub(crate) fn dst_cid(&self) -> &ConnectionId { - use self::Header::*; + pub(crate) fn dst_cid(&self) -> ConnectionId { + use Header::*; match *self { - Initial(InitialHeader { ref dst_cid, .. }) => dst_cid, - Long { ref dst_cid, .. } => dst_cid, - Retry { ref dst_cid, .. } => dst_cid, - Short { ref dst_cid, .. 
} => dst_cid, - VersionNegotiate { ref dst_cid, .. } => dst_cid, + Initial(InitialHeader { dst_cid, .. }) => dst_cid, + Long { dst_cid, .. } => dst_cid, + Retry { dst_cid, .. } => dst_cid, + Short { dst_cid, .. } => dst_cid, + VersionNegotiate { dst_cid, .. } => dst_cid, } } @@ -471,7 +489,7 @@ impl PartialEncode { self, buf: &mut [u8], header_crypto: &dyn crypto::HeaderKey, - crypto: Option<(u64, &dyn crypto::PacketKey)>, + crypto: Option<(u64, PathId, &dyn crypto::PacketKey)>, ) { let Self { header_len, pn, .. } = self; let (pn_len, write_len) = match pn { @@ -484,11 +502,11 @@ impl PartialEncode { let len = buf.len() - header_len + pn_len; assert!(len < 2usize.pow(14)); // Fits in reserved space let mut slice = &mut buf[pn_pos - 2..pn_pos]; - slice.put_u16(len as u16 | 0b01 << 14); + slice.put_u16(len as u16 | (0b01 << 14)); } - if let Some((number, crypto)) = crypto { - crypto.encrypt(number, buf, header_len); + if let Some((packet_number, path_id, crypto)) = crypto { + crypto.encrypt(path_id, packet_number, buf, header_len); } debug_assert!( @@ -555,7 +573,7 @@ impl ProtectedHeader { /// The destination Connection ID of the packet pub fn dst_cid(&self) -> &ConnectionId { - use self::ProtectedHeader::*; + use ProtectedHeader::*; match self { Initial(header) => &header.dst_cid, Long { dst_cid, .. } => dst_cid, @@ -566,7 +584,7 @@ impl ProtectedHeader { } fn payload_len(&self) -> Option { - use self::ProtectedHeader::*; + use ProtectedHeader::*; match self { Initial(ProtectedInitialHeader { len, .. }) | Long { len, .. 
} => Some(*len), _ => None, @@ -702,7 +720,7 @@ impl PacketNumber { } pub(crate) fn len(self) -> usize { - use self::PacketNumber::*; + use PacketNumber::*; match self { U8(_) => 1, U16(_) => 2, @@ -712,7 +730,7 @@ impl PacketNumber { } pub(crate) fn encode(self, w: &mut W) { - use self::PacketNumber::*; + use PacketNumber::*; match self { U8(x) => w.write(x), U16(x) => w.write(x), @@ -722,7 +740,7 @@ impl PacketNumber { } pub(crate) fn decode(len: usize, r: &mut R) -> Result { - use self::PacketNumber::*; + use PacketNumber::*; let pn = match len { 1 => U8(r.get()?), 2 => U16(r.get()?), @@ -738,7 +756,7 @@ impl PacketNumber { } fn tag(self) -> u8 { - use self::PacketNumber::*; + use PacketNumber::*; match self { U8(_) => 0b00, U16(_) => 0b01, @@ -749,7 +767,7 @@ impl PacketNumber { pub(crate) fn expand(self, expected: u64) -> u64 { // From Appendix A - use self::PacketNumber::*; + use PacketNumber::*; let truncated = match self { U8(x) => u64::from(x), U16(x) => u64::from(x), @@ -815,7 +833,7 @@ pub(crate) enum LongHeaderType { impl LongHeaderType { fn from_byte(b: u8) -> Result { - use self::{LongHeaderType::*, LongType::*}; + use {LongHeaderType::*, LongType::*}; debug_assert!(b & LONG_HEADER_FORM != 0, "not a long packet"); Ok(match (b & 0x30) >> 4 { 0x0 => Initial, @@ -829,7 +847,7 @@ impl LongHeaderType { impl From for u8 { fn from(ty: LongHeaderType) -> Self { - use self::{LongHeaderType::*, LongType::*}; + use {LongHeaderType::*, LongType::*}; match ty { Initial => LONG_HEADER_FORM | FIXED_BIT, Standard(ZeroRtt) => LONG_HEADER_FORM | FIXED_BIT | (0x1 << 4), @@ -893,6 +911,17 @@ impl SpaceId { pub fn iter() -> impl Iterator { [Self::Initial, Self::Handshake, Self::Data].iter().cloned() } + + /// Returns the next higher packet space. + /// + /// Keeps returning [`SpaceId::Data`] as the highest space. 
+ pub fn next(&self) -> Self { + match self { + Self::Initial => Self::Handshake, + Self::Handshake => Self::Data, + Self::Data => Self::Data, + } + } } #[cfg(test)] @@ -937,8 +966,8 @@ mod tests { #[cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] #[test] fn header_encoding() { - use crate::crypto::rustls::{initial_keys, initial_suite_from_provider}; use crate::Side; + use crate::crypto::rustls::{initial_keys, initial_suite_from_provider}; #[cfg(all(feature = "rustls-aws-lc-rs", not(feature = "rustls-ring")))] use rustls::crypto::aws_lc_rs::default_provider; #[cfg(feature = "rustls-ring")] @@ -949,7 +978,7 @@ mod tests { let provider = default_provider(); let suite = initial_suite_from_provider(&std::sync::Arc::new(provider)).unwrap(); - let client = initial_keys(Version::V1, &dcid, Side::Client, &suite); + let client = initial_keys(Version::V1, dcid, Side::Client, &suite); let mut buf = Vec::new(); let header = Header::Initial(InitialHeader { number: PacketNumber::U8(0), @@ -964,7 +993,7 @@ mod tests { encode.finish( &mut buf, &*client.header.local, - Some((0, &*client.packet.local)), + Some((0, PathId(0), &*client.packet.local)), ); for byte in &buf { @@ -979,7 +1008,7 @@ mod tests { )[..] 
); - let server = initial_keys(Version::V1, &dcid, Side::Server, &suite); + let server = initial_keys(Version::V1, dcid, Side::Server, &suite); let supported_versions = crate::DEFAULT_SUPPORTED_VERSIONS.to_vec(); let decode = PartialDecode::new( buf.as_slice().into(), @@ -997,7 +1026,7 @@ mod tests { server .packet .remote - .decrypt(0, &packet.header_data, &mut packet.payload) + .decrypt(PathId(0), 0, &packet.header_data, &mut packet.payload) .unwrap(); assert_eq!(packet.payload[..], [0; 16]); match packet.header { diff --git a/quinn-proto/src/range_set/btree_range_set.rs b/quinn-proto/src/range_set/btree_range_set.rs index d8eb11de30..9121bd9cb0 100644 --- a/quinn-proto/src/range_set/btree_range_set.rs +++ b/quinn-proto/src/range_set/btree_range_set.rs @@ -1,7 +1,7 @@ use std::{ cmp, cmp::Ordering, - collections::{btree_map, BTreeMap}, + collections::{BTreeMap, btree_map}, ops::{ Bound::{Excluded, Included}, Range, diff --git a/quinn-proto/src/shared.rs b/quinn-proto/src/shared.rs index 8f377f0a1f..c2382a3dd2 100644 --- a/quinn-proto/src/shared.rs +++ b/quinn-proto/src/shared.rs @@ -2,7 +2,8 @@ use std::{fmt, net::SocketAddr}; use bytes::{Buf, BufMut, BytesMut}; -use crate::{coding::BufExt, packet::PartialDecode, Instant, ResetToken, MAX_CID_SIZE}; +use crate::PathId; +use crate::{Duration, Instant, MAX_CID_SIZE, ResetToken, coding::BufExt, packet::PartialDecode}; /// Events sent from an Endpoint to a Connection #[derive(Debug)] @@ -13,7 +14,7 @@ pub(crate) enum ConnectionEventInner { /// A datagram has been received for the Connection Datagram(DatagramConnectionEvent), /// New connection identifiers have been issued for the Connection - NewIdentifiers(Vec, Instant), + NewIdentifiers(Vec, Instant, usize, Option), } /// Variant of [`ConnectionEventInner`]. 
@@ -21,6 +22,7 @@ pub(crate) enum ConnectionEventInner { pub(crate) struct DatagramConnectionEvent { pub(crate) now: Instant, pub(crate) remote: SocketAddr, + pub(crate) path_id: PathId, pub(crate) ecn: Option, pub(crate) first_decode: PartialDecode, pub(crate) remaining: Option, @@ -54,10 +56,10 @@ pub(crate) enum EndpointEventInner { /// The reset token and/or address eligible for generating resets has been updated ResetToken(SocketAddr, ResetToken), /// The connection needs connection identifiers - NeedIdentifiers(Instant, u64), + NeedIdentifiers(PathId, Instant, u64), /// Stop routing connection ID for this sequence number to the connection /// When `bool == true`, a new connection ID will be issued to peer - RetireConnectionId(Instant, u64, bool), + RetireConnectionId(Instant, PathId, u64, bool), } /// Protocol-level identifier for a connection. @@ -96,6 +98,10 @@ impl ConnectionId { res } + pub(crate) fn len(&self) -> usize { + self.len as usize + } + /// Decode from long header format pub(crate) fn decode_long(buf: &mut impl Buf) -> Option { let len = buf.get::().ok()? 
as usize; @@ -144,18 +150,18 @@ impl fmt::Display for ConnectionId { #[repr(u8)] #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum EcnCodepoint { - #[doc(hidden)] + /// The ECT(0) codepoint, indicating that an endpoint is ECN-capable Ect0 = 0b10, - #[doc(hidden)] + /// The ECT(1) codepoint, indicating that an endpoint is ECN-capable Ect1 = 0b01, - #[doc(hidden)] + /// The CE codepoint, signalling that congestion was experienced Ce = 0b11, } impl EcnCodepoint { /// Create new object from the given bits pub fn from_bits(x: u8) -> Option { - use self::EcnCodepoint::*; + use EcnCodepoint::*; Some(match x & 0b11 { 0b10 => Ect0, 0b01 => Ect1, @@ -174,6 +180,7 @@ impl EcnCodepoint { #[derive(Debug, Copy, Clone)] pub(crate) struct IssuedCid { + pub(crate) path_id: PathId, pub(crate) sequence: u64, pub(crate) id: ConnectionId, pub(crate) reset_token: ResetToken, diff --git a/quinn-proto/src/tests/mod.rs b/quinn-proto/src/tests/mod.rs index 98438d3425..be8868c8c8 100644 --- a/quinn-proto/src/tests/mod.rs +++ b/quinn-proto/src/tests/mod.rs @@ -2,7 +2,8 @@ use std::{ convert::TryInto, mem, net::{Ipv4Addr, Ipv6Addr, SocketAddr}, - sync::Arc, + num::NonZeroU32, + sync::{Arc, Mutex}, }; use assert_matches::assert_matches; @@ -18,23 +19,25 @@ use rustls::crypto::aws_lc_rs::default_provider; #[cfg(feature = "rustls-ring")] use rustls::crypto::ring::default_provider; use rustls::{ + AlertDescription, RootCertStore, pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer}, server::WebPkiClientVerifier, - AlertDescription, RootCertStore, }; use tracing::info; use super::*; use crate::{ + Duration, Instant, cid_generator::{ConnectionIdGenerator, RandomConnectionIdGenerator}, crypto::rustls::QuicServerConfig, frame::FrameStruct, transport_parameters::TransportParameters, - Duration, Instant, }; mod util; use util::*; +mod token; + #[cfg(all(target_family = "wasm", target_os = "unknown"))] use wasm_bindgen_test::wasm_bindgen_test as test; @@ -182,29 +185,11 @@ fn 
draft_version_compat() { assert_eq!(pair.server.known_cids(), 0); } -#[test] -fn stateless_retry() { - let _guard = subscribe(); - let mut pair = Pair::default(); - pair.server.incoming_connection_behavior = IncomingConnectionBehavior::Validate; - let (client_ch, _server_ch) = pair.connect(); - pair.client - .connections - .get_mut(&client_ch) - .unwrap() - .close(pair.time, VarInt(42), Bytes::new()); - pair.drive(); - assert_eq!(pair.client.known_connections(), 0); - assert_eq!(pair.client.known_cids(), 0); - assert_eq!(pair.server.known_connections(), 0); - assert_eq!(pair.server.known_cids(), 0); -} - #[test] fn server_stateless_reset() { let _guard = subscribe(); let mut key_material = vec![0; 64]; - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); rng.fill_bytes(&mut key_material); let reset_key = hmac::Key::new(hmac::HMAC_SHA256, &key_material); rng.fill_bytes(&mut key_material); @@ -219,7 +204,11 @@ fn server_stateless_reset() { pair.server.endpoint = Endpoint::new(endpoint_config, Some(Arc::new(server_config())), true, None); // Force the server to generate the smallest possible stateless reset - pair.client.connections.get_mut(&client_ch).unwrap().ping(); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .ping(PathId(0)); info!("resetting"); pair.drive(); assert_matches!( @@ -234,7 +223,7 @@ fn server_stateless_reset() { fn client_stateless_reset() { let _guard = subscribe(); let mut key_material = vec![0; 64]; - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); rng.fill_bytes(&mut key_material); let reset_key = hmac::Key::new(hmac::HMAC_SHA256, &key_material); rng.fill_bytes(&mut key_material); @@ -537,7 +526,6 @@ fn congestion() { pair.client_send(client_ch, s).write(&[42; 1024]).unwrap(); } -#[allow(clippy::field_reassign_with_default)] // https://github.com/rust-lang/rust-clippy/issues/6527 #[test] fn high_latency_handshake() { let _guard = subscribe(); @@ -554,7 +542,7 @@ fn high_latency_handshake() { fn 
zero_rtt_happypath() { let _guard = subscribe(); let mut pair = Pair::default(); - pair.server.incoming_connection_behavior = IncomingConnectionBehavior::Validate; + pair.server.handle_incoming = Box::new(validate_incoming); let config = client_config(); // Establish normal connection @@ -723,7 +711,7 @@ fn test_zero_rtt_incoming_limit(configure_server: CLIENT_PORTS.lock().unwrap().next().unwrap(), ); info!("resuming session"); - pair.server.incoming_connection_behavior = IncomingConnectionBehavior::Wait; + pair.server.handle_incoming = Box::new(|_| IncomingConnectionBehavior::Wait); let client_ch = pair.begin_connect(config); assert!(pair.client_conn_mut(client_ch).has_0rtt()); let s = pair.client_streams(client_ch).open(Dir::Uni).unwrap(); @@ -1012,7 +1000,7 @@ fn key_update_simple() { let _ = chunks.finalize(); info!("initiating key update"); - pair.client_conn_mut(client_ch).initiate_key_update(); + pair.client_conn_mut(client_ch).force_key_update(); const MSG2: &[u8] = b"hello2"; pair.client_send(client_ch, s).write(MSG2).unwrap(); @@ -1052,7 +1040,7 @@ fn key_update_reordered() { assert!(!pair.client.outbound.is_empty()); pair.client.delay_outbound(); - pair.client_conn_mut(client_ch).initiate_key_update(); + pair.client_conn_mut(client_ch).force_key_update(); info!("updated keys"); const MSG2: &[u8] = b"two"; @@ -1094,7 +1082,7 @@ fn initial_retransmit() { ); assert_matches!( pair.client_conn_mut(client_ch).poll(), - Some(Event::Connected { .. 
}) + Some(Event::Connected) ); } @@ -1194,7 +1182,7 @@ fn idle_timeout() { }; let mut pair = Pair::new(Default::default(), server); let (client_ch, server_ch) = pair.connect(); - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); let start = pair.time; while !pair.client_conn_mut(client_ch).is_closed() @@ -1231,7 +1219,7 @@ fn connection_close_sends_acks() { let client_acks = pair.client_conn_mut(client_ch).stats().frame_rx.acks; - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); let time = pair.time; @@ -1255,6 +1243,7 @@ fn server_hs_retransmit() { pair.step(); assert!(!pair.client.inbound.is_empty()); // Initial + Handshakes pair.client.inbound.clear(); + info!("client inbound queue cleared"); pair.drive(); assert_matches!( pair.client_conn_mut(client_ch).poll(), @@ -1262,7 +1251,7 @@ fn server_hs_retransmit() { ); assert_matches!( pair.client_conn_mut(client_ch).poll(), - Some(Event::Connected { .. 
}) + Some(Event::Connected) ); } @@ -1279,7 +1268,7 @@ fn migration() { Ipv4Addr::new(127, 0, 0, 1).into(), CLIENT_PORTS.lock().unwrap().next().unwrap(), ); - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); // Assert that just receiving the ping message is accounted into the servers // anti-amplification budget @@ -1575,8 +1564,8 @@ fn cid_rotation() { let mut stop = pair.time; let end = pair.time + 5 * CID_TIMEOUT; - use crate::cid_queue::CidQueue; use crate::LOC_CID_COUNT; + use crate::cid_queue::CidQueue; let mut active_cid_num = CidQueue::LEN as u64 + 1; active_cid_num = active_cid_num.min(LOC_CID_COUNT); let mut left_bound = 0; @@ -1608,6 +1597,86 @@ fn cid_rotation() { } } +#[test] +fn multipath_cid_rotation() { + let _guard = subscribe(); + const CID_TIMEOUT: Duration = Duration::from_secs(2); + + let cid_generator_factory: fn() -> Box = + || Box::new(*RandomConnectionIdGenerator::new(8).set_lifetime(CID_TIMEOUT)); + + // Only test cid rotation on server side to have a clear output trace + let server_cfg = ServerConfig { + transport: Arc::new(TransportConfig { + max_concurrent_multipath_paths: NonZeroU32::new(6), + ..TransportConfig::default() + }), + ..server_config() + }; + + let server = Endpoint::new( + Arc::new(EndpointConfig { + connection_id_generator_factory: Arc::new(cid_generator_factory), + ..EndpointConfig::default() + }), + Some(Arc::new(server_cfg)), + true, + None, + ); + let client = Endpoint::new(Arc::new(EndpointConfig::default()), None, true, None); + + let mut pair = Pair::new_from_endpoint(client, server); + let client_cfg = ClientConfig { + transport: Arc::new(TransportConfig { + max_concurrent_multipath_paths: NonZeroU32::new(3), + ..TransportConfig::default() + }), + ..client_config() + }; + + let (_, server_ch) = pair.connect_with(client_cfg); + + let mut round: u64 = 1; + let mut stop = pair.time; + let end = pair.time + 5 * CID_TIMEOUT; + + use crate::LOC_CID_COUNT; + use 
crate::cid_queue::CidQueue; + + let mut active_cid_num = CidQueue::LEN as u64 + 1; + active_cid_num = active_cid_num.min(LOC_CID_COUNT); + let mut left_bound = 0; + let mut right_bound = active_cid_num - 1; + + while pair.time < end { + stop += CID_TIMEOUT; + // Run a while until PushNewCID timer fires + while pair.time < stop { + if !pair.step() { + if let Some(time) = min_opt(pair.client.next_wakeup(), pair.server.next_wakeup()) { + pair.time = time; + } + } + } + info!( + "Checking active cid sequence range before {:?} seconds", + round * CID_TIMEOUT.as_secs() + ); + let _bound = (left_bound, right_bound); + for path_id in 0..=2 { + assert_matches!( + pair.server_conn_mut(server_ch) + .active_local_path_cid_seq(path_id), + _bound + ); + } + round += 1; + left_bound += active_cid_num; + right_bound += active_cid_num; + pair.drive_server(); + } +} + #[test] fn cid_retirement() { let _guard = subscribe(); @@ -1623,22 +1692,23 @@ fn cid_retirement() { assert!(!pair.server_conn_mut(server_ch).is_closed()); assert_matches!(pair.client_conn_mut(client_ch).active_rem_cid_seq(), 1); - use crate::cid_queue::CidQueue; use crate::LOC_CID_COUNT; + use crate::cid_queue::CidQueue; let mut active_cid_num = CidQueue::LEN as u64; active_cid_num = active_cid_num.min(LOC_CID_COUNT); let next_retire_prior_to = active_cid_num + 1; - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); // Server retires all valid remote CIDs pair.server_conn_mut(server_ch) .rotate_local_cid(next_retire_prior_to, Instant::now()); pair.drive(); assert!(!pair.client_conn_mut(client_ch).is_closed()); assert!(!pair.server_conn_mut(server_ch).is_closed()); - assert_matches!( + + assert_eq!( pair.client_conn_mut(client_ch).active_rem_cid_seq(), - _next_retire_prior_to + next_retire_prior_to, ); } @@ -1796,6 +1866,100 @@ fn congested_tail_loss() { pair.client_send(client_ch, s).write(&[42; 1024]).unwrap(); } +// Send a tail-loss probe when GSO segment_size is less than 
INITIAL_MTU +#[test] +fn tail_loss_small_segment_size() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let (client_ch, server_ch) = pair.connect(); + + // No datagrams frames received in the handshake. + let server_stats = pair.server_conn_mut(server_ch).stats(); + assert_eq!(server_stats.frame_rx.datagram, 0); + + const DGRAM_LEN: usize = 1000; // Below INITIAL_MTU after packet overhead. + const DGRAM_NUM: u64 = 5; // Enough to build a GSO batch. + + info!("Sending an ack-eliciting datagram"); + pair.client_conn_mut(client_ch).ping(PathId(0)); + pair.drive_client(); + + // Drop these packets on the server side. + assert!(!pair.server.inbound.is_empty()); + pair.server.inbound.clear(); + + // Doing one step makes the client advance time to the PTO fire time. + info!("stepping forward to PTO"); + pair.step(); + + // Still no datagrams frames received by the server. + let server_stats = pair.server_conn_mut(server_ch).stats(); + assert_eq!(server_stats.frame_rx.datagram, 0); + + // Now we can send another batch of datagrams, so the PTO can send them instead of + // sending a ping. These are small enough that the segment_size is less than the + // INITIAL_MTU. + info!("Sending datagram batch"); + for _ in 0..DGRAM_NUM { + pair.client_datagrams(client_ch) + .send(vec![0; DGRAM_LEN].into(), false) + .unwrap(); + } + + // If this succeeds the datagrams are received by the server and the client did not + // crash. + pair.drive(); + + // Finally the server should have received some datagrams. 
+ let server_stats = pair.server_conn_mut(server_ch).stats(); + assert_eq!(server_stats.frame_rx.datagram, DGRAM_NUM); +} + +// Respect max_datagrams when TLP happens +#[test] +fn tail_loss_respect_max_datagrams() { + let _guard = subscribe(); + let client_config = { + let mut c_config = client_config(); + let mut t_config = TransportConfig::default(); + //Disabling GSO, so only a single segment should be sent per iops + t_config.enable_segmentation_offload(false); + c_config.transport_config(t_config.into()); + c_config + }; + let mut pair = Pair::default(); + let (client_ch, _) = pair.connect_with(client_config); + + const DGRAM_LEN: usize = 1000; // High enough so GSO batch could be built + const DGRAM_NUM: u64 = 5; // Enough to build a GSO batch. + + info!("Sending an ack-eliciting datagram"); + pair.client_conn_mut(client_ch).ping(PathId(0)); + pair.drive_client(); + + // Drop these packets on the server side. + assert!(!pair.server.inbound.is_empty()); + pair.server.inbound.clear(); + + // Doing one step makes the client advance time to the PTO fire time. + info!("stepping forward to PTO"); + pair.step(); + + // start sending datagram batches but the first should be a TLP + info!("Sending datagram batch"); + for _ in 0..DGRAM_NUM { + pair.client_datagrams(client_ch) + .send(vec![0; DGRAM_LEN].into(), false) + .unwrap(); + } + + pair.drive(); + + // Finally checking the number of sent udp datagrams match the number of iops + let client_stats = pair.client_conn_mut(client_ch).stats(); + assert_eq!(client_stats.udp_tx.ios, client_stats.udp_tx.datagrams); +} + #[test] fn datagram_send_recv() { let _guard = subscribe(); @@ -1906,7 +2070,7 @@ fn large_initial() { ); assert_matches!( pair.client_conn_mut(client_ch).poll(), - Some(Event::Connected { .. 
}) + Some(Event::Connected) ); assert_matches!( pair.server_conn_mut(server_ch).poll(), @@ -1914,7 +2078,7 @@ fn large_initial() { ); assert_matches!( pair.server_conn_mut(server_ch).poll(), - Some(Event::Connected { .. }) + Some(Event::Connected) ); } @@ -2098,7 +2262,7 @@ fn handshake_anti_deadlock_probe() { ); assert_matches!( pair.client_conn_mut(client_ch).poll(), - Some(Event::Connected { .. }) + Some(Event::Connected) ); } @@ -2128,7 +2292,7 @@ fn server_can_send_3_inital_packets() { ); assert_matches!( pair.client_conn_mut(client_ch).poll(), - Some(Event::Connected { .. }) + Some(Event::Connected) ); } @@ -2180,7 +2344,7 @@ fn loss_probe_requests_immediate_ack() { // Lose a ping let default_mtu = mem::replace(&mut pair.mtu, 0); - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); pair.mtu = default_mtu; @@ -2223,14 +2387,16 @@ fn connect_lost_mtu_probes_do_not_trigger_congestion_control() { let server_stats = pair.server_conn_mut(server_ch).stats(); // Sanity check (all MTU probes should have been lost) - assert_eq!(client_stats.path.sent_plpmtud_probes, 9); - assert_eq!(client_stats.path.lost_plpmtud_probes, 9); - assert_eq!(server_stats.path.sent_plpmtud_probes, 9); - assert_eq!(server_stats.path.lost_plpmtud_probes, 9); + let client_path_stats = client_stats.paths.get(&PathId::ZERO).unwrap(); + assert_eq!(client_path_stats.sent_plpmtud_probes, 9); + assert_eq!(client_path_stats.lost_plpmtud_probes, 9); + let server_path_stats = server_stats.paths.get(&PathId::ZERO).unwrap(); + assert_eq!(server_path_stats.sent_plpmtud_probes, 9); + assert_eq!(server_path_stats.lost_plpmtud_probes, 9); // No congestion events - assert_eq!(client_stats.path.congestion_events, 0); - assert_eq!(server_stats.path.congestion_events, 0); + assert_eq!(client_path_stats.congestion_events, 0); + assert_eq!(server_path_stats.congestion_events, 0); } #[test] @@ -2286,7 +2452,7 @@ fn 
migrate_detects_new_mtu_and_respects_original_peer_max_udp_payload_size() { Ipv4Addr::new(127, 0, 0, 1).into(), CLIENT_PORTS.lock().unwrap().next().unwrap(), ); - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive(); // Sanity check: the server saw that the client address was updated @@ -2328,13 +2494,17 @@ fn connect_runs_mtud_again_after_600_seconds() { // Sanity check: the mtu has been discovered let client_conn = pair.client_conn_mut(client_ch); + let client_stats = client_conn.stats(); + let client_path_stats = client_stats.paths.get(&PathId::ZERO).unwrap(); assert_eq!(client_conn.path_mtu(), 1389); - assert_eq!(client_conn.stats().path.sent_plpmtud_probes, 5); - assert_eq!(client_conn.stats().path.lost_plpmtud_probes, 3); + assert_eq!(client_path_stats.sent_plpmtud_probes, 5); + assert_eq!(client_path_stats.lost_plpmtud_probes, 3); let server_conn = pair.server_conn_mut(server_ch); + let server_stats = server_conn.stats(); + let server_path_stats = server_stats.paths.get(&PathId::ZERO).unwrap(); assert_eq!(server_conn.path_mtu(), 1389); - assert_eq!(server_conn.stats().path.sent_plpmtud_probes, 5); - assert_eq!(server_conn.stats().path.lost_plpmtud_probes, 3); + assert_eq!(server_path_stats.sent_plpmtud_probes, 5); + assert_eq!(server_path_stats.lost_plpmtud_probes, 3); // Sanity check: the mtu does not change after the fact, even though the link now supports a // higher udp payload size @@ -2386,9 +2556,10 @@ fn blackhole_after_mtu_change_repairs_itself() { // Sanity checks (black hole detected after 3 lost packets) let client_stats = pair.client_conn_mut(client_ch).stats(); - assert!(client_stats.path.lost_packets >= 3); - assert!(client_stats.path.congestion_events >= 3); - assert_eq!(client_stats.path.black_holes_detected, 1); + let client_path_stats = client_stats.paths.get(&PathId::ZERO).unwrap(); + assert!(client_path_stats.lost_packets >= 3); + assert!(client_path_stats.congestion_events >= 3); + 
assert_eq!(client_path_stats.black_holes_detected, 1); } #[test] @@ -2399,7 +2570,8 @@ fn mtud_probes_include_immediate_ack() { pair.drive(); let stats = pair.client_conn_mut(client_ch).stats(); - assert_eq!(stats.path.sent_plpmtud_probes, 4); + let path_stats = stats.paths.get(&PathId::ZERO).unwrap(); + assert_eq!(path_stats.sent_plpmtud_probes, 4); // Each probe contains a ping and an immediate ack assert_eq!(stats.frame_tx.ping, 4); @@ -2459,7 +2631,7 @@ fn single_ack_eliciting_packet_triggers_ack_after_delay() { let stats_after_connect = pair.client_conn_mut(client_ch).stats(); let start = pair.time; - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); // Send ping pair.drive_server(); // Process ping pair.drive_client(); // Give the client a chance to process an ack, so our assertion can fail @@ -2523,7 +2695,7 @@ fn immediate_ack_triggers_ack() { let acks_after_connect = pair.client_conn_mut(client_ch).stats().frame_rx.acks; - pair.client_conn_mut(client_ch).immediate_ack(); + pair.client_conn_mut(client_ch).immediate_ack(PathId(0)); pair.drive_client(); // Send immediate ack pair.drive_server(); // Process immediate ack pair.drive_client(); // Give the client a chance to process the ack @@ -2547,7 +2719,7 @@ fn out_of_order_ack_eliciting_packet_triggers_ack() { // Send a packet that won't arrive right away (it will be dropped and be re-sent later) pair.mtu = 0; - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); // Sanity check (ping sent, no ACK received) @@ -2563,7 +2735,7 @@ fn out_of_order_ack_eliciting_packet_triggers_ack() { // Restore the default MTU and send another ping, which will arrive earlier than the dropped one pair.mtu = default_mtu; - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); pair.drive_server(); pair.drive_client(); @@ -2602,7 +2774,7 @@ fn 
single_ack_eliciting_packet_with_ce_bit_triggers_immediate_ack() { let start = pair.time; - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.congestion_experienced = true; pair.drive_client(); // Send ping @@ -2623,8 +2795,10 @@ fn single_ack_eliciting_packet_with_ce_bit_triggers_immediate_ack() { stats_after_ping.frame_rx.acks - stats_after_connect.frame_rx.acks, 1 ); + let after_ping_path_stats = stats_after_ping.paths.get(&PathId::ZERO).unwrap(); + let after_connect_path_stats = stats_after_connect.paths.get(&PathId::ZERO).unwrap(); assert_eq!( - stats_after_ping.path.congestion_events - stats_after_connect.path.congestion_events, + after_ping_path_stats.congestion_events - after_connect_path_stats.congestion_events, 1 ); } @@ -2666,12 +2840,12 @@ fn ack_frequency_ack_delayed_from_first_of_flight() { // // * 0 ms: ping // * 5 ms: ping x2 - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); pair.time += Duration::from_millis(5); for _ in 0..2 { - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); } @@ -2721,7 +2895,7 @@ fn ack_frequency_ack_sent_after_max_ack_delay() { let (mut pair, client_ch, server_ch) = setup_ack_frequency_test(max_ack_delay); // Client sends a ping - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); // Server: receive the ping, send no ACK @@ -2763,12 +2937,12 @@ fn ack_frequency_ack_sent_after_packets_above_threshold() { // // * 0 ms: ping // * 5 ms: ping (11x) - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); pair.time += Duration::from_millis(5); for _ in 0..11 { - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); } @@ -2812,19 +2986,19 @@ fn 
ack_frequency_ack_sent_after_reordered_packets_below_threshold() { // * 0 ms: ping // * 5 ms: ping (lost) // * 5 ms: ping - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); pair.time += Duration::from_millis(5); // Send and lose an ack-eliciting packet pair.mtu = 0; - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); // Restore the default MTU and send another ping, which will arrive earlier than the dropped one pair.mtu = DEFAULT_MTU; - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); // Server: receive first ping, send no ACK @@ -2863,20 +3037,20 @@ fn ack_frequency_ack_sent_after_reordered_packets_above_threshold() { let (mut pair, client_ch, server_ch) = setup_ack_frequency_test(max_ack_delay); // Send a ping - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); // Send and lose two ack-eliciting packets pair.time += Duration::from_millis(5); pair.mtu = 0; for _ in 0..2 { - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); } // Restore the default MTU and send another ping, which will arrive earlier than the dropped ones pair.mtu = DEFAULT_MTU; - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive_client(); // Server: receive first ping, send no ACK @@ -2924,7 +3098,7 @@ fn ack_frequency_update_max_delay() { // Client sends a PING info!("first ping"); - pair.client_conn_mut(client_ch).ping(); + pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive(); // No change in ACK frequency @@ -2939,7 +3113,7 @@ fn ack_frequency_update_max_delay() { // RTT jumps, client sends another ping info!("delayed ping"); pair.latency *= 10; - pair.client_conn_mut(client_ch).ping(); + 
pair.client_conn_mut(client_ch).ping(PathId(0)); pair.drive(); // ACK frequency updated @@ -2993,7 +3167,7 @@ fn pure_sender_voluntarily_acks() { fn reject_manually() { let _guard = subscribe(); let mut pair = Pair::default(); - pair.server.incoming_connection_behavior = IncomingConnectionBehavior::RejectAll; + pair.server.handle_incoming = Box::new(|_| IncomingConnectionBehavior::Reject); // The server should now reject incoming connections. let client_ch = pair.begin_connect(client_config()); @@ -3013,7 +3187,20 @@ fn reject_manually() { fn validate_then_reject_manually() { let _guard = subscribe(); let mut pair = Pair::default(); - pair.server.incoming_connection_behavior = IncomingConnectionBehavior::ValidateThenReject; + pair.server.handle_incoming = Box::new({ + let mut i = 0; + move |incoming| { + if incoming.remote_address_validated() { + assert_eq!(i, 1); + i += 1; + IncomingConnectionBehavior::Reject + } else { + assert_eq!(i, 0); + i += 1; + IncomingConnectionBehavior::Retry + } + } + }); // The server should now retry and reject incoming connections. 
let client_ch = pair.begin_connect(client_config()); @@ -3136,10 +3323,10 @@ fn large_datagram_with_acks() { // Force the client to generate a large ACK frame by dropping several packets for _ in 0..10 { - pair.server_conn_mut(server_ch).ping(); + pair.server_conn_mut(server_ch).ping(PathId(0)); pair.drive_server(); pair.client.inbound.pop_back(); - pair.server_conn_mut(server_ch).ping(); + pair.server_conn_mut(server_ch).ping(PathId(0)); pair.drive_server(); } @@ -3244,7 +3431,6 @@ fn address_discovery_zero_rtt_accepted() { }; let mut pair = Pair::new(Default::default(), server); - pair.server.incoming_connection_behavior = IncomingConnectionBehavior::Validate; let client_cfg = ClientConfig { transport: Arc::new(TransportConfig { address_discovery_role: crate::address_discovery::Role::Both, diff --git a/quinn-proto/src/tests/token.rs b/quinn-proto/src/tests/token.rs new file mode 100644 index 0000000000..ac466c6266 --- /dev/null +++ b/quinn-proto/src/tests/token.rs @@ -0,0 +1,333 @@ +//! 
Tests specifically for tokens + +use super::*; + +#[cfg(all(target_family = "wasm", target_os = "unknown"))] +use wasm_bindgen_test::wasm_bindgen_test as test; + +#[test] +fn stateless_retry() { + let _guard = subscribe(); + let mut pair = Pair::default(); + pair.server.handle_incoming = Box::new(validate_incoming); + let (client_ch, _server_ch) = pair.connect(); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn retry_token_expired() { + let _guard = subscribe(); + + let fake_time = Arc::new(FakeTimeSource::new()); + let retry_token_lifetime = Duration::from_secs(1); + + let mut pair = Pair::default(); + pair.server.handle_incoming = Box::new(validate_incoming); + + let mut config = server_config(); + config + .time_source(Arc::clone(&fake_time) as _) + .retry_token_lifetime(retry_token_lifetime); + pair.server.set_server_config(Some(Arc::new(config))); + + let client_ch = pair.begin_connect(client_config()); + pair.drive_client(); + pair.drive_server(); + pair.drive_client(); + + // to expire retry token + fake_time.advance(retry_token_lifetime + Duration::from_millis(1)); + + pair.drive(); + assert_matches!( + pair.client_conn_mut(client_ch).poll(), + Some(Event::ConnectionLost { reason: ConnectionError::ConnectionClosed(err) }) + if err.error_code == TransportErrorCode::INVALID_TOKEN + ); + + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn use_token() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let client_config = client_config(); + let (client_ch, _server_ch) = pair.connect_with(client_config.clone()); + 
pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + pair.server.handle_incoming = Box::new(|incoming| { + assert!(incoming.remote_address_validated()); + assert!(incoming.may_retry()); + IncomingConnectionBehavior::Accept + }); + let (client_ch_2, _server_ch_2) = pair.connect_with(client_config); + pair.client + .connections + .get_mut(&client_ch_2) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn retry_then_use_token() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let client_config = client_config(); + pair.server.handle_incoming = Box::new(validate_incoming); + let (client_ch, _server_ch) = pair.connect_with(client_config.clone()); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + pair.server.handle_incoming = Box::new(|incoming| { + assert!(incoming.remote_address_validated()); + assert!(incoming.may_retry()); + IncomingConnectionBehavior::Accept + }); + let (client_ch_2, _server_ch_2) = pair.connect_with(client_config); + pair.client + .connections + .get_mut(&client_ch_2) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + 
assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn use_token_then_retry() { + let _guard = subscribe(); + let mut pair = Pair::default(); + let client_config = client_config(); + let (client_ch, _server_ch) = pair.connect_with(client_config.clone()); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + pair.server.handle_incoming = Box::new({ + let mut i = 0; + move |incoming| { + if i == 0 { + assert!(incoming.remote_address_validated()); + assert!(incoming.may_retry()); + i += 1; + IncomingConnectionBehavior::Retry + } else if i == 1 { + assert!(incoming.remote_address_validated()); + assert!(!incoming.may_retry()); + i += 1; + IncomingConnectionBehavior::Accept + } else { + panic!("too many handle_incoming iterations") + } + } + }); + let (client_ch_2, _server_ch_2) = pair.connect_with(client_config); + pair.client + .connections + .get_mut(&client_ch_2) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn use_same_token_twice() { + #[derive(Default)] + struct EvilTokenStore(Mutex); + + impl TokenStore for EvilTokenStore { + fn insert(&self, _server_name: &str, token: Bytes) { + let mut lock = self.0.lock().unwrap(); + if lock.is_empty() { + *lock = token; + } + } + + fn take(&self, _server_name: &str) -> Option { + let lock = self.0.lock().unwrap(); + if lock.is_empty() { + None + } else { + Some(lock.clone()) + } + } + } + + let _guard = subscribe(); + let mut pair = Pair::default(); + let mut client_config = client_config(); + 
client_config.token_store(Arc::new(EvilTokenStore::default())); + let (client_ch, _server_ch) = pair.connect_with(client_config.clone()); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + pair.server.handle_incoming = Box::new(|incoming| { + assert!(incoming.remote_address_validated()); + assert!(incoming.may_retry()); + IncomingConnectionBehavior::Accept + }); + let (client_ch_2, _server_ch_2) = pair.connect_with(client_config.clone()); + pair.client + .connections + .get_mut(&client_ch_2) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + pair.server.handle_incoming = Box::new(|incoming| { + assert!(!incoming.remote_address_validated()); + assert!(incoming.may_retry()); + IncomingConnectionBehavior::Accept + }); + let (client_ch_3, _server_ch_3) = pair.connect_with(client_config); + pair.client + .connections + .get_mut(&client_ch_3) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +#[test] +fn use_token_expired() { + let _guard = subscribe(); + let fake_time = Arc::new(FakeTimeSource::new()); + let lifetime = Duration::from_secs(10000); + let mut server_config = server_config(); + server_config + .time_source(Arc::clone(&fake_time) as _) + .validation_token + .lifetime(lifetime); + let mut pair = Pair::new(Default::default(), server_config); + let client_config = 
client_config(); + let (client_ch, _server_ch) = pair.connect_with(client_config.clone()); + pair.client + .connections + .get_mut(&client_ch) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + pair.server.handle_incoming = Box::new(|incoming| { + assert!(incoming.remote_address_validated()); + assert!(incoming.may_retry()); + IncomingConnectionBehavior::Accept + }); + let (client_ch_2, _server_ch_2) = pair.connect_with(client_config.clone()); + pair.client + .connections + .get_mut(&client_ch_2) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); + + fake_time.advance(lifetime + Duration::from_secs(1)); + + pair.server.handle_incoming = Box::new(|incoming| { + assert!(!incoming.remote_address_validated()); + assert!(incoming.may_retry()); + IncomingConnectionBehavior::Accept + }); + let (client_ch_3, _server_ch_3) = pair.connect_with(client_config); + pair.client + .connections + .get_mut(&client_ch_3) + .unwrap() + .close(pair.time, VarInt(42), Bytes::new()); + pair.drive(); + assert_eq!(pair.client.known_connections(), 0); + assert_eq!(pair.client.known_cids(), 0); + assert_eq!(pair.server.known_connections(), 0); + assert_eq!(pair.server.known_cids(), 0); +} + +pub(super) struct FakeTimeSource(Mutex); + +impl FakeTimeSource { + pub(super) fn new() -> Self { + Self(Mutex::new(SystemTime::now())) + } + + pub(super) fn advance(&self, dur: Duration) { + *self.0.lock().unwrap() += dur; + } +} + +impl TimeSource for FakeTimeSource { + fn now(&self) -> SystemTime { + *self.0.lock().unwrap() + } +} diff --git a/quinn-proto/src/tests/util.rs 
b/quinn-proto/src/tests/util.rs index 7e927e2035..360bcd9228 100644 --- a/quinn-proto/src/tests/util.rs +++ b/quinn-proto/src/tests/util.rs @@ -1,6 +1,6 @@ use std::{ cmp, - collections::{HashMap, VecDeque}, + collections::{HashMap, HashSet, VecDeque}, env, io::{self, Write}, mem, @@ -14,13 +14,13 @@ use assert_matches::assert_matches; use bytes::BytesMut; use lazy_static::lazy_static; use rustls::{ + KeyLogFile, client::WebPkiServerVerifier, pki_types::{CertificateDer, PrivateKeyDer}, - KeyLogFile, }; use tracing::{info_span, trace}; -use super::crypto::rustls::{configured_provider, QuicClientConfig, QuicServerConfig}; +use super::crypto::rustls::{QuicClientConfig, QuicServerConfig, configured_provider}; use super::*; use crate::{Duration, Instant}; @@ -81,7 +81,7 @@ impl Pair { epoch: now, time: now, mtu: DEFAULT_MTU, - latency: Duration::new(0, 0), + latency: Duration::ZERO, spins: 0, last_spin: false, congestion_experienced: false, @@ -225,7 +225,7 @@ impl Pair { ); assert_matches!( self.client_conn_mut(client_ch).poll(), - Some(Event::Connected { .. }) + Some(Event::Connected) ); assert_matches!( self.server_conn_mut(server_ch).poll(), @@ -233,7 +233,7 @@ impl Pair { ); assert_matches!( self.server_conn_mut(server_ch).poll(), - Some(Event::Connected { .. 
}) + Some(Event::Connected) ); } @@ -297,25 +297,32 @@ pub(super) struct TestEndpoint { conn_events: HashMap>, pub(super) captured_packets: Vec>, pub(super) capture_inbound_packets: bool, - pub(super) incoming_connection_behavior: IncomingConnectionBehavior, + pub(super) handle_incoming: Box IncomingConnectionBehavior>, pub(super) waiting_incoming: Vec, } #[derive(Debug, Copy, Clone)] pub(super) enum IncomingConnectionBehavior { - AcceptAll, - RejectAll, - Validate, - ValidateThenReject, + Accept, + Reject, + Retry, Wait, } +pub(super) fn validate_incoming(incoming: &Incoming) -> IncomingConnectionBehavior { + if incoming.remote_address_validated() { + IncomingConnectionBehavior::Accept + } else { + IncomingConnectionBehavior::Retry + } +} + impl TestEndpoint { fn new(endpoint: Endpoint, addr: SocketAddr) -> Self { let socket = if env::var_os("SSLKEYLOGFILE").is_some() { let socket = UdpSocket::bind(addr).expect("failed to bind UDP socket"); socket - .set_read_timeout(Some(Duration::new(0, 10_000_000))) + .set_read_timeout(Some(Duration::from_millis(10))) .unwrap(); Some(socket) } else { @@ -334,7 +341,7 @@ impl TestEndpoint { conn_events: HashMap::default(), captured_packets: Vec::new(), capture_inbound_packets: false, - incoming_connection_behavior: IncomingConnectionBehavior::AcceptAll, + handle_incoming: Box::new(|_| IncomingConnectionBehavior::Accept), waiting_incoming: Vec::new(), } } @@ -364,26 +371,15 @@ impl TestEndpoint { { match event { DatagramEvent::NewConnection(incoming) => { - match self.incoming_connection_behavior { - IncomingConnectionBehavior::AcceptAll => { + match (self.handle_incoming)(&incoming) { + IncomingConnectionBehavior::Accept => { let _ = self.try_accept(incoming, now); } - IncomingConnectionBehavior::RejectAll => { + IncomingConnectionBehavior::Reject => { self.reject(incoming); } - IncomingConnectionBehavior::Validate => { - if incoming.remote_address_validated() { - let _ = self.try_accept(incoming, now); - } else { - 
self.retry(incoming); - } - } - IncomingConnectionBehavior::ValidateThenReject => { - if incoming.remote_address_validated() { - self.reject(incoming); - } else { - self.retry(incoming); - } + IncomingConnectionBehavior::Retry => { + self.retry(incoming); } IncomingConnectionBehavior::Wait => { self.waiting_incoming.push(incoming); @@ -564,14 +560,26 @@ impl Write for TestWriter { } pub(super) fn server_config() -> ServerConfig { - ServerConfig::with_crypto(Arc::new(server_crypto())) + let mut config = ServerConfig::with_crypto(Arc::new(server_crypto())); + if !cfg!(feature = "bloom") { + config + .validation_token + .sent(2) + .log(Arc::new(SimpleTokenLog::default())); + } + config } pub(super) fn server_config_with_cert( cert: CertificateDer<'static>, key: PrivateKeyDer<'static>, ) -> ServerConfig { - ServerConfig::with_crypto(Arc::new(server_crypto_with_cert(cert, key))) + let mut config = ServerConfig::with_crypto(Arc::new(server_crypto_with_cert(cert, key))); + config + .validation_token + .sent(2) + .log(Arc::new(SimpleTokenLog::default())); + config } pub(super) fn server_crypto() -> QuicServerConfig { @@ -717,3 +725,21 @@ lazy_static! 
{ pub(crate) static ref CERTIFIED_KEY: rcgen::CertifiedKey = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); } + +#[derive(Default)] +struct SimpleTokenLog(Mutex>); + +impl TokenLog for SimpleTokenLog { + fn check_and_insert( + &self, + nonce: u128, + _issued: SystemTime, + _lifetime: Duration, + ) -> Result<(), TokenReuseError> { + if self.0.lock().unwrap().insert(nonce) { + Ok(()) + } else { + Err(TokenReuseError) + } + } +} diff --git a/quinn-proto/src/token.rs b/quinn-proto/src/token.rs index 5face46a74..e4ab5ea529 100644 --- a/quinn-proto/src/token.rs +++ b/quinn-proto/src/token.rs @@ -1,80 +1,333 @@ use std::{ - fmt, io, + fmt, + mem::size_of, net::{IpAddr, SocketAddr}, }; -use bytes::{Buf, BufMut}; +use bytes::{Buf, BufMut, Bytes}; +use rand::Rng; use crate::{ + Duration, RESET_TOKEN_SIZE, ServerConfig, SystemTime, UNIX_EPOCH, coding::{BufExt, BufMutExt}, - crypto::{CryptoError, HandshakeTokenKey, HmacKey}, + crypto::{HandshakeTokenKey, HmacKey}, + packet::InitialHeader, shared::ConnectionId, - Duration, SystemTime, RESET_TOKEN_SIZE, UNIX_EPOCH, }; -pub(crate) struct RetryToken { - /// The destination connection ID set in the very first packet from the client +/// Responsible for limiting clients' ability to reuse validation tokens +/// +/// [_RFC 9000 § 8.1.4:_](https://www.rfc-editor.org/rfc/rfc9000.html#section-8.1.4) +/// +/// > Attackers could replay tokens to use servers as amplifiers in DDoS attacks. To protect +/// > against such attacks, servers MUST ensure that replay of tokens is prevented or limited. +/// > Servers SHOULD ensure that tokens sent in Retry packets are only accepted for a short time, +/// > as they are returned immediately by clients. Tokens that are provided in NEW_TOKEN frames +/// > (Section 19.7) need to be valid for longer but SHOULD NOT be accepted multiple times. 
+/// > Servers are encouraged to allow tokens to be used only once, if possible; tokens MAY include +/// > additional information about clients to further narrow applicability or reuse. +/// +/// `TokenLog` pertains only to tokens provided in NEW_TOKEN frames. +pub trait TokenLog: Send + Sync { + /// Record that the token was used and, ideally, return a token reuse error if the token may + /// have been already used previously + /// + /// False negatives and false positives are both permissible. Called when a client uses an + /// address validation token. + /// + /// Parameters: + /// - `nonce`: A server-generated random unique value for the token. + /// - `issued`: The time the server issued the token. + /// - `lifetime`: The expiration time of address validation tokens sent via NEW_TOKEN frames, + /// as configured by [`ServerValidationTokenConfig::lifetime`][1]. + /// + /// [1]: crate::ValidationTokenConfig::lifetime + /// + /// ## Security & Performance + /// + /// To the extent that it is possible to repeatedly trigger false negatives (returning `Ok` for + /// a token which has been reused), an attacker could use the server to perform [amplification + /// attacks][2]. The QUIC specification requires that this be limited, if not prevented fully. + /// + /// A false positive (returning `Err` for a token which has never been used) is not a security + /// vulnerability; it is permissible for a `TokenLog` to always return `Err`. A false positive + /// causes the token to be ignored, which may cause the transmission of some 0.5-RTT data to be + /// delayed until the handshake completes, if a sufficient amount of 0.5-RTT data it sent. 
+ /// + /// [2]: https://en.wikipedia.org/wiki/Denial-of-service_attack#Amplification + fn check_and_insert( + &self, + nonce: u128, + issued: SystemTime, + lifetime: Duration, + ) -> Result<(), TokenReuseError>; +} + +/// Error for when a validation token may have been reused +pub struct TokenReuseError; + +/// Null implementation of [`TokenLog`], which never accepts tokens +pub struct NoneTokenLog; + +impl TokenLog for NoneTokenLog { + fn check_and_insert(&self, _: u128, _: SystemTime, _: Duration) -> Result<(), TokenReuseError> { + Err(TokenReuseError) + } +} + +/// Responsible for storing validation tokens received from servers and retrieving them for use in +/// subsequent connections +pub trait TokenStore: Send + Sync { + /// Potentially store a token for later one-time use + /// + /// Called when a NEW_TOKEN frame is received from the server. + fn insert(&self, server_name: &str, token: Bytes); + + /// Try to find and take a token that was stored with the given server name + /// + /// The same token must never be returned from `take` twice, as doing so can be used to + /// de-anonymize a client's traffic. + /// + /// Called when trying to connect to a server. It is always ok for this to return `None`. 
+ fn take(&self, server_name: &str) -> Option; +} + +/// Null implementation of [`TokenStore`], which does not store any tokens +pub struct NoneTokenStore; + +impl TokenStore for NoneTokenStore { + fn insert(&self, _: &str, _: Bytes) {} + fn take(&self, _: &str) -> Option { + None + } +} + +/// State in an `Incoming` determined by a token or lack thereof +#[derive(Debug)] +pub(crate) struct IncomingToken { + pub(crate) retry_src_cid: Option, pub(crate) orig_dst_cid: ConnectionId, - /// The time at which this token was issued - pub(crate) issued: SystemTime, + pub(crate) validated: bool, } -impl RetryToken { - pub(crate) fn encode( - &self, - key: &dyn HandshakeTokenKey, - address: &SocketAddr, - retry_src_cid: &ConnectionId, - ) -> Vec { - let aead_key = key.aead_from_hkdf(retry_src_cid); +impl IncomingToken { + /// Construct for an `Incoming` given the first packet header, or error if the connection + /// cannot be established + pub(crate) fn from_header( + header: &InitialHeader, + server_config: &ServerConfig, + remote_address: SocketAddr, + ) -> Result { + let unvalidated = Self { + retry_src_cid: None, + orig_dst_cid: header.dst_cid, + validated: false, + }; + + // Decode token or short-circuit + if header.token.is_empty() { + return Ok(unvalidated); + } + + // In cases where a token cannot be decrypted/decoded, we must allow for the possibility + // that this is caused not by client malfeasance, but by the token having been generated by + // an incompatible endpoint, e.g. a different version or a neighbor behind the same load + // balancer. In such cases we proceed as if there was no token. + // + // [_RFC 9000 § 8.1.3:_](https://www.rfc-editor.org/rfc/rfc9000.html#section-8.1.3-10) + // + // > If the token is invalid, then the server SHOULD proceed as if the client did not have + // > a validated address, including potentially sending a Retry packet. 
+ let Some(retry) = Token::decode(&*server_config.token_key, &header.token) else { + return Ok(unvalidated); + }; + + // Validate token, then convert into Self + match retry.payload { + TokenPayload::Retry { + address, + orig_dst_cid, + issued, + } => { + if address != remote_address { + return Err(InvalidRetryTokenError); + } + if issued + server_config.retry_token_lifetime < server_config.time_source.now() { + return Err(InvalidRetryTokenError); + } + + Ok(Self { + retry_src_cid: Some(header.dst_cid), + orig_dst_cid, + validated: true, + }) + } + TokenPayload::Validation { ip, issued } => { + if ip != remote_address.ip() { + return Ok(unvalidated); + } + if issued + server_config.validation_token.lifetime + < server_config.time_source.now() + { + return Ok(unvalidated); + } + if server_config + .validation_token + .log + .check_and_insert(retry.nonce, issued, server_config.validation_token.lifetime) + .is_err() + { + return Ok(unvalidated); + } + + Ok(Self { + retry_src_cid: None, + orig_dst_cid: header.dst_cid, + validated: true, + }) + } + } + } +} + +/// Error for a token being unambiguously from a Retry packet, and not valid +/// +/// The connection cannot be established. 
+pub(crate) struct InvalidRetryTokenError; + +/// Retry or validation token +pub(crate) struct Token { + /// Content that is encrypted from the client + pub(crate) payload: TokenPayload, + /// Randomly generated value, which must be unique, and is visible to the client + nonce: u128, +} + +impl Token { + /// Construct with newly sampled randomness + pub(crate) fn new(payload: TokenPayload, rng: &mut impl Rng) -> Self { + Self { + nonce: rng.random(), + payload, + } + } + /// Encode and encrypt + pub(crate) fn encode(&self, key: &dyn HandshakeTokenKey) -> Vec { let mut buf = Vec::new(); - encode_addr(&mut buf, address); - self.orig_dst_cid.encode_long(&mut buf); - buf.write::( - self.issued - .duration_since(UNIX_EPOCH) - .map(|x| x.as_secs()) - .unwrap_or(0), - ); + // Encode payload + match self.payload { + TokenPayload::Retry { + address, + orig_dst_cid, + issued, + } => { + buf.put_u8(TokenType::Retry as u8); + encode_addr(&mut buf, address); + orig_dst_cid.encode_long(&mut buf); + encode_unix_secs(&mut buf, issued); + } + TokenPayload::Validation { ip, issued } => { + buf.put_u8(TokenType::Validation as u8); + encode_ip(&mut buf, ip); + encode_unix_secs(&mut buf, issued); + } + } + + // Encrypt + let aead_key = key.aead_from_hkdf(&self.nonce.to_le_bytes()); aead_key.seal(&mut buf, &[]).unwrap(); + buf.extend(&self.nonce.to_le_bytes()); buf } - pub(crate) fn from_bytes( - key: &dyn HandshakeTokenKey, - address: &SocketAddr, - retry_src_cid: &ConnectionId, - raw_token_bytes: &[u8], - ) -> Result { - let aead_key = key.aead_from_hkdf(retry_src_cid); - let mut sealed_token = raw_token_bytes.to_vec(); - - let data = aead_key.open(&mut sealed_token, &[])?; - let mut reader = io::Cursor::new(data); - let token_addr = decode_addr(&mut reader).ok_or(ValidationError::Unusable)?; - if token_addr != *address { - return Err(ValidationError::InvalidRetry); + /// Decode and decrypt + fn decode(key: &dyn HandshakeTokenKey, raw_token_bytes: &[u8]) -> Option { + // Decrypt + + 
// MSRV: split_at_checked requires 1.80.0 + let nonce_slice_start = raw_token_bytes.len().checked_sub(size_of::())?; + let (sealed_token, nonce_bytes) = raw_token_bytes.split_at(nonce_slice_start); + + let nonce = u128::from_le_bytes(nonce_bytes.try_into().unwrap()); + + let aead_key = key.aead_from_hkdf(nonce_bytes); + let mut sealed_token = sealed_token.to_vec(); + let data = aead_key.open(&mut sealed_token, &[]).ok()?; + + // Decode payload + let mut reader = &data[..]; + let payload = match TokenType::from_byte((&mut reader).get::().ok()?)? { + TokenType::Retry => TokenPayload::Retry { + address: decode_addr(&mut reader)?, + orig_dst_cid: ConnectionId::decode_long(&mut reader)?, + issued: decode_unix_secs(&mut reader)?, + }, + TokenType::Validation => TokenPayload::Validation { + ip: decode_ip(&mut reader)?, + issued: decode_unix_secs(&mut reader)?, + }, + }; + + if !reader.is_empty() { + // Consider extra bytes a decoding error (it may be from an incompatible endpoint) + return None; } - let orig_dst_cid = - ConnectionId::decode_long(&mut reader).ok_or(ValidationError::Unusable)?; - let issued = UNIX_EPOCH - + Duration::new( - reader.get::().map_err(|_| ValidationError::Unusable)?, - 0, - ); - Ok(Self { - orig_dst_cid, - issued, - }) + Some(Self { nonce, payload }) + } +} + +/// Content of a [`Token`] that is encrypted from the client +pub(crate) enum TokenPayload { + /// Token originating from a Retry packet + Retry { + /// The client's address + address: SocketAddr, + /// The destination connection ID set in the very first packet from the client + orig_dst_cid: ConnectionId, + /// The time at which this token was issued + issued: SystemTime, + }, + /// Token originating from a NEW_TOKEN frame + Validation { + /// The client's IP address (its port is likely to change between sessions) + ip: IpAddr, + /// The time at which this token was issued + issued: SystemTime, + }, +} + +/// Variant tag for a [`TokenPayload`] +#[derive(Copy, Clone)] +#[repr(u8)] +enum 
TokenType { + Retry = 0, + Validation = 1, +} + +impl TokenType { + fn from_byte(n: u8) -> Option { + use TokenType::*; + [Retry, Validation].into_iter().find(|ty| *ty as u8 == n) } } -fn encode_addr(buf: &mut Vec, address: &SocketAddr) { - match address.ip() { +fn encode_addr(buf: &mut Vec, address: SocketAddr) { + encode_ip(buf, address.ip()); + buf.put_u16(address.port()); +} + +fn decode_addr(buf: &mut B) -> Option { + let ip = decode_ip(buf)?; + let port = buf.get().ok()?; + Some(SocketAddr::new(ip, port)) +} + +fn encode_ip(buf: &mut Vec, ip: IpAddr) { + match ip { IpAddr::V4(x) => { buf.put_u8(0); buf.put_slice(&x.octets()); @@ -84,50 +337,26 @@ fn encode_addr(buf: &mut Vec, address: &SocketAddr) { buf.put_slice(&x.octets()); } } - buf.put_u16(address.port()); } -fn decode_addr(buf: &mut B) -> Option { - let ip = match buf.get_u8() { - 0 => IpAddr::V4(buf.get().ok()?), - 1 => IpAddr::V6(buf.get().ok()?), - _ => return None, - }; - let port = buf.get_u16(); - Some(SocketAddr::new(ip, port)) +fn decode_ip(buf: &mut B) -> Option { + match buf.get::().ok()? { + 0 => buf.get().ok().map(IpAddr::V4), + 1 => buf.get().ok().map(IpAddr::V6), + _ => None, + } } -/// Error for a token failing to validate a client's address -#[derive(Debug, Copy, Clone)] -pub(crate) enum ValidationError { - /// Token may have come from a NEW_TOKEN frame (including from a different server or a previous - /// run of this server with different keys), and was not valid - /// - /// It should be silently ignored. - /// - /// In cases where a token cannot be decrypted/decoded, we must allow for the possibility that - /// this is caused not by client malfeasance, but by the token having been generated by an - /// incompatible endpoint, e.g. a different version or a neighbor behind the same load - /// balancer. In such cases we proceed as if there was no token. 
- /// - /// [_RFC 9000 § 8.1.3:_](https://www.rfc-editor.org/rfc/rfc9000.html#section-8.1.3-10) - /// - /// > If the token is invalid, then the server SHOULD proceed as if the client did not have a - /// > validated address, including potentially sending a Retry packet. - /// - /// That said, this may also be used when a token _can_ be unambiguously decrypted/decoded as a - /// token from a NEW_TOKEN frame, but is simply not valid. - Unusable, - /// Token was unambiguously from a Retry packet, and was not valid - /// - /// The connection cannot be established. - InvalidRetry, +fn encode_unix_secs(buf: &mut Vec, time: SystemTime) { + buf.write::( + time.duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(), + ); } -impl From for ValidationError { - fn from(CryptoError: CryptoError) -> Self { - Self::Unusable - } +fn decode_unix_secs(buf: &mut B) -> Option { + Some(UNIX_EPOCH + Duration::from_secs(buf.get::().ok()?)) } /// Stateless reset token @@ -138,9 +367,9 @@ impl From for ValidationError { pub(crate) struct ResetToken([u8; RESET_TOKEN_SIZE]); impl ResetToken { - pub(crate) fn new(key: &dyn HmacKey, id: &ConnectionId) -> Self { + pub(crate) fn new(key: &dyn HmacKey, id: ConnectionId) -> Self { let mut signature = vec![0; key.signature_len()]; - key.sign(id, &mut signature); + key.sign(&id, &mut signature); // TODO: Server ID?? 
let mut result = [0; RESET_TOKEN_SIZE]; result.copy_from_slice(&signature[..RESET_TOKEN_SIZE]); @@ -180,67 +409,99 @@ impl fmt::Display for ResetToken { #[cfg(all(test, any(feature = "aws-lc-rs", feature = "ring")))] mod test { + use super::*; #[cfg(all(feature = "aws-lc-rs", not(feature = "ring")))] use aws_lc_rs::hkdf; + use rand::prelude::*; #[cfg(feature = "ring")] use ring::hkdf; + fn token_round_trip(payload: TokenPayload) -> TokenPayload { + let rng = &mut rand::rng(); + let token = Token::new(payload, rng); + let mut master_key = [0; 64]; + rng.fill_bytes(&mut master_key); + let prk = hkdf::Salt::new(hkdf::HKDF_SHA256, &[]).extract(&master_key); + let encoded = token.encode(&prk); + let decoded = Token::decode(&prk, &encoded).expect("token didn't decrypt / decode"); + assert_eq!(token.nonce, decoded.nonce); + decoded.payload + } + #[test] - fn token_sanity() { - use super::*; - use crate::cid_generator::{ConnectionIdGenerator, RandomConnectionIdGenerator}; + fn retry_token_sanity() { use crate::MAX_CID_SIZE; + use crate::cid_generator::{ConnectionIdGenerator, RandomConnectionIdGenerator}; use crate::{Duration, UNIX_EPOCH}; - use rand::RngCore; use std::net::Ipv6Addr; - let rng = &mut rand::thread_rng(); + let address_1 = SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 4433); + let orig_dst_cid_1 = RandomConnectionIdGenerator::new(MAX_CID_SIZE).generate_cid(); + let issued_1 = UNIX_EPOCH + Duration::from_secs(42); // Fractional seconds would be lost + let payload_1 = TokenPayload::Retry { + address: address_1, + orig_dst_cid: orig_dst_cid_1, + issued: issued_1, + }; + let TokenPayload::Retry { + address: address_2, + orig_dst_cid: orig_dst_cid_2, + issued: issued_2, + } = token_round_trip(payload_1) + else { + panic!("token decoded as wrong variant"); + }; - let mut master_key = [0; 64]; - rng.fill_bytes(&mut master_key); + assert_eq!(address_1, address_2); + assert_eq!(orig_dst_cid_1, orig_dst_cid_2); + assert_eq!(issued_1, issued_2); + } - let prk = 
hkdf::Salt::new(hkdf::HKDF_SHA256, &[]).extract(&master_key); + #[test] + fn validation_token_sanity() { + use crate::{Duration, UNIX_EPOCH}; + + use std::net::Ipv6Addr; - let addr = SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 4433); - let retry_src_cid = RandomConnectionIdGenerator::new(MAX_CID_SIZE).generate_cid(); - let token = RetryToken { - orig_dst_cid: RandomConnectionIdGenerator::new(MAX_CID_SIZE).generate_cid(), - issued: UNIX_EPOCH + Duration::new(42, 0), // Fractional seconds would be lost + let ip_1 = Ipv6Addr::LOCALHOST.into(); + let issued_1 = UNIX_EPOCH + Duration::from_secs(42); // Fractional seconds would be lost + + let payload_1 = TokenPayload::Validation { + ip: ip_1, + issued: issued_1, + }; + let TokenPayload::Validation { + ip: ip_2, + issued: issued_2, + } = token_round_trip(payload_1) + else { + panic!("token decoded as wrong variant"); }; - let encoded = token.encode(&prk, &addr, &retry_src_cid); - let decoded = RetryToken::from_bytes(&prk, &addr, &retry_src_cid, &encoded) - .expect("token didn't validate"); - assert_eq!(token.orig_dst_cid, decoded.orig_dst_cid); - assert_eq!(token.issued, decoded.issued); + assert_eq!(ip_1, ip_2); + assert_eq!(issued_1, issued_2); } #[test] fn invalid_token_returns_err() { use super::*; - use crate::cid_generator::{ConnectionIdGenerator, RandomConnectionIdGenerator}; - use crate::MAX_CID_SIZE; use rand::RngCore; - use std::net::Ipv6Addr; - let rng = &mut rand::thread_rng(); + let rng = &mut rand::rng(); let mut master_key = [0; 64]; rng.fill_bytes(&mut master_key); let prk = hkdf::Salt::new(hkdf::HKDF_SHA256, &[]).extract(&master_key); - let addr = SocketAddr::new(Ipv6Addr::LOCALHOST.into(), 4433); - let retry_src_cid = RandomConnectionIdGenerator::new(MAX_CID_SIZE).generate_cid(); - let mut invalid_token = Vec::new(); let mut random_data = [0; 32]; - rand::thread_rng().fill_bytes(&mut random_data); + rand::rng().fill_bytes(&mut random_data); invalid_token.put_slice(&random_data); // Assert: garbage 
sealed data returns err - assert!(RetryToken::from_bytes(&prk, &addr, &retry_src_cid, &invalid_token).is_err()); + assert!(Token::decode(&prk, &invalid_token).is_none()); } } diff --git a/quinn-proto/src/token_memory_cache.rs b/quinn-proto/src/token_memory_cache.rs new file mode 100644 index 0000000000..0e01587313 --- /dev/null +++ b/quinn-proto/src/token_memory_cache.rs @@ -0,0 +1,246 @@ +//! Storing tokens sent from servers in NEW_TOKEN frames and using them in subsequent connections + +use std::{ + collections::{HashMap, VecDeque, hash_map}, + sync::{Arc, Mutex}, +}; + +use bytes::Bytes; +use lru_slab::LruSlab; +use tracing::trace; + +use crate::token::TokenStore; + +/// `TokenStore` implementation that stores up to `N` tokens per server name for up to a +/// limited number of server names, in-memory +#[derive(Debug)] +pub struct TokenMemoryCache(Mutex); + +impl TokenMemoryCache { + /// Construct empty + pub fn new(max_server_names: u32, max_tokens_per_server: usize) -> Self { + Self(Mutex::new(State::new( + max_server_names, + max_tokens_per_server, + ))) + } +} + +impl TokenStore for TokenMemoryCache { + fn insert(&self, server_name: &str, token: Bytes) { + trace!(%server_name, "storing token"); + self.0.lock().unwrap().store(server_name, token) + } + + fn take(&self, server_name: &str) -> Option { + let token = self.0.lock().unwrap().take(server_name); + trace!(%server_name, found=%token.is_some(), "taking token"); + token + } +} + +/// Defaults to a maximum of 256 servers and 2 tokens per server +impl Default for TokenMemoryCache { + fn default() -> Self { + Self::new(256, 2) + } +} + +/// Lockable inner state of `TokenMemoryCache` +#[derive(Debug)] +struct State { + max_server_names: u32, + max_tokens_per_server: usize, + // map from server name to index in lru + lookup: HashMap, u32>, + lru: LruSlab, +} + +impl State { + fn new(max_server_names: u32, max_tokens_per_server: usize) -> Self { + Self { + max_server_names, + max_tokens_per_server, + lookup: 
HashMap::new(), + lru: LruSlab::default(), + } + } + + fn store(&mut self, server_name: &str, token: Bytes) { + if self.max_server_names == 0 { + // the rest of this method assumes that we can always insert a new entry so long as + // we're willing to evict a pre-existing entry. thus, an entry limit of 0 is an edge + // case we must short-circuit on now. + return; + } + if self.max_tokens_per_server == 0 { + // similarly to above, the rest of this method assumes that we can always push a new + // token to a queue so long as we're willing to evict a pre-existing token, so we + // short-circuit on the edge case of a token limit of 0. + return; + } + + let server_name = Arc::::from(server_name); + match self.lookup.entry(server_name.clone()) { + hash_map::Entry::Occupied(hmap_entry) => { + // key already exists, push the new token to its token queue + let tokens = &mut self.lru.get_mut(*hmap_entry.get()).tokens; + if tokens.len() >= self.max_tokens_per_server { + debug_assert!(tokens.len() == self.max_tokens_per_server); + tokens.pop_front().unwrap(); + } + tokens.push_back(token); + } + hash_map::Entry::Vacant(hmap_entry) => { + // key does not yet exist, create a new one, evicting the oldest if necessary + let removed_key = if self.lru.len() >= self.max_server_names { + // unwrap safety: max_server_names is > 0, so there's at least one entry, so + // lru() is some + Some(self.lru.remove(self.lru.lru().unwrap()).server_name) + } else { + None + }; + + hmap_entry.insert(self.lru.insert(CacheEntry::new(server_name, token))); + + // for borrowing reasons, we must defer removing the evicted hmap entry to here + if let Some(removed_slot) = removed_key { + let removed = self.lookup.remove(&removed_slot); + debug_assert!(removed.is_some()); + } + } + }; + } + + fn take(&mut self, server_name: &str) -> Option { + let slab_key = *self.lookup.get(server_name)?; + + // pop from entry's token queue + let entry = self.lru.get_mut(slab_key); + // unwrap safety: we never leave 
tokens empty + let token = entry.tokens.pop_front().unwrap(); + + if entry.tokens.is_empty() { + // token stack emptied, remove entry + self.lru.remove(slab_key); + self.lookup.remove(server_name); + } + + Some(token) + } +} + +/// Cache entry within `TokenMemoryCache`'s LRU slab +#[derive(Debug)] +struct CacheEntry { + server_name: Arc, + // invariant: tokens is never empty + tokens: VecDeque, +} + +impl CacheEntry { + /// Construct with a single token + fn new(server_name: Arc, token: Bytes) -> Self { + let mut tokens = VecDeque::new(); + tokens.push_back(token); + Self { + server_name, + tokens, + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::VecDeque; + + use super::*; + use rand::prelude::*; + use rand_pcg::Pcg32; + + fn new_rng() -> impl Rng { + Pcg32::from_seed(0xdeadbeefdeadbeefdeadbeefdeadbeefu128.to_le_bytes()) + } + + #[test] + fn cache_test() { + let mut rng = new_rng(); + const N: usize = 2; + + for _ in 0..10 { + let mut cache_1: Vec<(u32, VecDeque)> = Vec::new(); // keep it sorted oldest to newest + let cache_2 = TokenMemoryCache::new(20, 2); + + for i in 0..200 { + let server_name = rng.random::() % 10; + if rng.random_bool(0.666) { + // store + let token = Bytes::from(vec![i]); + println!("STORE {} {:?}", server_name, token); + if let Some((j, _)) = cache_1 + .iter() + .enumerate() + .find(|&(_, &(server_name_2, _))| server_name_2 == server_name) + { + let (_, mut queue) = cache_1.remove(j); + queue.push_back(token.clone()); + if queue.len() > N { + queue.pop_front(); + } + cache_1.push((server_name, queue)); + } else { + let mut queue = VecDeque::new(); + queue.push_back(token.clone()); + cache_1.push((server_name, queue)); + if cache_1.len() > 20 { + cache_1.remove(0); + } + } + cache_2.insert(&server_name.to_string(), token); + } else { + // take + println!("TAKE {}", server_name); + let expecting = cache_1 + .iter() + .enumerate() + .find(|&(_, &(server_name_2, _))| server_name_2 == server_name) + .map(|(j, _)| j) + .map(|j| { + 
let (_, mut queue) = cache_1.remove(j); + let token = queue.pop_front().unwrap(); + if !queue.is_empty() { + cache_1.push((server_name, queue)); + } + token + }); + println!("EXPECTING {:?}", expecting); + assert_eq!(cache_2.take(&server_name.to_string()), expecting); + } + } + } + } + + #[test] + fn zero_max_server_names() { + // test that this edge case doesn't panic + let cache = TokenMemoryCache::new(0, 2); + for i in 0..10 { + cache.insert(&i.to_string(), Bytes::from(vec![i])); + for j in 0..10 { + assert!(cache.take(&j.to_string()).is_none()); + } + } + } + + #[test] + fn zero_queue_length() { + // test that this edge case doesn't panic + let cache = TokenMemoryCache::new(256, 0); + for i in 0..10 { + cache.insert(&i.to_string(), Bytes::from(vec![i])); + for j in 0..10 { + assert!(cache.take(&j.to_string()).is_none()); + } + } + } +} diff --git a/quinn-proto/src/transport_error.rs b/quinn-proto/src/transport_error.rs index 047cd0acc1..d942d76afb 100644 --- a/quinn-proto/src/transport_error.rs +++ b/quinn-proto/src/transport_error.rs @@ -129,4 +129,8 @@ errors! 
{ KEY_UPDATE_ERROR(0xE) "key update error"; AEAD_LIMIT_REACHED(0xF) "the endpoint has reached the confidentiality or integrity limit for the AEAD algorithm"; NO_VIABLE_PATH(0x10) "no viable network path exists"; + APPLICATION_ABANDON(0x004150504142414e) "Path abandoned at the application's request"; + RESOURCE_LIMIT_REACHED(0x0052534c494d4954) "Path abandoned due to resource limitations in the transport"; + UNSTABLE_INTERFACE(0x00554e5f494e5446) "Path abandoned due to unstable interfaces"; + NO_CID_AVAILABLE(0x004e4f5f4349445f) "Path abandoned due to no available connection IDs for the path"; } diff --git a/quinn-proto/src/transport_parameters.rs b/quinn-proto/src/transport_parameters.rs index af8c427f3e..69f896afae 100644 --- a/quinn-proto/src/transport_parameters.rs +++ b/quinn-proto/src/transport_parameters.rs @@ -12,18 +12,18 @@ use std::{ }; use bytes::{Buf, BufMut}; -use rand::{seq::SliceRandom as _, Rng as _, RngCore}; +use rand::{Rng as _, RngCore, seq::SliceRandom as _}; use thiserror::Error; use crate::{ - address_discovery, + LOC_CID_COUNT, MAX_CID_SIZE, MAX_STREAM_COUNT, RESET_TOKEN_SIZE, ResetToken, Side, + TIMER_GRANULARITY, TransportError, VarInt, address_discovery, cid_generator::ConnectionIdGenerator, cid_queue::CidQueue, coding::{BufExt, BufMutExt, UnexpectedEnd}, config::{EndpointConfig, ServerConfig, TransportConfig}, + connection::PathId, shared::ConnectionId, - ResetToken, Side, TransportError, VarInt, LOC_CID_COUNT, MAX_CID_SIZE, MAX_STREAM_COUNT, - RESET_TOKEN_SIZE, TIMER_GRANULARITY, }; // Apply a given macro to a list of all the transport parameters having integer types, along with @@ -115,6 +115,9 @@ macro_rules! make_struct { /// The role of this peer in address discovery, if any. pub(crate) address_discovery_role: address_discovery::Role, + + // Multipath extension + pub(crate) initial_max_path_id: Option, } // We deliberately don't implement the `Default` trait, since that would be public, and @@ -139,6 +142,7 @@ macro_rules! 
make_struct { grease_transport_parameter: None, write_order: None, address_discovery_role: address_discovery::Role::Disabled, + initial_max_path_id: None, } } } @@ -187,6 +191,7 @@ impl TransportParameters { order }), address_discovery_role: config.address_discovery_role, + initial_max_path_id: config.get_initial_max_path_id(), ..Self::default() } } @@ -396,6 +401,13 @@ impl TransportParameters { w.write(varint_role); } } + TransportParameterId::InitialMaxPathId => { + if let Some(val) = self.initial_max_path_id { + w.write_var(id as u64); + w.write_var(val.size() as u64); + w.write(val); + } + } id => { macro_rules! write_params { {$($(#[$doc:meta])* $name:ident ($id:ident) = $default:expr,)*} => { @@ -509,6 +521,19 @@ impl TransportParameters { "address discovery enabled for peer" ); } + TransportParameterId::InitialMaxPathId => { + if params.initial_max_path_id.is_some() { + return Err(Error::Malformed); + } + + let value: PathId = r.get()?; + if len != value.size() { + return Err(Error::Malformed); + } + + params.initial_max_path_id = Some(value); + tracing::debug!(initial_max_path_id=%value, "multipath enabled"); + } _ => { macro_rules! 
parse { {$($(#[$doc:meta])* $name:ident ($id:ident) = $default:expr,)*} => { @@ -592,7 +617,7 @@ impl ReservedTransportParameter { fn random(rng: &mut impl RngCore) -> Self { let id = Self::generate_reserved_id(rng); - let payload_len = rng.gen_range(0..Self::MAX_PAYLOAD_LEN); + let payload_len = rng.random_range(0..Self::MAX_PAYLOAD_LEN); let payload = { let mut slice = [0u8; Self::MAX_PAYLOAD_LEN]; @@ -619,7 +644,7 @@ impl ReservedTransportParameter { /// See: and fn generate_reserved_id(rng: &mut impl RngCore) -> VarInt { let id = { - let rand = rng.gen_range(0u64..(1 << 62) - 27); + let rand = rng.random_range(0u64..(1 << 62) - 27); let n = rand / 31; 31 * n + 27 }; @@ -674,11 +699,14 @@ pub(crate) enum TransportParameterId { // ObservedAddr = 0x9f81a176, + + // https://datatracker.ietf.org/doc/html/draft-ietf-quic-multipath + InitialMaxPathId = 0x0f739bbc1b666d0c, } impl TransportParameterId { /// Array with all supported transport parameter IDs - const SUPPORTED: [Self; 22] = [ + const SUPPORTED: [Self; 23] = [ Self::MaxIdleTimeout, Self::MaxUdpPayloadSize, Self::InitialMaxData, @@ -701,6 +729,7 @@ impl TransportParameterId { Self::GreaseQuicBit, Self::MinAckDelayDraft07, Self::ObservedAddr, + Self::InitialMaxPathId, ]; } @@ -741,6 +770,7 @@ impl TryFrom for TransportParameterId { id if Self::GreaseQuicBit == id => Self::GreaseQuicBit, id if Self::MinAckDelayDraft07 == id => Self::MinAckDelayDraft07, id if Self::ObservedAddr == id => Self::ObservedAddr, + id if Self::InitialMaxPathId == id => Self::InitialMaxPathId, _ => return Err(()), }; Ok(param) @@ -780,6 +810,7 @@ mod test { grease_quic_bit: true, min_ack_delay: Some(2_000u32.into()), address_discovery_role: address_discovery::Role::SendOnly, + initial_max_path_id: Some(PathId::MAX), ..TransportParameters::default() }; params.write(&mut buf); @@ -825,7 +856,7 @@ mod test { #[test] fn reserved_transport_parameter_ignored_when_read() { let mut buf = Vec::new(); - let reserved_parameter = 
ReservedTransportParameter::random(&mut rand::thread_rng()); + let reserved_parameter = ReservedTransportParameter::random(&mut rand::rng()); assert!(reserved_parameter.payload_len < ReservedTransportParameter::MAX_PAYLOAD_LEN); assert!(reserved_parameter.id.0 % 31 == 27); diff --git a/quinn-proto/src/varint.rs b/quinn-proto/src/varint.rs index 08022d9db3..fd01b0e18f 100644 --- a/quinn-proto/src/varint.rs +++ b/quinn-proto/src/varint.rs @@ -189,11 +189,11 @@ impl Codec for VarInt { if x < 2u64.pow(6) { w.put_u8(x as u8); } else if x < 2u64.pow(14) { - w.put_u16(0b01 << 14 | x as u16); + w.put_u16((0b01 << 14) | x as u16); } else if x < 2u64.pow(30) { - w.put_u32(0b10 << 30 | x as u32); + w.put_u32((0b10 << 30) | x as u32); } else if x < 2u64.pow(62) { - w.put_u64(0b11 << 62 | x); + w.put_u64((0b11 << 62) | x); } else { unreachable!("malformed VarInt") } diff --git a/quinn-udp/Cargo.toml b/quinn-udp/Cargo.toml index cbb865e5ce..ba71f64a8c 100644 --- a/quinn-udp/Cargo.toml +++ b/quinn-udp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "iroh-quinn-udp" -version = "0.5.7" +version = "0.5.12" edition.workspace = true rust-version.workspace = true license.workspace = true @@ -11,6 +11,8 @@ categories.workspace = true workspace = ".." [features] +# NOTE: Please keep this in sync with the feature list in `.github/workflows/codecov.yml`, see +# comment in that file for more information. default = ["tracing", "log"] # Configure `tracing` to log events via `log` if no `tracing` subscriber exists. 
log = ["tracing/log"] @@ -21,9 +23,11 @@ fast-apple-datapath = [] [dependencies] libc = "0.2.158" log = { workspace = true, optional = true } -socket2 = { workspace = true } tracing = { workspace = true, optional = true } +[target.'cfg(not(all(target_family = "wasm", target_os = "unknown")))'.dependencies] +socket2 = { workspace = true } + [target.'cfg(windows)'.dependencies] once_cell = { workspace = true } windows-sys = { workspace = true } @@ -33,7 +37,7 @@ criterion = { version = "0.5", default-features = false, features = ["async_toki tokio = { workspace = true, features = ["rt", "rt-multi-thread", "net"] } [build-dependencies] -cfg_aliases = "0.2" +cfg_aliases = { workspace = true } [lib] # See https://github.com/bheisler/criterion.rs/blob/master/book/src/faq.md#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options diff --git a/quinn-udp/benches/throughput.rs b/quinn-udp/benches/throughput.rs index 0f11ce8bc7..58f66eabd2 100644 --- a/quinn-udp/benches/throughput.rs +++ b/quinn-udp/benches/throughput.rs @@ -4,10 +4,11 @@ use std::{ net::{Ipv4Addr, Ipv6Addr, UdpSocket}, }; -use criterion::{criterion_group, criterion_main, Criterion}; -use iroh_quinn_udp::{RecvMeta, Transmit, UdpSocketState, BATCH_SIZE}; +use criterion::{Criterion, criterion_group, criterion_main}; use tokio::{io::Interest, runtime::Runtime}; +use iroh_quinn_udp::{BATCH_SIZE, RecvMeta, Transmit, UdpSocketState}; + pub fn criterion_benchmark(c: &mut Criterion) { const TOTAL_BYTES: usize = 10 * 1024 * 1024; const SEGMENT_SIZE: usize = 1280; diff --git a/quinn-udp/build.rs b/quinn-udp/build.rs index d9893ed948..c43c0aa342 100644 --- a/quinn-udp/build.rs +++ b/quinn-udp/build.rs @@ -28,5 +28,6 @@ fn main() { // Convenience aliases apple_fast: { all(apple, feature = "fast-apple-datapath") }, apple_slow: { all(apple, not(feature = "fast-apple-datapath")) }, + wasm_browser: { all(target_family = "wasm", target_os = "unknown") }, } } diff --git a/quinn-udp/src/cmsg/mod.rs 
b/quinn-udp/src/cmsg/mod.rs index 4a1c90e222..3b98be509a 100644 --- a/quinn-udp/src/cmsg/mod.rs +++ b/quinn-udp/src/cmsg/mod.rs @@ -112,6 +112,17 @@ impl<'a, M: MsgHdr> Iterator for Iter<'a, M> { fn next(&mut self) -> Option { let current = self.cmsg.take()?; self.cmsg = unsafe { self.hdr.cmsg_nxt_hdr(current).as_ref() }; + + #[cfg(apple_fast)] + { + // On MacOS < 14 CMSG_NXTHDR might continuously return a zeroed cmsg. In + // such case, return `None` instead, thus indicating the end of + // the cmsghdr chain. + if current.len() < mem::size_of::() { + return None; + } + } + Some(current) } } diff --git a/quinn-udp/src/fallback.rs b/quinn-udp/src/fallback.rs index fa81eb1b12..4444bed814 100644 --- a/quinn-udp/src/fallback.rs +++ b/quinn-udp/src/fallback.rs @@ -4,7 +4,7 @@ use std::{ time::Instant, }; -use super::{log_sendmsg_error, RecvMeta, Transmit, UdpSockRef, IO_ERROR_LOG_INTERVAL}; +use super::{IO_ERROR_LOG_INTERVAL, RecvMeta, Transmit, UdpSockRef, log_sendmsg_error}; /// Fallback UDP socket interface that stubs out all special functionality /// @@ -86,6 +86,30 @@ impl UdpSocketState { 1 } + /// Resize the send buffer of `socket` to `bytes` + #[inline] + pub fn set_send_buffer_size(&self, socket: UdpSockRef<'_>, bytes: usize) -> io::Result<()> { + socket.0.set_send_buffer_size(bytes) + } + + /// Resize the receive buffer of `socket` to `bytes` + #[inline] + pub fn set_recv_buffer_size(&self, socket: UdpSockRef<'_>, bytes: usize) -> io::Result<()> { + socket.0.set_recv_buffer_size(bytes) + } + + /// Get the size of the `socket` send buffer + #[inline] + pub fn send_buffer_size(&self, socket: UdpSockRef<'_>) -> io::Result { + socket.0.send_buffer_size() + } + + /// Get the size of the `socket` receive buffer + #[inline] + pub fn recv_buffer_size(&self, socket: UdpSockRef<'_>) -> io::Result { + socket.0.recv_buffer_size() + } + #[inline] pub fn may_fragment(&self) -> bool { true diff --git a/quinn-udp/src/lib.rs b/quinn-udp/src/lib.rs index 101db9ffca..0f69070f6d 
100644 --- a/quinn-udp/src/lib.rs +++ b/quinn-udp/src/lib.rs @@ -27,12 +27,13 @@ #![warn(unreachable_pub)] #![warn(clippy::use_self)] +use std::net::{IpAddr, Ipv6Addr, SocketAddr}; #[cfg(unix)] use std::os::unix::io::AsFd; #[cfg(windows)] use std::os::windows::io::AsSocket; +#[cfg(not(wasm_browser))] use std::{ - net::{IpAddr, Ipv6Addr, SocketAddr}, sync::Mutex, time::{Duration, Instant}, }; @@ -49,7 +50,7 @@ mod imp; mod imp; // No ECN support -#[cfg(not(any(unix, windows)))] +#[cfg(not(any(wasm_browser, unix, windows)))] #[path = "fallback.rs"] mod imp; @@ -76,10 +77,15 @@ mod log { pub(crate) use no_op::*; } +#[cfg(not(wasm_browser))] pub use imp::UdpSocketState; /// Number of UDP packets to send/receive at a time +#[cfg(not(wasm_browser))] pub const BATCH_SIZE: usize = imp::BATCH_SIZE; +/// Number of UDP packets to send/receive at a time +#[cfg(wasm_browser)] +pub const BATCH_SIZE: usize = 1; /// Metadata for a single buffer filled with bytes received from the network /// @@ -141,13 +147,14 @@ pub struct Transmit<'a> { } /// Log at most 1 IO error per minute +#[cfg(not(wasm_browser))] const IO_ERROR_LOG_INTERVAL: Duration = std::time::Duration::from_secs(60); /// Logs a warning message when sendmsg fails /// /// Logging will only be performed if at least [`IO_ERROR_LOG_INTERVAL`] /// has elapsed since the last error was logged. 
-#[cfg(any(feature = "tracing", feature = "direct-log"))] +#[cfg(all(not(wasm_browser), any(feature = "tracing", feature = "direct-log")))] fn log_sendmsg_error( last_send_error: &Mutex, err: impl core::fmt::Debug, @@ -158,13 +165,19 @@ fn log_sendmsg_error( if now.saturating_duration_since(*last_send_error) > IO_ERROR_LOG_INTERVAL { *last_send_error = now; log::warn!( - "sendmsg error: {:?}, Transmit: {{ destination: {:?}, src_ip: {:?}, ecn: {:?}, len: {:?}, segment_size: {:?} }}", - err, transmit.destination, transmit.src_ip, transmit.ecn, transmit.contents.len(), transmit.segment_size); + "sendmsg error: {:?}, Transmit: {{ destination: {:?}, src_ip: {:?}, ecn: {:?}, len: {:?}, segment_size: {:?} }}", + err, + transmit.destination, + transmit.src_ip, + transmit.ecn, + transmit.contents.len(), + transmit.segment_size + ); } } // No-op -#[cfg(not(any(feature = "tracing", feature = "direct-log")))] +#[cfg(not(any(wasm_browser, feature = "tracing", feature = "direct-log")))] fn log_sendmsg_error(_: &Mutex, _: impl core::fmt::Debug, _: &Transmit) {} /// A borrowed UDP socket @@ -172,6 +185,7 @@ fn log_sendmsg_error(_: &Mutex, _: impl core::fmt::Debug, _: &Transmit) /// On Unix, constructible via `From`. On Windows, constructible via `From`. 
// Wrapper around socket2 to avoid making it a public dependency and incurring stability risk +#[cfg(not(wasm_browser))] pub struct UdpSockRef<'a>(socket2::SockRef<'a>); #[cfg(unix)] @@ -198,18 +212,18 @@ where #[repr(u8)] #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum EcnCodepoint { - #[doc(hidden)] + /// The ECT(0) codepoint, indicating that an endpoint is ECN-capable Ect0 = 0b10, - #[doc(hidden)] + /// The ECT(1) codepoint, indicating that an endpoint is ECN-capable Ect1 = 0b01, - #[doc(hidden)] + /// The CE codepoint, signalling that congestion was experienced Ce = 0b11, } impl EcnCodepoint { /// Create new object from the given bits pub fn from_bits(x: u8) -> Option { - use self::EcnCodepoint::*; + use EcnCodepoint::*; Some(match x & 0b11 { 0b10 => Ect0, 0b01 => Ect1, diff --git a/quinn-udp/src/unix.rs b/quinn-udp/src/unix.rs index c39941d5e2..c892796911 100644 --- a/quinn-udp/src/unix.rs +++ b/quinn-udp/src/unix.rs @@ -6,8 +6,8 @@ use std::{ net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}, os::unix::io::AsRawFd, sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, Mutex, + atomic::{AtomicBool, AtomicUsize, Ordering}, }, time::Instant, }; @@ -15,7 +15,7 @@ use std::{ use socket2::SockRef; use super::{ - cmsg, log_sendmsg_error, EcnCodepoint, RecvMeta, Transmit, UdpSockRef, IO_ERROR_LOG_INTERVAL, + EcnCodepoint, IO_ERROR_LOG_INTERVAL, RecvMeta, Transmit, UdpSockRef, cmsg, log_sendmsg_error, }; // Adapted from https://github.com/apple-oss-distributions/xnu/blob/8d741a5de7ff4191bf97d57b9f54c2f6d4a15585/bsd/sys/socket_private.h @@ -208,6 +208,9 @@ impl UdpSocketState { match send(self, socket.0, transmit) { Ok(()) => Ok(()), Err(e) if e.kind() == io::ErrorKind::WouldBlock => Err(e), + // - EMSGSIZE is expected for MTU probes. Future work might be able to avoid + // these by automatically clamping the MTUD upper bound to the interface MTU. 
+ Err(e) if e.raw_os_error() == Some(libc::EMSGSIZE) => Ok(()), Err(e) => { log_sendmsg_error(&self.last_send_error, e, transmit); @@ -249,6 +252,30 @@ impl UdpSocketState { self.gro_segments } + /// Resize the send buffer of `socket` to `bytes` + #[inline] + pub fn set_send_buffer_size(&self, socket: UdpSockRef<'_>, bytes: usize) -> io::Result<()> { + socket.0.set_send_buffer_size(bytes) + } + + /// Resize the receive buffer of `socket` to `bytes` + #[inline] + pub fn set_recv_buffer_size(&self, socket: UdpSockRef<'_>, bytes: usize) -> io::Result<()> { + socket.0.set_recv_buffer_size(bytes) + } + + /// Get the size of the `socket` send buffer + #[inline] + pub fn send_buffer_size(&self, socket: UdpSockRef<'_>) -> io::Result { + socket.0.send_buffer_size() + } + + /// Get the size of the `socket` receive buffer + #[inline] + pub fn recv_buffer_size(&self, socket: UdpSockRef<'_>) -> io::Result { + socket.0.recv_buffer_size() + } + /// Whether transmitted datagrams might get fragmented by the IP layer /// /// Returns `false` on targets which employ e.g. the `IPV6_DONTFRAG` socket option. @@ -304,57 +331,53 @@ fn send( loop { let n = unsafe { libc::sendmsg(io.as_raw_fd(), &msg_hdr, 0) }; - if n == -1 { - let e = io::Error::last_os_error(); - match e.kind() { - io::ErrorKind::Interrupted => { - // Retry the transmission - continue; - } - io::ErrorKind::WouldBlock => return Err(e), - _ => { - // Some network adapters and drivers do not support GSO. Unfortunately, Linux - // offers no easy way for us to detect this short of an EIO or sometimes EINVAL - // when we try to actually send datagrams using it. - #[cfg(any(target_os = "linux", target_os = "android"))] - if let Some(libc::EIO) | Some(libc::EINVAL) = e.raw_os_error() { - // Prevent new transmits from being scheduled using GSO. Existing GSO transmits - // may already be in the pipeline, so we need to tolerate additional failures. 
- if state.max_gso_segments() > 1 { - crate::log::info!( - "`libc::sendmsg` failed with {e}; halting segmentation offload" - ); - state - .max_gso_segments - .store(1, std::sync::atomic::Ordering::Relaxed); - } - } - // Some arguments to `sendmsg` are not supported. Switch to - // fallback mode and retry if we haven't already. - if e.raw_os_error() == Some(libc::EINVAL) && !state.sendmsg_einval() { - state.set_sendmsg_einval(); - prepare_msg( - transmit, - &dst_addr, - &mut msg_hdr, - &mut iovec, - &mut cmsgs, - encode_src_ip, - state.sendmsg_einval(), + if n >= 0 { + return Ok(()); + } + + let e = io::Error::last_os_error(); + match e.kind() { + // Retry the transmission + io::ErrorKind::Interrupted => continue, + io::ErrorKind::WouldBlock => return Err(e), + _ => { + // Some network adapters and drivers do not support GSO. Unfortunately, Linux + // offers no easy way for us to detect this short of an EIO or sometimes EINVAL + // when we try to actually send datagrams using it. + #[cfg(any(target_os = "linux", target_os = "android"))] + if let Some(libc::EIO) | Some(libc::EINVAL) = e.raw_os_error() { + // Prevent new transmits from being scheduled using GSO. Existing GSO transmits + // may already be in the pipeline, so we need to tolerate additional failures. + if state.max_gso_segments() > 1 { + crate::log::info!( + "`libc::sendmsg` failed with {e}; halting segmentation offload" ); - continue; + state + .max_gso_segments + .store(1, std::sync::atomic::Ordering::Relaxed); } + } - // - EMSGSIZE is expected for MTU probes. Future work might be able to avoid - // these by automatically clamping the MTUD upper bound to the interface MTU. - if e.raw_os_error() != Some(libc::EMSGSIZE) { - return Err(e); - } + // Some arguments to `sendmsg` are not supported. Switch to + // fallback mode and retry if we haven't already. 
+ if e.raw_os_error() == Some(libc::EINVAL) && !state.sendmsg_einval() { + state.set_sendmsg_einval(); + prepare_msg( + transmit, + &dst_addr, + &mut msg_hdr, + &mut iovec, + &mut cmsgs, + encode_src_ip, + state.sendmsg_einval(), + ); + continue; } + + return Err(e); } } - return Ok(()); } } @@ -393,24 +416,17 @@ fn send(state: &UdpSocketState, io: SockRef<'_>, transmit: &Transmit<'_>) -> io: } loop { let n = unsafe { sendmsg_x(io.as_raw_fd(), hdrs.as_ptr(), cnt as u32, 0) }; - if n == -1 { - let e = io::Error::last_os_error(); - match e.kind() { - io::ErrorKind::Interrupted => { - // Retry the transmission - continue; - } - io::ErrorKind::WouldBlock => return Err(e), - _ => { - // - EMSGSIZE is expected for MTU probes. Future work might be able to avoid - // these by automatically clamping the MTUD upper bound to the interface MTU. - if e.raw_os_error() != Some(libc::EMSGSIZE) { - return Err(e); - } - } - } + + if n >= 0 { + return Ok(()); + } + + let e = io::Error::last_os_error(); + match e.kind() { + // Retry the transmission + io::ErrorKind::Interrupted => continue, + _ => return Err(e), } - return Ok(()); } } @@ -431,24 +447,17 @@ fn send(state: &UdpSocketState, io: SockRef<'_>, transmit: &Transmit<'_>) -> io: ); loop { let n = unsafe { libc::sendmsg(io.as_raw_fd(), &hdr, 0) }; - if n == -1 { - let e = io::Error::last_os_error(); - match e.kind() { - io::ErrorKind::Interrupted => { - // Retry the transmission - continue; - } - io::ErrorKind::WouldBlock => return Err(e), - _ => { - // - EMSGSIZE is expected for MTU probes. Future work might be able to avoid - // these by automatically clamping the MTUD upper bound to the interface MTU. 
- if e.raw_os_error() != Some(libc::EMSGSIZE) { - return Err(e); - } - } - } + + if n >= 0 { + return Ok(()); + } + + let e = io::Error::last_os_error(); + match e.kind() { + // Retry the transmission + io::ErrorKind::Interrupted => continue, + _ => return Err(e), } - return Ok(()); } } @@ -476,14 +485,17 @@ fn recv(io: SockRef<'_>, bufs: &mut [IoSliceMut<'_>], meta: &mut [RecvMeta]) -> ptr::null_mut::(), ) }; - if n == -1 { - let e = io::Error::last_os_error(); - if e.kind() == io::ErrorKind::Interrupted { - continue; - } - return Err(e); + + if n >= 0 { + break n; + } + + let e = io::Error::last_os_error(); + match e.kind() { + // Retry receiving + io::ErrorKind::Interrupted => continue, + _ => return Err(e), } - break n; }; for i in 0..(msg_count as usize) { meta[i] = decode_recv(&names[i], &hdrs[i].msg_hdr, hdrs[i].msg_len as usize); @@ -494,7 +506,13 @@ fn recv(io: SockRef<'_>, bufs: &mut [IoSliceMut<'_>], meta: &mut [RecvMeta]) -> #[cfg(apple_fast)] fn recv(io: SockRef<'_>, bufs: &mut [IoSliceMut<'_>], meta: &mut [RecvMeta]) -> io::Result { let mut names = [MaybeUninit::::uninit(); BATCH_SIZE]; - let mut ctrls = [cmsg::Aligned(MaybeUninit::<[u8; CMSG_LEN]>::uninit()); BATCH_SIZE]; + // MacOS 10.15 `recvmsg_x` does not override the `msghdr_x` + // `msg_controllen`. Thus, after the call to `recvmsg_x`, one does not know + // which control messages have been written to. To prevent reading + // uninitialized memory, do not use `MaybeUninit` for `ctrls`, instead + // initialize `ctrls` with `0`s. A control message of all `0`s is + // automatically skipped by `libc::CMSG_NXTHDR`. 
+ let mut ctrls = [cmsg::Aligned([0u8; CMSG_LEN]); BATCH_SIZE]; let mut hdrs = unsafe { mem::zeroed::<[msghdr_x; BATCH_SIZE]>() }; let max_msg_count = bufs.len().min(BATCH_SIZE); for i in 0..max_msg_count { @@ -502,15 +520,16 @@ fn recv(io: SockRef<'_>, bufs: &mut [IoSliceMut<'_>], meta: &mut [RecvMeta]) -> } let msg_count = loop { let n = unsafe { recvmsg_x(io.as_raw_fd(), hdrs.as_mut_ptr(), max_msg_count as _, 0) }; - match n { - -1 => { - let e = io::Error::last_os_error(); - if e.kind() == io::ErrorKind::Interrupted { - continue; - } - return Err(e); - } - n => break n, + + if n >= 0 { + break n; + } + + let e = io::Error::last_os_error(); + match e.kind() { + // Retry receiving + io::ErrorKind::Interrupted => continue, + _ => return Err(e), } }; for i in 0..(msg_count as usize) { @@ -527,17 +546,21 @@ fn recv(io: SockRef<'_>, bufs: &mut [IoSliceMut<'_>], meta: &mut [RecvMeta]) -> prepare_recv(&mut bufs[0], &mut name, &mut ctrl, &mut hdr); let n = loop { let n = unsafe { libc::recvmsg(io.as_raw_fd(), &mut hdr, 0) }; - if n == -1 { - let e = io::Error::last_os_error(); - if e.kind() == io::ErrorKind::Interrupted { - continue; - } - return Err(e); - } + if hdr.msg_flags & libc::MSG_TRUNC != 0 { continue; } - break n; + + if n >= 0 { + break n; + } + + let e = io::Error::last_os_error(); + match e.kind() { + // Retry receiving + io::ErrorKind::Interrupted => continue, + _ => return Err(e), + } }; meta[0] = decode_recv(&name, &hdr, n as usize); Ok(1) @@ -589,11 +612,13 @@ fn prepare_msg( encoder.push(libc::IPPROTO_IPV6, libc::IPV6_TCLASS, ecn); } - // Only set the segment size if it is different from the size of the contents. - // Some network drivers don't like being told to do GSO even if there is effectively only a single segment. + // Only set the segment size if it is less than the size of the contents. + // Some network drivers don't like being told to do GSO even if there is effectively only a single segment (i.e. 
`segment_size == transmit.contents.len()`) + // Additionally, a `segment_size` that is greater than the content also means there is effectively only a single segment. + // This case is actually quite common when splitting up a prepared GSO batch again after GSO has been disabled because the last datagram in a GSO batch is allowed to be smaller than the segment size. if let Some(segment_size) = transmit .segment_size - .filter(|segment_size| *segment_size != transmit.contents.len()) + .filter(|segment_size| *segment_size < transmit.contents.len()) { gso::set_segment_size(&mut encoder, segment_size as u16); } @@ -657,7 +682,7 @@ fn prepare_recv( fn prepare_recv( buf: &mut IoSliceMut, name: &mut MaybeUninit, - ctrl: &mut cmsg::Aligned>, + ctrl: &mut cmsg::Aligned<[u8; CMSG_LEN]>, hdr: &mut msghdr_x, ) { hdr.msg_name = name.as_mut_ptr() as _; diff --git a/quinn-udp/src/windows.rs b/quinn-udp/src/windows.rs index b0e776e27b..1e0410722d 100644 --- a/quinn-udp/src/windows.rs +++ b/quinn-udp/src/windows.rs @@ -13,9 +13,10 @@ use once_cell::sync::Lazy; use windows_sys::Win32::Networking::WinSock; use crate::{ + EcnCodepoint, IO_ERROR_LOG_INTERVAL, RecvMeta, Transmit, UdpSockRef, cmsg::{self, CMsgHdr}, log::debug, - log_sendmsg_error, EcnCodepoint, RecvMeta, Transmit, UdpSockRef, IO_ERROR_LOG_INTERVAL, + log_sendmsg_error, }; /// QUIC-friendly UDP socket for Windows @@ -81,7 +82,12 @@ impl UdpSocketState { WinSock::IP_PKTINFO, OPTION_ON, )?; - set_socket_option(&*socket.0, WinSock::IPPROTO_IP, WinSock::IP_ECN, OPTION_ON)?; + set_socket_option( + &*socket.0, + WinSock::IPPROTO_IP, + WinSock::IP_RECVECN, + OPTION_ON, + )?; } if is_ipv6 { @@ -102,7 +108,7 @@ impl UdpSocketState { set_socket_option( &*socket.0, WinSock::IPPROTO_IPV6, - WinSock::IPV6_ECN, + WinSock::IPV6_RECVECN, OPTION_ON, )?; } @@ -286,6 +292,30 @@ impl UdpSocketState { 64 } + /// Resize the send buffer of `socket` to `bytes` + #[inline] + pub fn set_send_buffer_size(&self, socket: UdpSockRef<'_>, bytes: usize) 
-> io::Result<()> { + socket.0.set_send_buffer_size(bytes) + } + + /// Resize the receive buffer of `socket` to `bytes` + #[inline] + pub fn set_recv_buffer_size(&self, socket: UdpSockRef<'_>, bytes: usize) -> io::Result<()> { + socket.0.set_recv_buffer_size(bytes) + } + + /// Get the size of the `socket` send buffer + #[inline] + pub fn send_buffer_size(&self, socket: UdpSockRef<'_>) -> io::Result { + socket.0.send_buffer_size() + } + + /// Get the size of the `socket` receive buffer + #[inline] + pub fn recv_buffer_size(&self, socket: UdpSockRef<'_>) -> io::Result { + socket.0.recv_buffer_size() + } + #[inline] pub fn may_fragment(&self) -> bool { false diff --git a/quinn-udp/tests/tests.rs b/quinn-udp/tests/tests.rs index 5f4d833197..66992f9a15 100644 --- a/quinn-udp/tests/tests.rs +++ b/quinn-udp/tests/tests.rs @@ -1,8 +1,8 @@ #[cfg(not(any(target_os = "openbsd", target_os = "netbsd", solarish)))] -use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::net::{SocketAddr, SocketAddrV6}; use std::{ io::IoSliceMut, - net::{IpAddr, Ipv4Addr, Ipv6Addr, UdpSocket}, + net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddrV4, UdpSocket}, slice, }; @@ -31,6 +31,29 @@ fn basic() { ); } +#[test] +fn basic_src_ip() { + let send = UdpSocket::bind((Ipv6Addr::LOCALHOST, 0)) + .or_else(|_| UdpSocket::bind((Ipv4Addr::LOCALHOST, 0))) + .unwrap(); + let recv = UdpSocket::bind((Ipv6Addr::LOCALHOST, 0)) + .or_else(|_| UdpSocket::bind((Ipv4Addr::LOCALHOST, 0))) + .unwrap(); + let src_ip = send.local_addr().unwrap().ip(); + let dst_addr = recv.local_addr().unwrap(); + test_send_recv( + &send.into(), + &recv.into(), + Transmit { + destination: dst_addr, + ecn: None, + contents: b"hello", + segment_size: None, + src_ip: Some(src_ip), + }, + ); +} + #[test] fn ecn_v6() { let send = Socket::from(UdpSocket::bind((Ipv6Addr::LOCALHOST, 0)).unwrap()); @@ -186,6 +209,79 @@ fn gso() { ); } +#[test] +fn socket_buffers() { + const BUFFER_SIZE: usize = 123456; + const FACTOR: usize = if 
cfg!(any(target_os = "linux", target_os = "android")) { + 2 // Linux and Android set the buffer to double the requested size + } else { + 1 // Everyone else is sane. + }; + + let send = socket2::Socket::new( + socket2::Domain::IPV4, + socket2::Type::DGRAM, + Some(socket2::Protocol::UDP), + ) + .unwrap(); + let recv = socket2::Socket::new( + socket2::Domain::IPV4, + socket2::Type::DGRAM, + Some(socket2::Protocol::UDP), + ) + .unwrap(); + for sock in [&send, &recv] { + sock.bind(&socket2::SockAddr::from(SocketAddrV4::new( + Ipv4Addr::LOCALHOST, + 0, + ))) + .unwrap(); + + let socket_state = UdpSocketState::new(sock.into()).expect("created socket state"); + + // Change the send buffer size. + let buffer_before = socket_state.send_buffer_size(sock.into()).unwrap(); + assert_ne!( + buffer_before, + BUFFER_SIZE * FACTOR, + "make sure buffer is not already desired size" + ); + socket_state + .set_send_buffer_size(sock.into(), BUFFER_SIZE) + .expect("set send buffer size {buffer_before} -> {BUFFER_SIZE}"); + let buffer_after = socket_state.send_buffer_size(sock.into()).unwrap(); + assert_eq!( + buffer_after, + BUFFER_SIZE * FACTOR, + "setting send buffer size to {BUFFER_SIZE} resulted in {buffer_before} -> {buffer_after}", + ); + + // Change the receive buffer size. 
+ let buffer_before = socket_state.recv_buffer_size(sock.into()).unwrap(); + socket_state + .set_recv_buffer_size(sock.into(), BUFFER_SIZE) + .expect("set recv buffer size {buffer_before} -> {BUFFER_SIZE}"); + let buffer_after = socket_state.recv_buffer_size(sock.into()).unwrap(); + assert_eq!( + buffer_after, + BUFFER_SIZE * FACTOR, + "setting recv buffer size to {BUFFER_SIZE} resulted in {buffer_before} -> {buffer_after}", + ); + } + + test_send_recv( + &send, + &recv, + Transmit { + destination: recv.local_addr().unwrap().as_socket().unwrap(), + ecn: None, + contents: b"hello", + segment_size: None, + src_ip: None, + }, + ); +} + fn test_send_recv(send: &Socket, recv: &Socket, transmit: Transmit) { let send_state = UdpSocketState::new(send.into()).unwrap(); let recv_state = UdpSocketState::new(recv.into()).unwrap(); diff --git a/quinn/Cargo.toml b/quinn/Cargo.toml index 4cc8f248b8..20e08e48f7 100644 --- a/quinn/Cargo.toml +++ b/quinn/Cargo.toml @@ -11,11 +11,16 @@ workspace = ".." edition.workspace = true rust-version.workspace = true + [features] -default = ["log", "platform-verifier", "runtime-tokio", "rustls-ring"] +# NOTE: Please keep this in sync with the feature list in `.github/workflows/codecov.yml`, see +# comment in that file for more information. 
+default = ["log", "platform-verifier", "runtime-tokio", "rustls-ring", "bloom"] # Enables `Endpoint::client` and `Endpoint::server` conveniences aws-lc-rs = ["proto/aws-lc-rs"] aws-lc-rs-fips = ["proto/aws-lc-rs-fips"] +# Enables BloomTokenLog, and uses it by default +bloom = ["proto/bloom"] # Records how long locks are held, and warns if they are held >= 1ms lock_tracking = [] # Provides `ClientConfig::with_platform_verifier()` convenience method @@ -27,7 +32,8 @@ rustls-aws-lc-rs = ["dep:rustls", "aws-lc-rs", "proto/rustls-aws-lc-rs", "proto/ rustls-aws-lc-rs-fips = ["dep:rustls", "aws-lc-rs-fips", "proto/rustls-aws-lc-rs-fips", "proto/aws-lc-rs-fips"] # Enable rustls with the `ring` crypto provider rustls-ring = ["dep:rustls", "ring", "proto/rustls-ring", "proto/ring"] -# Enables `Endpoint::client` and `Endpoint::server` conveniences +# Enable the `ring` crypto provider. +# Outside wasm*-unknown-unknown targets, this enables `Endpoint::client` and `Endpoint::server` conveniences. ring = ["proto/ring"] runtime-tokio = ["tokio/time", "tokio/rt", "tokio/net"] runtime-async-std = ["async-io", "async-std"] @@ -49,7 +55,6 @@ pin-project-lite = { workspace = true } proto = { package = "iroh-quinn-proto", path = "../quinn-proto", version = "0.13.0", default-features = false } rustls = { workspace = true, optional = true } smol = { workspace = true, optional = true } -socket2 = { workspace = true } thiserror = { workspace = true } tracing = { workspace = true } tokio = { workspace = true } @@ -60,6 +65,12 @@ async-global-executor = { workspace = true, optional = true } async-fs = { workspace = true, optional = true } async-executor = { workspace = true, optional = true } +[target.'cfg(not(all(target_family = "wasm", target_os = "unknown")))'.dependencies] +socket2 = { workspace = true } + +[target.'cfg(all(target_family = "wasm", target_os = "unknown"))'.dependencies] +web-time = { workspace = true } + [dev-dependencies] anyhow = { workspace = true } crc = { workspace = 
true } @@ -75,6 +86,9 @@ tracing-futures = { workspace = true } url = { workspace = true } tokio-stream = "0.1.15" +[build-dependencies] +cfg_aliases = { workspace = true } + [[example]] name = "server" required-features = ["rustls-ring"] diff --git a/quinn/benches/bench.rs b/quinn/benches/bench.rs index a8f1b722b3..40dedb46f4 100644 --- a/quinn/benches/bench.rs +++ b/quinn/benches/bench.rs @@ -4,7 +4,7 @@ use std::{ thread, }; -use bencher::{benchmark_group, benchmark_main, Bencher}; +use bencher::{Bencher, benchmark_group, benchmark_main}; use rustls::pki_types::{CertificateDer, PrivatePkcs8KeyDer}; use tokio::runtime::{Builder, Runtime}; use tracing::error_span; diff --git a/quinn/build.rs b/quinn/build.rs new file mode 100644 index 0000000000..7aae56820c --- /dev/null +++ b/quinn/build.rs @@ -0,0 +1,9 @@ +use cfg_aliases::cfg_aliases; + +fn main() { + // Setup cfg aliases + cfg_aliases! { + // Convenience aliases + wasm_browser: { all(target_family = "wasm", target_os = "unknown") }, + } +} diff --git a/quinn/examples/client.rs b/quinn/examples/client.rs index 26c99c7bec..ebe6508509 100644 --- a/quinn/examples/client.rs +++ b/quinn/examples/client.rs @@ -11,10 +11,10 @@ use std::{ time::{Duration, Instant}, }; -use anyhow::{anyhow, Result}; +use anyhow::{Result, anyhow}; use clap::Parser; use iroh_quinn as quinn; -use proto::{crypto::rustls::QuicClientConfig, TransportConfig}; +use proto::{TransportConfig, crypto::rustls::QuicClientConfig}; use rustls::pki_types::CertificateDer; use tracing::{error, info}; use url::Url; diff --git a/quinn/examples/common/mod.rs b/quinn/examples/common/mod.rs index 0ffa51e45e..f1c112cdd8 100644 --- a/quinn/examples/common/mod.rs +++ b/quinn/examples/common/mod.rs @@ -55,8 +55,8 @@ fn configure_client( } /// Returns default server configuration along with its certificate. 
-fn configure_server( -) -> Result<(ServerConfig, CertificateDer<'static>), Box> { +fn configure_server() +-> Result<(ServerConfig, CertificateDer<'static>), Box> { let cert = rcgen::generate_simple_self_signed(vec!["localhost".into()]).unwrap(); let cert_der = CertificateDer::from(cert.cert); let priv_key = PrivatePkcs8KeyDer::from(cert.key_pair.serialize_der()); diff --git a/quinn/examples/server.rs b/quinn/examples/server.rs index 65821a1541..9085de1e8f 100644 --- a/quinn/examples/server.rs +++ b/quinn/examples/server.rs @@ -10,7 +10,7 @@ use std::{ sync::Arc, }; -use anyhow::{anyhow, bail, Context, Result}; +use anyhow::{Context, Result, anyhow, bail}; use clap::Parser; use iroh_quinn as quinn; use proto::crypto::rustls::QuicServerConfig; diff --git a/quinn/src/connection.rs b/quinn/src/connection.rs index f24ce40f92..32dfbe0d69 100644 --- a/quinn/src/connection.rs +++ b/quinn/src/connection.rs @@ -6,27 +6,28 @@ use std::{ net::{IpAddr, SocketAddr}, pin::Pin, sync::{Arc, Weak}, - task::{Context, Poll, Waker}, - time::{Duration, Instant}, + task::{Context, Poll, Waker, ready}, }; use bytes::Bytes; use pin_project_lite::pin_project; use rustc_hash::FxHashMap; use thiserror::Error; -use tokio::sync::{futures::Notified, mpsc, oneshot, watch, Notify}; -use tracing::{debug_span, Instrument, Span}; +use tokio::sync::{Notify, futures::Notified, mpsc, oneshot, watch}; +use tracing::{Instrument, Span, debug_span}; use crate::{ + ConnectionEvent, Duration, Instant, VarInt, mutex::Mutex, + path::OpenPath, recv_stream::RecvStream, runtime::{AsyncTimer, AsyncUdpSocket, Runtime, UdpPoller}, send_stream::SendStream, - udp_transmit, ConnectionEvent, VarInt, + udp_transmit, }; use proto::{ - congestion::Controller, ConnectionError, ConnectionHandle, ConnectionStats, Dir, EndpointEvent, - StreamEvent, StreamId, + ConnectionError, ConnectionHandle, ConnectionStats, Dir, EndpointEvent, PathEvent, PathId, + PathStatus, StreamEvent, StreamId, congestion::Controller, }; /// 
In-progress connection attempt future @@ -239,8 +240,7 @@ struct ConnectionDriver(ConnectionRef); impl Future for ConnectionDriver { type Output = Result<(), io::Error>; - #[allow(unused_mut)] // MSRV - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let conn = &mut *self.0.state.lock("poll"); let span = debug_span!("drive", id = conn.handle.0); @@ -294,7 +294,7 @@ pub struct Connection(ConnectionRef); impl Connection { /// Returns a weak reference to the inner connection struct. pub fn weak_handle(&self) -> WeakConnectionHandle { - WeakConnectionHandle(Arc::downgrade(&self.0 .0)) + WeakConnectionHandle(Arc::downgrade(&self.0.0)) } /// Initiate a new outgoing unidirectional stream. @@ -359,6 +359,19 @@ impl Connection { } } + /// Open a (Multi)Path. + pub fn open_path(&self, addr: SocketAddr, initial_status: PathStatus) -> OpenPath { + let (on_open_path_send, on_open_path_recv) = oneshot::channel(); + let path_id = { + let mut state = self.0.state.lock("open_path"); + let path_id = state.inner.open_path(addr, initial_status); + state.open_path.insert(path_id, on_open_path_send); + path_id + }; + + OpenPath::new(path_id, on_open_path_recv, self.0.clone()) + } + /// Wait for the connection to be closed for any reason /// /// Despite the return type's name, closed connections are often not an error condition at the @@ -581,14 +594,15 @@ impl Connection { self.0.stable_id() } - // Update traffic keys spontaneously for testing purposes. - #[doc(hidden)] + /// Update traffic keys spontaneously + /// + /// This primarily exists for testing purposes. pub fn force_key_update(&self) { self.0 .state .lock("force_key_update") .inner - .initiate_key_update() + .force_key_update() } /// Derive keying material from this connection's TLS session secrets. 
@@ -647,6 +661,14 @@ impl Connection { let conn = self.0.state.lock("external_addr"); conn.observed_external_addr.subscribe() } + + /// Is multipath enabled? + // TODO(flub): not a useful API, once we do real things with multipath we can remove + // this again. + pub fn is_multipath_enabled(&self) -> bool { + let conn = self.0.state.lock("is_multipath_enabled"); + conn.inner.is_multipath_negotiated() + } } pin_project! { @@ -896,6 +918,8 @@ impl ConnectionRef { blocked_writers: FxHashMap::default(), blocked_readers: FxHashMap::default(), stopped: FxHashMap::default(), + open_path: FxHashMap::default(), + close_path: FxHashMap::default(), error: None, ref_count: 0, io_poller: socket.clone().create_io_poller(), @@ -1013,9 +1037,13 @@ pub(crate) struct State { endpoint_events: mpsc::UnboundedSender<(ConnectionHandle, EndpointEvent)>, pub(crate) blocked_writers: FxHashMap, pub(crate) blocked_readers: FxHashMap, - pub(crate) stopped: FxHashMap, + pub(crate) stopped: FxHashMap>, /// Always set to Some before the connection becomes drained pub(crate) error: Option, + /// Tracks paths being opened + open_path: FxHashMap>, + /// Tracks paths being closed + pub(crate) close_path: FxHashMap>, /// Number of live handles that can be used to initiate or handle I/O; excludes the driver ref_count: usize, socket: Arc, @@ -1033,7 +1061,10 @@ impl State { let now = self.runtime.now(); let mut transmits = 0; - let max_datagrams = self.socket.max_transmit_segments(); + let max_datagrams = self + .socket + .max_transmit_segments() + .min(MAX_TRANSMIT_SEGMENTS); loop { // Retry the last transmit, or get a new one. @@ -1154,7 +1185,7 @@ impl State { // `ZeroRttRejected` errors. 
wake_all(&mut self.blocked_writers); wake_all(&mut self.blocked_readers); - wake_all(&mut self.stopped); + wake_all_notify(&mut self.stopped); } } ConnectionLost { reason } => { @@ -1178,9 +1209,9 @@ impl State { // Might mean any number of streams are ready, so we wake up everyone shared.stream_budget_available[dir as usize].notify_waiters(); } - Stream(StreamEvent::Finished { id }) => wake_stream(id, &mut self.stopped), + Stream(StreamEvent::Finished { id }) => wake_stream_notify(id, &mut self.stopped), Stream(StreamEvent::Stopped { id, .. }) => { - wake_stream(id, &mut self.stopped); + wake_stream_notify(id, &mut self.stopped); wake_stream(id, &mut self.blocked_writers); } ObservedAddr(observed) => { @@ -1189,6 +1220,16 @@ impl State { old != *addr }); } + Path(PathEvent::Opened { id }) => { + if let Some(sender) = self.open_path.remove(&id) { + let _ = sender.send(()); + } + } + Path(PathEvent::Closed { id, error_code }) => { + if let Some(sender) = self.close_path.remove(&id) { + let _ = sender.send(error_code); + } + } } } } @@ -1267,7 +1308,7 @@ impl State { if let Some(x) = self.on_connected.take() { let _ = x.send(false); } - wake_all(&mut self.stopped); + wake_all_notify(&mut self.stopped); shared.closed.notify_waiters(); } @@ -1321,6 +1362,18 @@ fn wake_all(wakers: &mut FxHashMap) { wakers.drain().for_each(|(_, waker)| waker.wake()) } +fn wake_stream_notify(stream_id: StreamId, wakers: &mut FxHashMap>) { + if let Some(notify) = wakers.remove(&stream_id) { + notify.notify_waiters() + } +} + +fn wake_all_notify(wakers: &mut FxHashMap>) { + wakers + .drain() + .for_each(|(_, notify)| notify.notify_waiters()) +} + /// Errors that can arise when sending a datagram #[derive(Debug, Error, Clone, Eq, PartialEq)] pub enum SendDatagramError { @@ -1346,3 +1399,10 @@ pub enum SendDatagramError { /// This limits the amount of CPU resources consumed by datagram generation, /// and allows other tasks (like receiving ACKs) to run in between. 
const MAX_TRANSMIT_DATAGRAMS: usize = 20; + +/// The maximum amount of datagrams that are sent in a single transmit +/// +/// This can be lower than the maximum platform capabilities, to avoid excessive +/// memory allocations when calling `poll_transmit()`. Benchmarks have shown +/// that numbers around 10 are a good compromise. +const MAX_TRANSMIT_SEGMENTS: usize = 10; diff --git a/quinn/src/endpoint.rs b/quinn/src/endpoint.rs index 288b852334..bee7fda3b3 100644 --- a/quinn/src/endpoint.rs +++ b/quinn/src/endpoint.rs @@ -10,12 +10,12 @@ use std::{ str, sync::{Arc, Mutex}, task::{Context, Poll, Waker}, - time::Instant, }; -#[cfg(any(feature = "aws-lc-rs", feature = "ring"))] +#[cfg(all(not(wasm_browser), any(feature = "aws-lc-rs", feature = "ring")))] use crate::runtime::default_runtime; use crate::{ + Instant, runtime::{AsyncUdpSocket, Runtime}, udp_transmit, }; @@ -26,15 +26,15 @@ use proto::{ EndpointEvent, ServerConfig, }; use rustc_hash::FxHashMap; -#[cfg(any(feature = "aws-lc-rs", feature = "ring"))] +#[cfg(all(not(wasm_browser), any(feature = "aws-lc-rs", feature = "ring"),))] use socket2::{Domain, Protocol, Socket, Type}; -use tokio::sync::{futures::Notified, mpsc, Notify}; +use tokio::sync::{Notify, futures::Notified, mpsc}; use tracing::{Instrument, Span}; -use udp::{RecvMeta, BATCH_SIZE}; +use udp::{BATCH_SIZE, RecvMeta}; use crate::{ - connection::Connecting, incoming::Incoming, work_limiter::WorkLimiter, ConnectionEvent, - EndpointConfig, VarInt, IO_LOOP_BOUND, RECV_TIME_BOUND, + ConnectionEvent, EndpointConfig, IO_LOOP_BOUND, RECV_TIME_BOUND, VarInt, + connection::Connecting, incoming::Incoming, work_limiter::WorkLimiter, }; /// A QUIC endpoint. @@ -68,7 +68,7 @@ impl Endpoint { /// /// Some environments may not allow creation of dual-stack sockets, in which case an IPv6 /// client will only be able to connect to IPv6 servers. An IPv4 client is never dual-stack. 
- #[cfg(any(feature = "aws-lc-rs", feature = "ring"))] // `EndpointConfig::default()` is only available with these + #[cfg(all(not(wasm_browser), any(feature = "aws-lc-rs", feature = "ring")))] // `EndpointConfig::default()` is only available with these pub fn client(addr: SocketAddr) -> io::Result { let socket = Socket::new(Domain::for_address(addr), Type::DGRAM, Some(Protocol::UDP))?; if addr.is_ipv6() { @@ -98,7 +98,7 @@ impl Endpoint { /// IPv6 address on Windows will not by default be able to communicate with IPv4 /// addresses. Portable applications should bind an address that matches the family they wish to /// communicate within. - #[cfg(any(feature = "aws-lc-rs", feature = "ring"))] // `EndpointConfig::default()` is only available with these + #[cfg(all(not(wasm_browser), any(feature = "aws-lc-rs", feature = "ring")))] // `EndpointConfig::default()` is only available with these pub fn server(config: ServerConfig, addr: SocketAddr) -> io::Result { let socket = std::net::UdpSocket::bind(addr)?; let runtime = default_runtime() @@ -112,6 +112,7 @@ impl Endpoint { } /// Construct an endpoint with arbitrary configuration and socket + #[cfg(not(wasm_browser))] pub fn new( config: EndpointConfig, server_config: Option, @@ -235,6 +236,7 @@ impl Endpoint { /// Switch to a new UDP socket /// /// See [`Endpoint::rebind_abstract()`] for details. + #[cfg(not(wasm_browser))] pub fn rebind(&self, socket: std::net::UdpSocket) -> io::Result<()> { self.rebind_abstract(self.runtime.wrap_udp_socket(socket)?) 
} @@ -357,8 +359,7 @@ pub(crate) struct EndpointDriver(pub(crate) EndpointRef); impl Future for EndpointDriver { type Output = Result<(), io::Error>; - #[allow(unused_mut)] // MSRV - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let mut endpoint = self.0.state.lock().unwrap(); if endpoint.driver.is_none() { endpoint.driver = Some(cx.waker().clone()); diff --git a/quinn/src/incoming.rs b/quinn/src/incoming.rs index 43dca653b2..8eced8cdcd 100644 --- a/quinn/src/incoming.rs +++ b/quinn/src/incoming.rs @@ -48,14 +48,14 @@ impl Incoming { /// Respond with a retry packet, requiring the client to retry with address validation /// - /// Errors if `remote_address_validated()` is true. + /// Errors if `may_retry()` is false. pub fn retry(mut self) -> Result<(), RetryError> { let state = self.0.take().unwrap(); state.endpoint.retry(state.inner).map_err(|e| { - RetryError(Self(Some(State { + RetryError(Box::new(Self(Some(State { inner: e.into_incoming(), endpoint: state.endpoint, - }))) + })))) }) } @@ -79,10 +79,21 @@ impl Incoming { /// /// This means that the sender of the initial packet has proved that they can receive traffic /// sent to `self.remote_address()`. + /// + /// If `self.remote_address_validated()` is false, `self.may_retry()` is guaranteed to be true. + /// The inverse is not guaranteed. pub fn remote_address_validated(&self) -> bool { self.0.as_ref().unwrap().inner.remote_address_validated() } + /// Whether it is legal to respond with a retry packet + /// + /// If `self.remote_address_validated()` is false, `self.may_retry()` is guaranteed to be true. + /// The inverse is not guaranteed. 
+ pub fn may_retry(&self) -> bool { + self.0.as_ref().unwrap().inner.may_retry() + } + /// The original destination CID when initiating the connection pub fn orig_dst_cid(&self) -> ConnectionId { *self.0.as_ref().unwrap().inner.orig_dst_cid() @@ -107,12 +118,12 @@ struct State { /// Error for attempting to retry an [`Incoming`] which already bears a token from a previous retry #[derive(Debug, Error)] #[error("retry() with validated Incoming")] -pub struct RetryError(Incoming); +pub struct RetryError(Box); impl RetryError { /// Get the [`Incoming`] pub fn into_incoming(self) -> Incoming { - self.0 + *self.0 } } diff --git a/quinn/src/lib.rs b/quinn/src/lib.rs index b764a3aa66..784aa99993 100644 --- a/quinn/src/lib.rs +++ b/quinn/src/lib.rs @@ -41,33 +41,33 @@ #![warn(unreachable_pub)] #![warn(clippy::use_self)] -use std::{sync::Arc, time::Duration}; - -macro_rules! ready { - ($e:expr $(,)?) => { - match $e { - std::task::Poll::Ready(t) => t, - std::task::Poll::Pending => return std::task::Poll::Pending, - } - }; -} +use std::sync::Arc; mod connection; mod endpoint; mod incoming; mod mutex; +mod path; mod recv_stream; mod runtime; mod send_stream; mod work_limiter; +#[cfg(not(wasm_browser))] +pub(crate) use std::time::{Duration, Instant}; +#[cfg(wasm_browser)] +pub(crate) use web_time::{Duration, Instant}; + +#[cfg(feature = "bloom")] +pub use proto::BloomTokenLog; pub use proto::{ - congestion, crypto, AckFrequencyConfig, ApplicationClose, Chunk, ClientConfig, ClosedStream, - ConfigError, ConnectError, ConnectionClose, ConnectionError, ConnectionId, - ConnectionIdGenerator, ConnectionStats, Dir, EcnCodepoint, EndpointConfig, FrameStats, - FrameType, IdleTimeout, MtuDiscoveryConfig, PathStats, ServerConfig, Side, StdSystemTime, - StreamId, TimeSource, Transmit, TransportConfig, TransportErrorCode, UdpStats, VarInt, - VarIntBoundsExceeded, Written, + AckFrequencyConfig, ApplicationClose, Chunk, ClientConfig, ClosedStream, ConfigError, + ConnectError, ConnectionClose, 
ConnectionError, ConnectionId, ConnectionIdGenerator, + ConnectionStats, Dir, EcnCodepoint, EndpointConfig, FrameStats, FrameType, IdleTimeout, + MtuDiscoveryConfig, NoneTokenLog, NoneTokenStore, PathStats, ServerConfig, Side, StdSystemTime, + StreamId, TimeSource, TokenLog, TokenMemoryCache, TokenReuseError, TokenStore, Transmit, + TransportConfig, TransportErrorCode, UdpStats, ValidationTokenConfig, VarInt, + VarIntBoundsExceeded, Written, congestion, crypto, }; #[cfg(any(feature = "rustls-aws-lc-rs", feature = "rustls-ring"))] pub use rustls; @@ -79,6 +79,7 @@ pub use crate::connection::{ }; pub use crate::endpoint::{Accept, Endpoint, EndpointStats}; pub use crate::incoming::{Incoming, IncomingFuture, RetryError}; +pub use crate::path::{OpenPath, Path}; pub use crate::recv_stream::{ReadError, ReadExactError, ReadToEndError, RecvStream, ResetError}; #[cfg(feature = "runtime-async-std")] pub use crate::runtime::AsyncStdRuntime; @@ -86,7 +87,7 @@ pub use crate::runtime::AsyncStdRuntime; pub use crate::runtime::SmolRuntime; #[cfg(feature = "runtime-tokio")] pub use crate::runtime::TokioRuntime; -pub use crate::runtime::{default_runtime, AsyncTimer, AsyncUdpSocket, Runtime, UdpPoller}; +pub use crate::runtime::{AsyncTimer, AsyncUdpSocket, Runtime, UdpPoller, default_runtime}; pub use crate::send_stream::{SendStream, StoppedError, WriteError}; #[cfg(test)] diff --git a/quinn/src/mutex.rs b/quinn/src/mutex.rs index a2a1fe2773..7c24df4685 100644 --- a/quinn/src/mutex.rs +++ b/quinn/src/mutex.rs @@ -6,10 +6,8 @@ use std::{ #[cfg(feature = "lock_tracking")] mod tracking { use super::*; - use std::{ - collections::VecDeque, - time::{Duration, Instant}, - }; + use crate::{Duration, Instant}; + use std::collections::VecDeque; use tracing::warn; #[derive(Debug)] @@ -73,7 +71,7 @@ mod tracking { purpose: &'static str, } - impl<'a, T> Drop for MutexGuard<'a, T> { + impl Drop for MutexGuard<'_, T> { fn drop(&mut self) { if self.guard.last_lock_owner.len() == MAX_LOCK_OWNERS { 
self.guard.last_lock_owner.pop_back(); @@ -94,7 +92,7 @@ mod tracking { } } - impl<'a, T> Deref for MutexGuard<'a, T> { + impl Deref for MutexGuard<'_, T> { type Target = T; fn deref(&self) -> &Self::Target { @@ -102,7 +100,7 @@ mod tracking { } } - impl<'a, T> DerefMut for MutexGuard<'a, T> { + impl DerefMut for MutexGuard<'_, T> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.guard.value } diff --git a/quinn/src/path.rs b/quinn/src/path.rs new file mode 100644 index 0000000000..6eb967d647 --- /dev/null +++ b/quinn/src/path.rs @@ -0,0 +1,94 @@ +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll, ready}; + +use proto::{ConnectionError, PathId, PathStatus, VarInt}; +use tokio::sync::oneshot; + +use crate::connection::ConnectionRef; + +/// Future produced by [`crate::Connection::open_path`] +pub struct OpenPath { + opened: oneshot::Receiver<()>, + path_id: PathId, + conn: ConnectionRef, +} + +impl OpenPath { + pub(crate) fn new(path_id: PathId, opened: oneshot::Receiver<()>, conn: ConnectionRef) -> Self { + Self { + opened, + path_id, + conn, + } + } +} + +impl Future for OpenPath { + type Output = Result; + fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { + // TODO: thread through errors + Pin::new(&mut self.opened).poll(ctx).map(|_| { + Ok(Path { + id: self.path_id, + conn: self.conn.clone(), + }) + }) + } +} + +/// An open (Multi)Path +pub struct Path { + id: PathId, + conn: ConnectionRef, +} + +impl Path { + /// The [`PathId`] of this path. + pub fn id(&self) -> PathId { + self.id + } + + /// The current [`PathStatus`] of this path. + pub fn status(&self) -> PathStatus { + self.conn + .state + .lock("path status") + .inner + .path_status(self.id) + } + + /// Closes this path + /// + /// The passed in `error_code` is sent to the remote. + /// The future will resolve to the `error_code` received from the remote. 
+ pub fn close(&self, error_code: VarInt) -> ClosePath { + let (on_path_close_send, on_path_close_recv) = oneshot::channel(); + { + let mut state = self.conn.state.lock("close_path"); + state.inner.close_path(self.id, error_code); + state.close_path.insert(self.id, on_path_close_send); + } + + ClosePath { + closed: on_path_close_recv, + } + } +} + +/// Future produced by [`Path::close`] +pub struct ClosePath { + closed: oneshot::Receiver, +} + +impl Future for ClosePath { + type Output = Result; + fn poll(mut self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { + // TODO: thread through errors + let res = ready!(Pin::new(&mut self.closed).poll(ctx)); + match res { + Ok(code) => Poll::Ready(Ok(code)), + Err(_err) => todo!(), // TODO: appropriate error + } + } +} diff --git a/quinn/src/recv_stream.rs b/quinn/src/recv_stream.rs index be1e8dd249..8b77e62b7f 100644 --- a/quinn/src/recv_stream.rs +++ b/quinn/src/recv_stream.rs @@ -1,8 +1,8 @@ use std::{ - future::{poll_fn, Future}, + future::{Future, poll_fn}, io, pin::Pin, - task::{Context, Poll}, + task::{Context, Poll, ready}, }; use bytes::Bytes; @@ -10,7 +10,7 @@ use proto::{Chunk, Chunks, ClosedStream, ConnectionError, ReadableError, StreamI use thiserror::Error; use tokio::io::ReadBuf; -use crate::{connection::ConnectionRef, VarInt}; +use crate::{VarInt, connection::ConnectionRef}; /// A stream that can only be used to receive data /// @@ -94,14 +94,17 @@ impl RecvStream { .await } - /// Attempts to read from the stream into buf. + /// Attempts to read from the stream into the provided buffer /// - /// On success, returns Poll::Ready(Ok(num_bytes_read)) and places data in - /// the buf. If no data was read, it implies that EOF has been reached. + /// On success, returns `Poll::Ready(Ok(num_bytes_read))` and places data into `buf`. 
If this + /// returns zero bytes read (and `buf` has a non-zero length), that indicates that the remote + /// side has [`finish`]ed the stream and the local side has already read all bytes. /// - /// If no data is available for reading, the method returns Poll::Pending - /// and arranges for the current task (via cx.waker()) to receive a notification - /// when the stream becomes readable or is closed. + /// If no data is available for reading, this returns `Poll::Pending` and arranges for the + /// current task (via `cx.waker()`) to be notified when the stream becomes readable or is + /// closed. + /// + /// [`finish`]: crate::SendStream::finish pub fn poll_read( &mut self, cx: &mut Context, @@ -112,7 +115,19 @@ impl RecvStream { Poll::Ready(Ok(buf.filled().len())) } - fn poll_read_buf( + /// Attempts to read from the stream into the provided buffer, which may be uninitialized + /// + /// On success, returns `Poll::Ready(Ok(()))` and places data into the unfilled portion of + /// `buf`. If this does not write any bytes to `buf` (and `buf.remaining()` is non-zero), that + /// indicates that the remote side has [`finish`]ed the stream and the local side has already + /// read all bytes. + /// + /// If no data is available for reading, this returns `Poll::Pending` and arranges for the + /// current task (via `cx.waker()`) to be notified when the stream becomes readable or is + /// closed. + /// + /// [`finish`]: crate::SendStream::finish + pub fn poll_read_buf( &mut self, cx: &mut Context, buf: &mut ReadBuf<'_>, @@ -556,7 +571,7 @@ impl From for ReadError { impl From for io::Error { fn from(x: ReadError) -> Self { - use self::ReadError::*; + use ReadError::*; let kind = match x { Reset { .. 
} | ZeroRttRejected => io::ErrorKind::ConnectionReset, ConnectionLost(_) | ClosedStream => io::ErrorKind::NotConnected, diff --git a/quinn/src/runtime.rs b/quinn/src/runtime.rs index 0bf30b5682..927becbd58 100644 --- a/quinn/src/runtime.rs +++ b/quinn/src/runtime.rs @@ -6,11 +6,12 @@ use std::{ pin::Pin, sync::Arc, task::{Context, Poll}, - time::Instant, }; use udp::{RecvMeta, Transmit}; +use crate::Instant; + /// Abstracts I/O and timer operations for runtime independence pub trait Runtime: Send + Sync + Debug + 'static { /// Construct a timer that will expire at `i` @@ -18,6 +19,7 @@ pub trait Runtime: Send + Sync + Debug + 'static { /// Drive `future` to completion in the background fn spawn(&self, future: Pin + Send>>); /// Convert `t` into the socket type used by this runtime + #[cfg(not(wasm_browser))] fn wrap_udp_socket(&self, t: std::net::UdpSocket) -> io::Result>; /// Look up the current time /// @@ -116,7 +118,8 @@ impl UdpPollHelper { #[cfg(any( feature = "runtime-async-std", feature = "runtime-smol", - feature = "runtime-tokio" + feature = "runtime-tokio", + feature = "async-io" ))] fn new(make_fut: MakeFut) -> Self { Self { @@ -187,10 +190,12 @@ pub fn default_runtime() -> Option> { #[cfg(feature = "runtime-tokio")] mod tokio; +// Due to MSRV, we must specify `self::` where there's crate/module ambiguity #[cfg(feature = "runtime-tokio")] pub use self::tokio::TokioRuntime; #[cfg(feature = "async-io")] mod async_io; -#[cfg(feature = "async-io")] +// Due to MSRV, we must specify `self::` where there's crate/module ambiguity +#[cfg(any(feature = "runtime-smol", feature = "runtime-async-std"))] pub use self::async_io::*; diff --git a/quinn/src/runtime/async_io.rs b/quinn/src/runtime/async_io.rs index 63c12ec2d0..3476bc9133 100644 --- a/quinn/src/runtime/async_io.rs +++ b/quinn/src/runtime/async_io.rs @@ -3,18 +3,21 @@ use std::{ io, pin::Pin, sync::Arc, - task::{Context, Poll}, + task::{Context, Poll, ready}, time::Instant, }; use async_io::{Async, Timer}; 
-use super::{AsyncTimer, AsyncUdpSocket, Runtime, UdpPollHelper}; +#[cfg(any(feature = "runtime-smol", feature = "runtime-async-std"))] +use super::Runtime; +use super::{AsyncTimer, AsyncUdpSocket, UdpPollHelper}; -#[cfg(feature = "smol")] +#[cfg(feature = "runtime-smol")] +// Due to MSRV, we must specify `self::` where there's crate/module ambiguity pub use self::smol::SmolRuntime; -#[cfg(feature = "smol")] +#[cfg(feature = "runtime-smol")] mod smol { use super::*; @@ -40,10 +43,11 @@ mod smol { } } -#[cfg(feature = "async-std")] +#[cfg(feature = "runtime-async-std")] +// Due to MSRV, we must specify `self::` where there's crate/module ambiguity pub use self::async_std::AsyncStdRuntime; -#[cfg(feature = "async-std")] +#[cfg(feature = "runtime-async-std")] mod async_std { use super::*; @@ -86,6 +90,7 @@ struct UdpSocket { } impl UdpSocket { + #[cfg(any(feature = "runtime-smol", feature = "runtime-async-std"))] fn new(sock: std::net::UdpSocket) -> io::Result { Ok(Self { inner: udp::UdpSocketState::new((&sock).into())?, diff --git a/quinn/src/runtime/tokio.rs b/quinn/src/runtime/tokio.rs index ad321a240c..0e423660d3 100644 --- a/quinn/src/runtime/tokio.rs +++ b/quinn/src/runtime/tokio.rs @@ -3,13 +3,13 @@ use std::{ io, pin::Pin, sync::Arc, - task::{Context, Poll}, + task::{Context, Poll, ready}, time::Instant, }; use tokio::{ io::Interest, - time::{sleep_until, Sleep}, + time::{Sleep, sleep_until}, }; use super::{AsyncTimer, AsyncUdpSocket, Runtime, UdpPollHelper}; diff --git a/quinn/src/send_stream.rs b/quinn/src/send_stream.rs index 08e8ef4a21..27e199f502 100644 --- a/quinn/src/send_stream.rs +++ b/quinn/src/send_stream.rs @@ -1,7 +1,7 @@ use std::{ - future::Future, + future::{Future, poll_fn}, io, - pin::Pin, + pin::{Pin, pin}, task::{Context, Poll}, }; @@ -9,7 +9,10 @@ use bytes::Bytes; use proto::{ClosedStream, ConnectionError, FinishError, StreamId, Written}; use thiserror::Error; -use crate::{connection::ConnectionRef, VarInt}; +use crate::{ + VarInt, + 
connection::{ConnectionRef, State}, +}; /// A stream that can only be used to send data /// @@ -50,14 +53,18 @@ impl SendStream { /// /// This operation is cancel-safe. pub async fn write(&mut self, buf: &[u8]) -> Result { - Write { stream: self, buf }.await + poll_fn(|cx| self.execute_poll(cx, |s| s.write(buf))).await } /// Convenience method to write an entire buffer to the stream /// /// This operation is *not* cancel-safe. - pub async fn write_all(&mut self, buf: &[u8]) -> Result<(), WriteError> { - WriteAll { stream: self, buf }.await + pub async fn write_all(&mut self, mut buf: &[u8]) -> Result<(), WriteError> { + while !buf.is_empty() { + let written = self.write(buf).await?; + buf = &buf[written..]; + } + Ok(()) } /// Write chunks to the stream @@ -68,30 +75,26 @@ impl SendStream { /// /// This operation is cancel-safe. pub async fn write_chunks(&mut self, bufs: &mut [Bytes]) -> Result { - WriteChunks { stream: self, bufs }.await + poll_fn(|cx| self.execute_poll(cx, |s| s.write_chunks(bufs))).await } /// Convenience method to write a single chunk in its entirety to the stream /// /// This operation is *not* cancel-safe. pub async fn write_chunk(&mut self, buf: Bytes) -> Result<(), WriteError> { - WriteChunk { - stream: self, - buf: [buf], - } - .await + self.write_all_chunks(&mut [buf]).await?; + Ok(()) } /// Convenience method to write an entire list of chunks to the stream /// /// This operation is *not* cancel-safe. 
- pub async fn write_all_chunks(&mut self, bufs: &mut [Bytes]) -> Result<(), WriteError> { - WriteAllChunks { - stream: self, - bufs, - offset: 0, + pub async fn write_all_chunks(&mut self, mut bufs: &mut [Bytes]) -> Result<(), WriteError> { + while !bufs.is_empty() { + let written = self.write_chunks(bufs).await?; + bufs = &mut bufs[written.chunks..]; } - .await + Ok(()) } fn execute_poll(&mut self, cx: &mut Context, write_fn: F) -> Poll> @@ -144,7 +147,7 @@ impl SendStream { conn.wake(); Ok(()) } - Err(FinishError::ClosedStream) => Err(ClosedStream::new()), + Err(FinishError::ClosedStream) => Err(ClosedStream::default()), // Harmless. If the application needs to know about stopped streams at this point, it // should call `stopped`. Err(FinishError::Stopped(_)) => Ok(()), @@ -199,28 +202,31 @@ impl SendStream { /// For a variety of reasons, the peer may not send acknowledgements immediately upon receiving /// data. As such, relying on `stopped` to know when the peer has read a stream to completion /// may introduce more latency than using an application-level response of some sort. 
- pub async fn stopped(&mut self) -> Result, StoppedError> { - Stopped { stream: self }.await - } - - #[doc(hidden)] - pub fn poll_stopped(&mut self, cx: &mut Context) -> Poll, StoppedError>> { - let mut conn = self.conn.state.lock("SendStream::poll_stopped"); - - if self.is_0rtt { - conn.check_0rtt() - .map_err(|()| StoppedError::ZeroRttRejected)?; - } - - match conn.inner.send_stream(self.stream).stopped() { - Err(_) => Poll::Ready(Ok(None)), - Ok(Some(error_code)) => Poll::Ready(Ok(Some(error_code))), - Ok(None) => { - if let Some(e) = &conn.error { - return Poll::Ready(Err(e.clone().into())); + pub fn stopped( + &self, + ) -> impl Future, StoppedError>> + Send + Sync + 'static { + let conn = self.conn.clone(); + let stream = self.stream; + let is_0rtt = self.is_0rtt; + async move { + loop { + // The `Notify::notified` future needs to be created while the lock is being held, + // otherwise a wakeup could be missed if triggered inbetween releasing the lock + // and creating the future. + // The lock may only be held in a block without `await`s, otherwise the future + // becomes `!Send`. `Notify::notified` is lifetime-bound to `Notify`, therefore + // we need to declare `notify` outside of the block, and initialize it inside. + let notify; + { + let mut conn = conn.state.lock("SendStream::stopped"); + if let Some(output) = send_stream_stopped(&mut conn, stream, is_0rtt) { + return output; + } + + notify = conn.stopped.entry(stream).or_default().clone(); + notify.notified() } - conn.stopped.insert(self.stream, cx.waker().clone()); - Poll::Pending + .await } } } @@ -242,14 +248,33 @@ impl SendStream { cx: &mut Context, buf: &[u8], ) -> Poll> { - self.get_mut().execute_poll(cx, |stream| stream.write(buf)) + pin!(self.get_mut().write(buf)).as_mut().poll(cx) + } +} + +/// Check if a send stream is stopped. +/// +/// Returns `Some` if the stream is stopped or the connection is closed. +/// Returns `None` if the stream is not stopped. 
+fn send_stream_stopped( + conn: &mut State, + stream: StreamId, + is_0rtt: bool, +) -> Option, StoppedError>> { + if is_0rtt && conn.check_0rtt().is_err() { + return Some(Err(StoppedError::ZeroRttRejected)); + } + match conn.inner.send_stream(stream).stopped() { + Err(ClosedStream { .. }) => Some(Ok(None)), + Ok(Some(error_code)) => Some(Ok(Some(error_code))), + Ok(None) => conn.error.clone().map(|error| Err(error.into())), } } #[cfg(feature = "futures-io")] impl futures_io::AsyncWrite for SendStream { fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) -> Poll> { - Self::execute_poll(self.get_mut(), cx, |stream| stream.write(buf)).map_err(Into::into) + self.poll_write(cx, buf).map_err(Into::into) } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { @@ -261,14 +286,13 @@ impl futures_io::AsyncWrite for SendStream { } } -#[cfg(feature = "runtime-tokio")] impl tokio::io::AsyncWrite for SendStream { fn poll_write( self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll> { - Self::execute_poll(self.get_mut(), cx, |stream| stream.write(buf)).map_err(Into::into) + self.poll_write(cx, buf).map_err(Into::into) } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { @@ -285,7 +309,6 @@ impl Drop for SendStream { let mut conn = self.conn.state.lock("SendStream::drop"); // clean up any previously registered wakers - conn.stopped.remove(&self.stream); conn.blocked_writers.remove(&self.stream); if conn.error.is_some() || (self.is_0rtt && conn.check_0rtt().is_err()) { @@ -304,122 +327,6 @@ impl Drop for SendStream { } } -/// Future produced by `SendStream::stopped` -struct Stopped<'a> { - stream: &'a mut SendStream, -} - -impl Future for Stopped<'_> { - type Output = Result, StoppedError>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - self.get_mut().stream.poll_stopped(cx) - } -} - -/// Future produced by [`SendStream::write()`]. 
-/// -/// [`SendStream::write()`]: crate::SendStream::write -struct Write<'a> { - stream: &'a mut SendStream, - buf: &'a [u8], -} - -impl Future for Write<'_> { - type Output = Result; - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = self.get_mut(); - let buf = this.buf; - this.stream.execute_poll(cx, |s| s.write(buf)) - } -} - -/// Future produced by [`SendStream::write_all()`]. -/// -/// [`SendStream::write_all()`]: crate::SendStream::write_all -struct WriteAll<'a> { - stream: &'a mut SendStream, - buf: &'a [u8], -} - -impl Future for WriteAll<'_> { - type Output = Result<(), WriteError>; - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = self.get_mut(); - loop { - if this.buf.is_empty() { - return Poll::Ready(Ok(())); - } - let buf = this.buf; - let n = ready!(this.stream.execute_poll(cx, |s| s.write(buf)))?; - this.buf = &this.buf[n..]; - } - } -} - -/// Future produced by [`SendStream::write_chunks()`]. -/// -/// [`SendStream::write_chunks()`]: crate::SendStream::write_chunks -struct WriteChunks<'a> { - stream: &'a mut SendStream, - bufs: &'a mut [Bytes], -} - -impl Future for WriteChunks<'_> { - type Output = Result; - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = self.get_mut(); - let bufs = &mut *this.bufs; - this.stream.execute_poll(cx, |s| s.write_chunks(bufs)) - } -} - -/// Future produced by [`SendStream::write_chunk()`]. -/// -/// [`SendStream::write_chunk()`]: crate::SendStream::write_chunk -struct WriteChunk<'a> { - stream: &'a mut SendStream, - buf: [Bytes; 1], -} - -impl Future for WriteChunk<'_> { - type Output = Result<(), WriteError>; - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = self.get_mut(); - loop { - if this.buf[0].is_empty() { - return Poll::Ready(Ok(())); - } - let bufs = &mut this.buf[..]; - ready!(this.stream.execute_poll(cx, |s| s.write_chunks(bufs)))?; - } - } -} - -/// Future produced by [`SendStream::write_all_chunks()`]. 
-/// -/// [`SendStream::write_all_chunks()`]: crate::SendStream::write_all_chunks -struct WriteAllChunks<'a> { - stream: &'a mut SendStream, - bufs: &'a mut [Bytes], - offset: usize, -} - -impl Future for WriteAllChunks<'_> { - type Output = Result<(), WriteError>; - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = self.get_mut(); - loop { - if this.offset == this.bufs.len() { - return Poll::Ready(Ok(())); - } - let bufs = &mut this.bufs[this.offset..]; - let written = ready!(this.stream.execute_poll(cx, |s| s.write_chunks(bufs)))?; - this.offset += written.chunks; - } - } -} - /// Errors that arise from writing to a stream #[derive(Debug, Error, Clone, PartialEq, Eq)] pub enum WriteError { @@ -462,7 +369,7 @@ impl From for WriteError { impl From for io::Error { fn from(x: WriteError) -> Self { - use self::WriteError::*; + use WriteError::*; let kind = match x { Stopped(_) | ZeroRttRejected => io::ErrorKind::ConnectionReset, ConnectionLost(_) | ClosedStream => io::ErrorKind::NotConnected, diff --git a/quinn/src/tests.rs b/quinn/src/tests.rs index fb4fc1bd2f..7c03a2e3bf 100755 --- a/quinn/src/tests.rs +++ b/quinn/src/tests.rs @@ -14,18 +14,16 @@ use std::{ }; use crate::runtime::TokioRuntime; +use crate::{Duration, Instant}; use bytes::Bytes; -use proto::{crypto::rustls::QuicClientConfig, RandomConnectionIdGenerator}; -use rand::{rngs::StdRng, RngCore, SeedableRng}; +use proto::{RandomConnectionIdGenerator, crypto::rustls::QuicClientConfig}; +use rand::{RngCore, SeedableRng, rngs::StdRng}; use rustls::{ - pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer}, RootCertStore, + pki_types::{CertificateDer, PrivateKeyDer, PrivatePkcs8KeyDer}, }; -use tokio::{ - runtime::{Builder, Runtime}, - time::{Duration, Instant}, -}; -use tracing::{error_span, info}; +use tokio::runtime::{Builder, Runtime}; +use tracing::{error_span, info, info_span}; use tracing_futures::Instrument as _; use tracing_subscriber::EnvFilter; @@ -160,7 +158,7 @@ fn 
read_after_close() { .unwrap() .await .expect("connect"); - tokio::time::sleep_until(Instant::now() + Duration::from_millis(100)).await; + tokio::time::sleep(Duration::from_millis(100)).await; let mut stream = new_conn.accept_uni().await.expect("incoming streams"); let msg = stream.read_to_end(usize::MAX).await.expect("read_to_end"); assert_eq!(msg, MSG); @@ -216,10 +214,10 @@ fn export_keying_material() { async fn ip_blocking() { let _guard = subscribe(); let endpoint_factory = EndpointFactory::new(); - let client_1 = endpoint_factory.endpoint(); + let client_1 = endpoint_factory.endpoint("client_1"); let client_1_addr = client_1.local_addr().unwrap(); - let client_2 = endpoint_factory.endpoint(); - let server = endpoint_factory.endpoint(); + let client_2 = endpoint_factory.endpoint("client_2"); + let server = endpoint_factory.endpoint("server"); let server_addr = server.local_addr().unwrap(); let server_task = tokio::spawn(async move { loop { @@ -258,11 +256,11 @@ async fn ip_blocking() { /// Construct an endpoint suitable for connecting to itself fn endpoint() -> Endpoint { - EndpointFactory::new().endpoint() + EndpointFactory::new().endpoint("ep") } fn endpoint_with_config(transport_config: TransportConfig) -> Endpoint { - EndpointFactory::new().endpoint_with_config(transport_config) + EndpointFactory::new().endpoint_with_config("ep", transport_config) } /// Constructs endpoints suitable for connecting to themselves and each other @@ -279,11 +277,18 @@ impl EndpointFactory { } } - fn endpoint(&self) -> Endpoint { - self.endpoint_with_config(TransportConfig::default()) + fn endpoint(&self, name: impl Into) -> Endpoint { + self.endpoint_with_config(name, TransportConfig::default()) } - fn endpoint_with_config(&self, transport_config: TransportConfig) -> Endpoint { + fn endpoint_with_config( + &self, + name: impl Into, + transport_config: TransportConfig, + ) -> Endpoint { + let span = info_span!("dummy"); + span.record("otel.name", name.into()); + let _guard = 
span.entered(); let key = PrivateKeyDer::Pkcs8(self.cert.key_pair.serialize_der().into()); let transport_config = Arc::new(transport_config); let mut server_config = @@ -820,20 +825,11 @@ async fn multiple_conns_with_zero_length_cids() { factory .endpoint_config .cid_generator(|| Box::new(RandomConnectionIdGenerator::new(0))); - let server = { - let _guard = error_span!("server").entered(); - factory.endpoint() - }; + let server = factory.endpoint("server"); let server_addr = server.local_addr().unwrap(); - let client1 = { - let _guard = error_span!("client1").entered(); - factory.endpoint() - }; - let client2 = { - let _guard = error_span!("client2").entered(); - factory.endpoint() - }; + let client1 = factory.endpoint("client1"); + let client2 = factory.endpoint("client2"); let client1 = async move { let conn = client1 @@ -863,3 +859,114 @@ async fn multiple_conns_with_zero_length_cids() { .instrument(error_span!("server")); tokio::join!(client1, client2, server); } + +#[tokio::test] +async fn stream_stopped() { + let _guard = subscribe(); + let factory = EndpointFactory::new(); + let server = { factory.endpoint("server") }; + let server_addr = server.local_addr().unwrap(); + + let client = { factory.endpoint("client1") }; + + let client = async move { + let conn = client + .connect(server_addr, "localhost") + .unwrap() + .await + .unwrap(); + let mut stream = conn.open_uni().await.unwrap(); + let stopped1 = stream.stopped(); + let stopped2 = stream.stopped(); + let stopped3 = stream.stopped(); + + stream.write_all(b"hi").await.unwrap(); + // spawn one of the futures into a task + let stopped1 = tokio::task::spawn(stopped1); + // verify that both futures resolved + let (stopped1, stopped2) = tokio::join!(stopped1, stopped2); + assert!(matches!(stopped1, Ok(Ok(Some(val))) if val == 42u32.into())); + assert!(matches!(stopped2, Ok(Some(val)) if val == 42u32.into())); + // drop the stream + drop(stream); + // verify that a future also resolves after dropping the 
stream + let stopped3 = stopped3.await; + assert_eq!(stopped3, Ok(Some(42u32.into()))); + }; + let client = + tokio::time::timeout(Duration::from_millis(100), client).instrument(error_span!("client")); + let server = async move { + let conn = server.accept().await.unwrap().await.unwrap(); + let mut stream = conn.accept_uni().await.unwrap(); + let mut buf = [0u8; 2]; + stream.read_exact(&mut buf).await.unwrap(); + stream.stop(42u32.into()).unwrap(); + conn + } + .instrument(error_span!("server")); + let (client, conn) = tokio::join!(client, server); + client.expect("timeout"); + drop(conn); +} + +#[tokio::test] +async fn stream_stopped_2() { + let _guard = subscribe(); + let endpoint = endpoint(); + + let (conn, _server_conn) = tokio::try_join!( + endpoint + .connect(endpoint.local_addr().unwrap(), "localhost") + .unwrap(), + async { endpoint.accept().await.unwrap().await } + ) + .unwrap(); + let send_stream = conn.open_uni().await.unwrap(); + let stopped = tokio::time::timeout(Duration::from_millis(100), send_stream.stopped()) + .instrument(error_span!("stopped")); + tokio::pin!(stopped); + // poll the future once so that the waker is registered. + tokio::select! 
{ + biased; + _x = &mut stopped => {}, + _x = std::future::ready(()) => {} + } + // drop the send stream + drop(send_stream); + // make sure the stopped future still resolves + let res = stopped.await; + assert_eq!(res, Ok(Ok(None))); +} + +#[tokio::test] +async fn test_multipath_negotiated() { + let _logging = subscribe(); + let factory = EndpointFactory::new(); + + let mut transport_config = TransportConfig::default(); + transport_config.max_concurrent_multipath_paths(1); + let server = factory.endpoint_with_config("server", transport_config); + let server_addr = server.local_addr().unwrap(); + + let server_task = async move { + let conn = server.accept().await.unwrap().await.unwrap(); + conn.closed().await; + } + .instrument(info_span!("server")); + + let mut transport_config = TransportConfig::default(); + transport_config.max_concurrent_multipath_paths(1); + let client = factory.endpoint_with_config("client", transport_config); + + let client_task = async move { + let conn = client + .connect(server_addr, "localhost") + .unwrap() + .await + .unwrap(); + assert!(conn.is_multipath_enabled()); + } + .instrument(info_span!("client")); + + tokio::join!(server_task, client_task); +} diff --git a/quinn/src/work_limiter.rs b/quinn/src/work_limiter.rs index efffd2cca0..c3c3d3551d 100644 --- a/quinn/src/work_limiter.rs +++ b/quinn/src/work_limiter.rs @@ -1,4 +1,4 @@ -use std::time::{Duration, Instant}; +use crate::{Duration, Instant}; /// Limits the amount of time spent on a certain type of work in a cycle /// diff --git a/quinn/tests/many_connections.rs b/quinn/tests/many_connections.rs index f761086875..ad972832f2 100644 --- a/quinn/tests/many_connections.rs +++ b/quinn/tests/many_connections.rs @@ -104,8 +104,8 @@ async fn read_from_peer(mut stream: quinn::RecvStream) -> Result<(), quinn::Conn Ok(()) } Err(e) => { - use quinn::ReadToEndError::*; use ReadError::*; + use quinn::ReadToEndError::*; match e { TooLong | Read(ClosedStream) | Read(ZeroRttRejected) | 
Read(IllegalOrderedRead) => { unreachable!() @@ -184,9 +184,8 @@ fn hash_correct(data: &[u8], crc: &Crc) -> bool { encoded_hash == actual_hash } -#[allow(unsafe_code)] fn random_vec(size: usize) -> Vec { let mut ret = vec![0; size]; - rand::thread_rng().fill_bytes(&mut ret[..]); + rand::rng().fill_bytes(&mut ret[..]); ret } diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000000..3501136812 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1 @@ +style_edition = "2024"