From 772c65eab857a21999be922347f8ccae6c3c3f50 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Mon, 30 Jun 2025 13:12:34 +0200
Subject: [PATCH 001/305] docs: add comprehensive documentation for
 LaunchContext type system (#17120)

---
 crates/node/builder/src/launch/common.rs | 61 ++++++++++++++++++++++--
 1 file changed, 57 insertions(+), 4 deletions(-)

diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs
index f7696799e97..3001256b96f 100644
--- a/crates/node/builder/src/launch/common.rs
+++ b/crates/node/builder/src/launch/common.rs
@@ -1,4 +1,33 @@
 //! Helper types that can be used by launchers.
+//!
+//! ## Launch Context Type System
+//!
+//! The node launch process uses a type-state pattern to ensure correct initialization
+//! order at compile time. Methods are only available when their prerequisites are met.
+//!
+//! ### Core Types
+//!
+//! - [`LaunchContext`]: Base context with executor and data directory
+//! - [`LaunchContextWith`]: Context with an attached value of type `T`
+//! - [`Attached`]: Pairs values, preserving both previous (L) and new (R) state
+//!
+//! ### Helper Attachments
+//!
+//! - [`WithConfigs`]: Node config + TOML config
+//! - [`WithMeteredProvider`]: Provider factory with metrics
+//! - [`WithMeteredProviders`]: Provider factory + blockchain provider
+//! - [`WithComponents`]: Final form with all components
+//!
+//! ### Method Availability
+//!
+//! Methods are implemented on specific type combinations:
+//! - `impl<T> LaunchContextWith<T>`: Generic methods available for any attachment
+//! - `impl LaunchContextWith<WithConfigs>`: Config-specific methods
+//! - `impl<DB> LaunchContextWith<Attached<WithConfigs, DB>>`: Database operations
+//! - `impl LaunchContextWith<Attached<WithConfigs, WithMeteredProviders>>`: Provider operations
+//! - etc.
+//!
+//! This ensures correct initialization order without runtime checks.
 
 use crate::{
     components::{NodeComponents, NodeComponentsBuilder},
@@ -70,7 +99,22 @@ use reth_node_events::{cl::ConsensusLayerHealthEvents, node::NodeEvent};
 
 /// Reusable setup for launching a node.
 ///
-/// This provides commonly used boilerplate for launching a node.
+/// This is the entry point for the node launch process. It implements a builder
+/// pattern using type-state programming to enforce correct initialization order.
+///
+/// ## Type Evolution
+///
+/// Starting from `LaunchContext`, each method transforms the type to reflect
+/// accumulated state:
+///
+/// ```text
+/// LaunchContext
+/// └─> LaunchContextWith<WithConfigs>
+///     └─> LaunchContextWith<Attached<WithConfigs, DB>>
+///         └─> LaunchContextWith<Attached<WithConfigs, ProviderFactory>>
+///             └─> LaunchContextWith<Attached<WithConfigs, WithMeteredProviders>>
+///                 └─> LaunchContextWith<Attached<WithConfigs, WithComponents>>
+/// ```
 #[derive(Debug, Clone)]
 pub struct LaunchContext {
     /// The task executor for the node.
@@ -192,9 +236,14 @@ impl LaunchContext {
 
 /// A [`LaunchContext`] along with an additional value.
 ///
-/// This can be used to sequentially attach additional values to the type during the launch process.
+/// The type parameter `T` represents the current state of the launch process.
+/// Methods are conditionally implemented based on `T`, ensuring operations
+/// are only available when their prerequisites are met.
 ///
-/// The type provides common boilerplate for launching a node depending on the additional value.
+/// For example:
+/// - Config methods when `T = WithConfigs`
+/// - Database operations when `T = Attached<WithConfigs, DB>`
+/// - Provider operations when `T = Attached<WithConfigs, ProviderFactory>`
 #[derive(Debug, Clone)]
 pub struct LaunchContextWith<T> {
     /// The wrapped launch context.
@@ -1074,7 +1123,11 @@ where
     }
 }
 
-/// Joins two attachments together.
+/// Joins two attachments together, preserving access to both values.
+///
+/// This type enables the launch process to accumulate state while maintaining
+/// access to all previously attached components. The `left` field holds the
+/// previous state, while `right` holds the newly attached component.
 #[derive(Clone, Copy, Debug)]
 pub struct Attached<L, R> {
     left: L,

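The type-state pattern this patch documents can be reduced to a small, self-contained sketch. Everything below is illustrative stand-in code (the real `LaunchContextWith` and `Attached` carry executors, configs, databases, and providers), showing only how method availability is gated by the attachment type:

```rust
// Minimal sketch of the type-state pattern described above.
// `Configs` and `Db` are hypothetical stand-ins, not reth's actual types.
struct LaunchContext;
struct LaunchContextWith<T> {
    inner: LaunchContext,
    attachment: T,
}
struct Attached<L, R> {
    left: L,
    right: R,
}
struct Configs;
struct Db;

impl LaunchContext {
    // The only entry point: configs must be attached first.
    fn with_configs(self, configs: Configs) -> LaunchContextWith<Configs> {
        LaunchContextWith { inner: self, attachment: configs }
    }
}

impl LaunchContextWith<Configs> {
    // A database can only be attached once configs exist; the previous
    // attachment is preserved on the `left` side of `Attached`.
    fn attach_db(self, db: Db) -> LaunchContextWith<Attached<Configs, Db>> {
        LaunchContextWith {
            inner: self.inner,
            attachment: Attached { left: self.attachment, right: db },
        }
    }
}

fn main() {
    // Compiles only in this order; `LaunchContext` has no `attach_db`,
    // so skipping a step is a compile-time error rather than a runtime check.
    let _ctx = LaunchContext.with_configs(Configs).attach_db(Db);
}
```
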
From bf260bfcb8d8f1857830e05c92b8bb8526de7b27 Mon Sep 17 00:00:00 2001
From: PixelPilot <161360836+PixelPil0t1@users.noreply.github.com>
Date: Mon, 30 Jun 2025 13:10:46 +0200
Subject: [PATCH 002/305] docs: update Engine API link in ethereum.mdx (#17137)

---
 docs/vocs/docs/pages/run/ethereum.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/vocs/docs/pages/run/ethereum.mdx b/docs/vocs/docs/pages/run/ethereum.mdx
index 3c488416ec9..9ba16f20c47 100644
--- a/docs/vocs/docs/pages/run/ethereum.mdx
+++ b/docs/vocs/docs/pages/run/ethereum.mdx
@@ -43,7 +43,7 @@ You can change this by adding the `--http`, `--ws` flags, respectively and using
 For more commands, see the [`reth node` CLI reference](/cli/cli).
 :::
 
-The EL \<> CL communication happens over the [Engine API](https://github.com/ethereum/execution-apis/blob/main/src/engine/common), which is by default exposed at `http://localhost:8551`. The connection is authenticated over JWT using a JWT secret which is auto-generated by Reth and placed in a file called `jwt.hex` in the data directory, which on Linux by default is `$HOME/.local/share/reth/` (`/Users//Library/Application Support/reth/mainnet/jwt.hex` in Mac).
+The EL \<> CL communication happens over the [Engine API](https://github.com/ethereum/execution-apis/blob/main/src/engine/common.md), which is by default exposed at `http://localhost:8551`. The connection is authenticated over JWT using a JWT secret which is auto-generated by Reth and placed in a file called `jwt.hex` in the data directory, which on Linux by default is `$HOME/.local/share/reth/` (`/Users//Library/Application Support/reth/mainnet/jwt.hex` in Mac).
 
 You can override this path using the `--authrpc.jwtsecret` option. You MUST use the same JWT secret in BOTH Reth and the chosen Consensus Layer. If you want to override the address or port, you can use the `--authrpc.addr` and `--authrpc.port` options, respectively.

From 515e2077b4493a3fd48628ac52b521407bc54c62 Mon Sep 17 00:00:00 2001
From: Alex Pikme
Date: Mon, 30 Jun 2025 13:25:09 +0200
Subject: [PATCH 003/305] docs: fix spelling errors (#17139)

---
 crates/net/network/src/test_utils/init.rs         | 2 +-
 crates/net/network/src/test_utils/transactions.rs | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/net/network/src/test_utils/init.rs b/crates/net/network/src/test_utils/init.rs
index 51537f37d87..db61931dd47 100644
--- a/crates/net/network/src/test_utils/init.rs
+++ b/crates/net/network/src/test_utils/init.rs
@@ -13,7 +13,7 @@ pub fn enr_to_peer_id(enr: Enr<SecretKey>) -> PeerId {
 // copied from ethers-rs
 /// A bit of hack to find an unused TCP port.
 ///
-/// Does not guarantee that the given port is unused after the function exists, just that it was
+/// Does not guarantee that the given port is unused after the function exits, just that it was
 /// unused before the function started (i.e., it does not reserve a port).
pub fn unused_port() -> u16 { unused_tcp_addr().port() diff --git a/crates/net/network/src/test_utils/transactions.rs b/crates/net/network/src/test_utils/transactions.rs index c3c38e3f1c7..467f146b059 100644 --- a/crates/net/network/src/test_utils/transactions.rs +++ b/crates/net/network/src/test_utils/transactions.rs @@ -51,7 +51,7 @@ pub async fn new_tx_manager( (transactions, network) } -/// Directly buffer hahs into tx fetcher for testing. +/// Directly buffer hash into tx fetcher for testing. pub fn buffer_hash_to_tx_fetcher( tx_fetcher: &mut TransactionFetcher, hash: TxHash, From 5409d3146b9e8563d8664753164e8a5c6443dc04 Mon Sep 17 00:00:00 2001 From: Cypher Pepe <125112044+cypherpepe@users.noreply.github.com> Date: Mon, 30 Jun 2025 14:27:07 +0300 Subject: [PATCH 004/305] chore: fixed broken links in opstack.mdx (#17135) --- docs/vocs/docs/pages/run/opstack.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/vocs/docs/pages/run/opstack.mdx b/docs/vocs/docs/pages/run/opstack.mdx index 86e9ad72438..d472485be60 100644 --- a/docs/vocs/docs/pages/run/opstack.mdx +++ b/docs/vocs/docs/pages/run/opstack.mdx @@ -91,13 +91,13 @@ op-node \ Consider adding the `--l1.trustrpc` flag to improve performance, if the connection to l1 is over localhost. [l1-el-spec]: https://github.com/ethereum/execution-specs -[rollup-node-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node +[rollup-node-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node.md [op-geth-forkdiff]: https://op-geth.optimism.io -[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/background#sequencers +[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/background.md#sequencers [op-stack-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs -[l2-el-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine -[deposit-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/deposits -[derivation-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/derivation +[l2-el-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md +[deposit-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/deposits.md +[derivation-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/derivation.md [superchain-registry]: https://github.com/ethereum-optimism/superchain-registry [op-node-docker]: https://console.cloud.google.com/artifacts/docker/oplabs-tools-artifacts/us/images/op-node [reth]: https://github.com/paradigmxyz/reth From 42eb672473442e9eb746d6cd805567b690489c4e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 30 Jun 2025 13:38:32 +0200 Subject: [PATCH 005/305] feat(optimism): add debug namespace endpoints to historical RPC forwarding (#17133) --- crates/optimism/rpc/src/historical.rs | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/crates/optimism/rpc/src/historical.rs b/crates/optimism/rpc/src/historical.rs index 0f8824882b3..6434d6bd519 100644 --- a/crates/optimism/rpc/src/historical.rs +++ b/crates/optimism/rpc/src/historical.rs @@ -136,16 +136,24 @@ where Box::pin(async move { let maybe_block_id = match req.method_name() { - "eth_getBlockByNumber" | "eth_getBlockByHash" => { - parse_block_id_from_params(&req.params(), 0) - } + "eth_getBlockByNumber" | + "eth_getBlockByHash" | + "debug_traceBlockByNumber" | + 
"debug_traceBlockByHash" => parse_block_id_from_params(&req.params(), 0), "eth_getBalance" | "eth_getCode" | "eth_getTransactionCount" | "eth_call" | "eth_estimateGas" | - "eth_createAccessList" => parse_block_id_from_params(&req.params(), 1), + "eth_createAccessList" | + "debug_traceCall" => parse_block_id_from_params(&req.params(), 1), "eth_getStorageAt" | "eth_getProof" => parse_block_id_from_params(&req.params(), 2), + "debug_traceTransaction" => { + // debug_traceTransaction takes a transaction hash as its first parameter, + // not a BlockId. We assume the op-reth instance is configured with minimal + // bootstrap without the bodies so we can't check if this tx is pre bedrock + None + } _ => None, }; From 678b5cd1fc274b42838eb57d0724585d2810b29c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 30 Jun 2025 13:53:04 +0200 Subject: [PATCH 006/305] chore: rm unused either type (#17126) --- crates/revm/src/either.rs | 49 --------------------------------------- crates/revm/src/lib.rs | 3 --- 2 files changed, 52 deletions(-) delete mode 100644 crates/revm/src/either.rs diff --git a/crates/revm/src/either.rs b/crates/revm/src/either.rs deleted file mode 100644 index e26d2ccb721..00000000000 --- a/crates/revm/src/either.rs +++ /dev/null @@ -1,49 +0,0 @@ -use alloy_primitives::{Address, B256, U256}; -use revm::{bytecode::Bytecode, state::AccountInfo, Database}; - -/// An enum type that can hold either of two different [`Database`] implementations. -/// -/// This allows flexible usage of different [`Database`] types in the same context. -#[derive(Debug, Clone)] -pub enum Either { - /// A value of type `L`. - Left(L), - /// A value of type `R`. - Right(R), -} - -impl Database for Either -where - L: Database, - R: Database, -{ - type Error = L::Error; - - fn basic(&mut self, address: Address) -> Result, Self::Error> { - match self { - Self::Left(db) => db.basic(address), - Self::Right(db) => db.basic(address), - } - } - - fn code_by_hash(&mut self, code_hash: B256) -> Result { - match self { - Self::Left(db) => db.code_by_hash(code_hash), - Self::Right(db) => db.code_by_hash(code_hash), - } - } - - fn storage(&mut self, address: Address, index: U256) -> Result { - match self { - Self::Left(db) => db.storage(address, index), - Self::Right(db) => db.storage(address, index), - } - } - - fn block_hash(&mut self, number: u64) -> Result { - match self { - Self::Left(db) => db.block_hash(number), - Self::Right(db) => db.block_hash(number), - } - } -} diff --git a/crates/revm/src/lib.rs b/crates/revm/src/lib.rs index caaae237c8a..ecc5b576a84 100644 --- a/crates/revm/src/lib.rs +++ b/crates/revm/src/lib.rs @@ -30,9 +30,6 @@ pub mod test_utils; // Convenience re-exports. pub use revm::{self, database::State, *}; -/// Either type for flexible usage of different database types in the same context. -pub mod either; - /// Helper types for execution witness generation. 
#[cfg(feature = "witness")] pub mod witness; From c63459884e5988067b0b7a67362dd036f2085326 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roman=20Hodul=C3=A1k?= Date: Mon, 30 Jun 2025 15:41:35 +0200 Subject: [PATCH 007/305] refactor: Replace reth `recover_singer_with_buf` with `alloy` (#17107) Co-authored-by: Matthias Seitz --- Cargo.toml | 54 +++++++-------- crates/ethereum/primitives/src/transaction.rs | 15 ++--- .../primitives/src/transaction/signed.rs | 17 ++--- crates/primitives-traits/src/extended.rs | 11 ++-- .../src/transaction/signed.rs | 66 +------------------ .../stages/src/stages/sender_recovery.rs | 2 +- examples/custom-node/src/primitives/tx.rs | 27 +------- 7 files changed, 49 insertions(+), 143 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1c8a80d099b..fedef0f26ca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -476,33 +476,33 @@ alloy-trie = { version = "0.9.0", default-features = false } alloy-hardforks = "0.2.7" -alloy-consensus = { version = "1.0.13", default-features = false } -alloy-contract = { version = "1.0.13", default-features = false } -alloy-eips = { version = "1.0.13", default-features = false } -alloy-genesis = { version = "1.0.13", default-features = false } -alloy-json-rpc = { version = "1.0.13", default-features = false } -alloy-network = { version = "1.0.13", default-features = false } -alloy-network-primitives = { version = "1.0.13", default-features = false } -alloy-provider = { version = "1.0.13", features = ["reqwest"], default-features = false } -alloy-pubsub = { version = "1.0.13", default-features = false } -alloy-rpc-client = { version = "1.0.13", default-features = false } -alloy-rpc-types = { version = "1.0.13", features = ["eth"], default-features = false } -alloy-rpc-types-admin = { version = "1.0.13", default-features = false } -alloy-rpc-types-anvil = { version = "1.0.13", default-features = false } -alloy-rpc-types-beacon = { version = "1.0.13", default-features = false } -alloy-rpc-types-debug = { version = "1.0.13", default-features = false } -alloy-rpc-types-engine = { version = "1.0.13", default-features = false } -alloy-rpc-types-eth = { version = "1.0.13", default-features = false } -alloy-rpc-types-mev = { version = "1.0.13", default-features = false } -alloy-rpc-types-trace = { version = "1.0.13", default-features = false } -alloy-rpc-types-txpool = { version = "1.0.13", default-features = false } -alloy-serde = { version = "1.0.13", default-features = false } -alloy-signer = { version = "1.0.13", default-features = false } -alloy-signer-local = { version = "1.0.13", default-features = false } -alloy-transport = { version = "1.0.13" } -alloy-transport-http = { version = "1.0.13", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.0.13", default-features = false } -alloy-transport-ws = { version = "1.0.13", default-features = false } +alloy-consensus = { version = "1.0.16", default-features = false } +alloy-contract = { version = "1.0.16", default-features = false } +alloy-eips = { version = "1.0.16", default-features = false } +alloy-genesis = { version = "1.0.16", default-features = false } +alloy-json-rpc = { version = "1.0.16", default-features = false } +alloy-network = { version = "1.0.16", default-features = false } +alloy-network-primitives = { version = "1.0.16", default-features = false } +alloy-provider = { version = "1.0.16", features = ["reqwest"], default-features = false } +alloy-pubsub = { version = "1.0.16", default-features = false } +alloy-rpc-client = { version = 
"1.0.16", default-features = false } +alloy-rpc-types = { version = "1.0.16", features = ["eth"], default-features = false } +alloy-rpc-types-admin = { version = "1.0.16", default-features = false } +alloy-rpc-types-anvil = { version = "1.0.16", default-features = false } +alloy-rpc-types-beacon = { version = "1.0.16", default-features = false } +alloy-rpc-types-debug = { version = "1.0.16", default-features = false } +alloy-rpc-types-engine = { version = "1.0.16", default-features = false } +alloy-rpc-types-eth = { version = "1.0.16", default-features = false } +alloy-rpc-types-mev = { version = "1.0.16", default-features = false } +alloy-rpc-types-trace = { version = "1.0.16", default-features = false } +alloy-rpc-types-txpool = { version = "1.0.16", default-features = false } +alloy-serde = { version = "1.0.16", default-features = false } +alloy-signer = { version = "1.0.16", default-features = false } +alloy-signer-local = { version = "1.0.16", default-features = false } +alloy-transport = { version = "1.0.16" } +alloy-transport-http = { version = "1.0.16", features = ["reqwest-rustls-tls"], default-features = false } +alloy-transport-ipc = { version = "1.0.16", default-features = false } +alloy-transport-ws = { version = "1.0.16", default-features = false } # op alloy-op-evm = { version = "0.12", default-features = false } diff --git a/crates/ethereum/primitives/src/transaction.rs b/crates/ethereum/primitives/src/transaction.rs index 07191142e71..c6de2521a03 100644 --- a/crates/ethereum/primitives/src/transaction.rs +++ b/crates/ethereum/primitives/src/transaction.rs @@ -650,21 +650,18 @@ impl SignerRecoverable for TransactionSigned { let signature_hash = self.signature_hash(); recover_signer_unchecked(&self.signature, signature_hash) } + + fn recover_unchecked_with_buf(&self, buf: &mut Vec) -> Result { + self.encode_for_signing(buf); + let signature_hash = keccak256(buf); + recover_signer_unchecked(&self.signature, signature_hash) + } } impl SignedTransaction for TransactionSigned { fn tx_hash(&self) -> &TxHash { self.hash.get_or_init(|| self.recalculate_hash()) } - - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - self.encode_for_signing(buf); - let signature_hash = keccak256(buf); - recover_signer_unchecked(&self.signature, signature_hash) - } } #[cfg(test)] diff --git a/crates/optimism/primitives/src/transaction/signed.rs b/crates/optimism/primitives/src/transaction/signed.rs index 2a345229a65..75276754687 100644 --- a/crates/optimism/primitives/src/transaction/signed.rs +++ b/crates/optimism/primitives/src/transaction/signed.rs @@ -127,17 +127,8 @@ impl SignerRecoverable for OpTransactionSigned { let signature_hash = signature_hash(transaction); recover_signer_unchecked(signature, signature_hash) } -} - -impl SignedTransaction for OpTransactionSigned { - fn tx_hash(&self) -> &TxHash { - self.hash.get_or_init(|| self.recalculate_hash()) - } - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { + fn recover_unchecked_with_buf(&self, buf: &mut Vec) -> Result { match &self.transaction { // Optimism's Deposit transaction does not have a signature. Directly return the // `from` address. 
@@ -149,6 +140,12 @@ impl SignedTransaction for OpTransactionSigned {
         };
         recover_signer_unchecked(&self.signature, keccak256(buf))
     }
+}
+
+impl SignedTransaction for OpTransactionSigned {
+    fn tx_hash(&self) -> &TxHash {
+        self.hash.get_or_init(|| self.recalculate_hash())
+    }
 
     fn recalculate_hash(&self) -> B256 {
         keccak256(self.encoded_2718())
diff --git a/crates/primitives-traits/src/extended.rs b/crates/primitives-traits/src/extended.rs
index e235f47033e..f2f46cf36d2 100644
--- a/crates/primitives-traits/src/extended.rs
+++ b/crates/primitives-traits/src/extended.rs
@@ -149,6 +149,10 @@ where
     fn recover_signer_unchecked(&self) -> Result<Address, RecoveryError> {
         delegate!(self => tx.recover_signer_unchecked())
     }
+
+    fn recover_unchecked_with_buf(&self, buf: &mut Vec<u8>) -> Result<Address, RecoveryError> {
+        delegate!(self => tx.recover_unchecked_with_buf(buf))
+    }
 }
 
 impl<B, T> SignedTransaction for Extended<B, T>
@@ -162,13 +166,6 @@ where
     fn tx_hash(&self) -> &TxHash {
         match self {
             Self::BuiltIn(tx) => tx.tx_hash(),
             Self::Other(tx) => tx.tx_hash(),
         }
     }
-
-    fn recover_signer_unchecked_with_buf(
-        &self,
-        buf: &mut Vec<u8>,
-    ) -> Result<Address, RecoveryError> {
-        delegate!(self => tx.recover_signer_unchecked_with_buf(buf))
-    }
 }
 
 impl<B, T> Typed2718 for Extended<B, T>
diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs
index 84cf2769a01..dfa1d896162 100644
--- a/crates/primitives-traits/src/transaction/signed.rs
+++ b/crates/primitives-traits/src/transaction/signed.rs
@@ -1,10 +1,7 @@
 //! API of a signed transaction.
 
-use crate::{
-    crypto::secp256k1::recover_signer_unchecked, InMemorySize, MaybeCompact, MaybeSerde,
-    MaybeSerdeBincodeCompat,
-};
-use alloc::{fmt, vec::Vec};
+use crate::{InMemorySize, MaybeCompact, MaybeSerde, MaybeSerdeBincodeCompat};
+use alloc::fmt;
 use alloy_consensus::{
     transaction::{Recovered, RlpEcdsaEncodableTx, SignerRecoverable},
     EthereumTxEnvelope, SignableTransaction,
@@ -77,14 +74,6 @@ pub trait SignedTransaction:
         self.recover_signer_unchecked()
     }
 
-    /// Same as [`SignerRecoverable::recover_signer_unchecked`] but receives a buffer to operate on.
-    /// This is used during batch recovery to avoid allocating a new buffer for each
-    /// transaction.
-    fn recover_signer_unchecked_with_buf(
-        &self,
-        buf: &mut Vec<u8>,
-    ) -> Result<Address, RecoveryError>;
-
     /// Calculate transaction hash, eip2728 transaction does not contain rlp header and start with
     /// tx type.
fn recalculate_hash(&self) -> B256 {
@@ -150,21 +139,6 @@ where
             Self::Eip4844(tx) => tx.hash(),
         }
     }
-
-    fn recover_signer_unchecked_with_buf(
-        &self,
-        buf: &mut Vec<u8>,
-    ) -> Result<Address, RecoveryError> {
-        match self {
-            Self::Legacy(tx) => tx.tx().encode_for_signing(buf),
-            Self::Eip2930(tx) => tx.tx().encode_for_signing(buf),
-            Self::Eip1559(tx) => tx.tx().encode_for_signing(buf),
-            Self::Eip7702(tx) => tx.tx().encode_for_signing(buf),
-            Self::Eip4844(tx) => tx.tx().encode_for_signing(buf),
-        }
-        let signature_hash = keccak256(buf);
-        recover_signer_unchecked(self.signature(), signature_hash)
-    }
 }
 
 #[cfg(feature = "op")]
@@ -181,20 +155,6 @@ mod op {
             Self::Eip7702(tx) => tx.hash(),
         }
     }
-
-    fn recover_signer_unchecked_with_buf(
-        &self,
-        buf: &mut Vec<u8>,
-    ) -> Result<Address, RecoveryError> {
-        match self {
-            Self::Legacy(tx) => tx.tx().encode_for_signing(buf),
-            Self::Eip2930(tx) => tx.tx().encode_for_signing(buf),
-            Self::Eip1559(tx) => tx.tx().encode_for_signing(buf),
-            Self::Eip7702(tx) => tx.tx().encode_for_signing(buf),
-        }
-        let signature_hash = keccak256(buf);
-        recover_signer_unchecked(self.signature(), signature_hash)
-    }
 }
 
 impl SignedTransaction for OpTxEnvelope {
@@ -207,27 +167,5 @@
             Self::Deposit(tx) => tx.hash_ref(),
         }
     }
-
-    fn recover_signer_unchecked_with_buf(
-        &self,
-        buf: &mut Vec<u8>,
-    ) -> Result<Address, RecoveryError> {
-        match self {
-            Self::Deposit(tx) => return Ok(tx.from),
-            Self::Legacy(tx) => tx.tx().encode_for_signing(buf),
-            Self::Eip2930(tx) => tx.tx().encode_for_signing(buf),
-            Self::Eip1559(tx) => tx.tx().encode_for_signing(buf),
-            Self::Eip7702(tx) => tx.tx().encode_for_signing(buf),
-        }
-        let signature_hash = keccak256(buf);
-        let signature = match self {
-            Self::Legacy(tx) => tx.signature(),
-            Self::Eip2930(tx) => tx.signature(),
-            Self::Eip1559(tx) => tx.signature(),
-            Self::Eip7702(tx) => tx.signature(),
-            Self::Deposit(_) => unreachable!("Deposit transactions should not be handled here"),
-        };
-        recover_signer_unchecked(signature, signature_hash)
-    }
 }
 }
diff --git a/crates/stages/stages/src/stages/sender_recovery.rs b/crates/stages/stages/src/stages/sender_recovery.rs
index e6bdb92cf20..2a2870f07ca 100644
--- a/crates/stages/stages/src/stages/sender_recovery.rs
+++ b/crates/stages/stages/src/stages/sender_recovery.rs
@@ -315,7 +315,7 @@ fn recover_sender(
     // value is greater than `secp256k1n / 2` if past EIP-2. There are transactions
     // pre-homestead which have large `s` values, so using [Signature::recover_signer] here
     // would not be backwards-compatible.
-        let sender = tx.recover_signer_unchecked_with_buf(rlp_buf).map_err(|_| {
+        let sender = tx.recover_unchecked_with_buf(rlp_buf).map_err(|_| {
             SenderRecoveryStageError::FailedRecovery(FailedSenderRecoveryError { tx: tx_id })
         })?;
 
diff --git a/examples/custom-node/src/primitives/tx.rs b/examples/custom-node/src/primitives/tx.rs
index 48348f6839a..682d1a67552 100644
--- a/examples/custom-node/src/primitives/tx.rs
+++ b/examples/custom-node/src/primitives/tx.rs
@@ -5,13 +5,13 @@ use alloy_consensus::{
         RecoveryError,
     },
     transaction::SignerRecoverable,
-    SignableTransaction, Signed, Transaction, TransactionEnvelope,
+    Signed, Transaction, TransactionEnvelope,
 };
 use alloy_eips::{
     eip2718::{Eip2718Result, IsTyped2718},
     Decodable2718, Encodable2718, Typed2718,
 };
-use alloy_primitives::{bytes::Buf, keccak256, Sealed, Signature, TxHash, B256};
+use alloy_primitives::{bytes::Buf, Sealed, Signature, TxHash, B256};
 use alloy_rlp::{BufMut, Decodable, Encodable, Result as RlpResult};
 use op_alloy_consensus::{OpTxEnvelope, TxDeposit};
 use reth_codecs::{
@@ -128,15 +128,6 @@ impl SignedTransaction for CustomTransactionEnvelope {
     fn tx_hash(&self) -> &TxHash {
         self.inner.hash()
     }
-
-    fn recover_signer_unchecked_with_buf(
-        &self,
-        buf: &mut Vec<u8>,
-    ) -> Result<Address, RecoveryError> {
-        self.inner.tx().encode_for_signing(buf);
-        let signature_hash = keccak256(buf);
-        recover_signer_unchecked(self.inner.signature(), signature_hash)
-    }
 }
 
 impl Typed2718 for CustomTransactionEnvelope {
@@ -300,20 +291,6 @@ impl SignerRecoverable for CustomTransaction {
 }
 
 impl SignedTransaction for CustomTransaction {
-    fn recover_signer_unchecked_with_buf(
-        &self,
-        buf: &mut Vec<u8>,
-    ) -> Result<Address, RecoveryError> {
-        match self {
-            CustomTransaction::Op(tx) => {
-                SignedTransaction::recover_signer_unchecked_with_buf(tx, buf)
-            }
-            CustomTransaction::Payment(tx) => {
-                SignedTransaction::recover_signer_unchecked_with_buf(tx, buf)
-            }
-        }
-    }
-
     fn tx_hash(&self) -> &B256 {
         match self {
             CustomTransaction::Op(tx) => SignedTransaction::tx_hash(tx),

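The buffered recovery path this series moves onto alloy is what enables batch sender recovery without per-transaction allocations, as in the sender-recovery stage above. A minimal sketch, assuming the `SignerRecoverable` trait and the `crypto::RecoveryError` re-export from `alloy-consensus` that the diffs import:

```rust
use alloy_consensus::{crypto::RecoveryError, transaction::SignerRecoverable};
use alloy_primitives::Address;

// Recover senders for a batch of transactions while reusing a single
// encoding buffer instead of allocating one per transaction.
fn recover_batch<T: SignerRecoverable>(txs: &[T]) -> Result<Vec<Address>, RecoveryError> {
    let mut buf = Vec::new();
    let mut senders = Vec::with_capacity(txs.len());
    for tx in txs {
        buf.clear(); // reuse the allocation across iterations
        senders.push(tx.recover_unchecked_with_buf(&mut buf)?);
    }
    Ok(senders)
}
```
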
From bdb3debdf1f0081a4135b7bc61ba87275317f073 Mon Sep 17 00:00:00 2001
From: Arsenii Kulikov
Date: Mon, 30 Jun 2025 17:07:39 +0300
Subject: [PATCH 008/305] feat: remove redundant generic from `EthereumEngineValidatorBuilder` (#17108)

---
 crates/ethereum/node/src/node.rs  | 14 +++++---------
 crates/exex/test-utils/src/lib.rs |  2 +-
 2 files changed, 6 insertions(+), 10 deletions(-)

diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs
index 02ebacdb7d7..f1c238aaa95 100644
--- a/crates/ethereum/node/src/node.rs
+++ b/crates/ethereum/node/src/node.rs
@@ -320,7 +320,7 @@ where
     type AddOns = EthereumAddOns<
         NodeAdapter<N, <Self::ComponentsBuilder as NodeComponentsBuilder<N>>::Components>,
         EthereumEthApiBuilder,
-        EthereumEngineValidatorBuilder<ChainSpec>,
+        EthereumEngineValidatorBuilder,
     >;
 
     fn components_builder(&self) -> Self::ComponentsBuilder {
@@ -475,23 +475,19 @@ where
 
 /// Builder for [`EthereumEngineValidator`].
 #[derive(Debug, Default, Clone)]
 #[non_exhaustive]
-pub struct EthereumEngineValidatorBuilder<ChainSpec> {
-    _phantom: std::marker::PhantomData<ChainSpec>,
-}
+pub struct EthereumEngineValidatorBuilder;
 
-impl<Types, Node, ChainSpec> EngineValidatorBuilder<Node>
-    for EthereumEngineValidatorBuilder<ChainSpec>
+impl<Types, Node> EngineValidatorBuilder<Node> for EthereumEngineValidatorBuilder
 where
     Types: NodeTypes<
-        ChainSpec = ChainSpec,
+        ChainSpec: EthereumHardforks + Clone + 'static,
         Payload: EngineTypes + PayloadTypes,
         Primitives = EthPrimitives,
     >,
     Node: FullNodeComponents<Types = Types>,
-    ChainSpec: EthChainSpec + EthereumHardforks + Clone + 'static,
 {
-    type Validator = EthereumEngineValidator<ChainSpec>;
+    type Validator = EthereumEngineValidator<Types::ChainSpec>;
 
     async fn build(self, ctx: &AddOnsContext<'_, Node>) -> eyre::Result<Self::Validator> {
         Ok(EthereumEngineValidator::new(ctx.config.chain.clone()))
diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs
index 00bcdcbbf70..14001ae8299 100644
--- a/crates/exex/test-utils/src/lib.rs
+++ b/crates/exex/test-utils/src/lib.rs
@@ -136,7 +136,7 @@ where
     type AddOns = EthereumAddOns<
         NodeAdapter<N, <Self::ComponentsBuilder as NodeComponentsBuilder<N>>::Components>,
         EthereumEthApiBuilder,
-        EthereumEngineValidatorBuilder<ChainSpec>,
+        EthereumEngineValidatorBuilder,
    >;
 
     fn components_builder(&self) -> Self::ComponentsBuilder {

From 2819ab2c0e32d546996d2c7534901cfe68ad6c16 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Mon, 30 Jun 2025 17:05:45 +0200
Subject: [PATCH 009/305] chore: promote trace to debug (#17144)

---
 crates/net/network-types/src/peers/mod.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/net/network-types/src/peers/mod.rs b/crates/net/network-types/src/peers/mod.rs
index f3529875018..5e998c87904 100644
--- a/crates/net/network-types/src/peers/mod.rs
+++ b/crates/net/network-types/src/peers/mod.rs
@@ -8,7 +8,7 @@ pub use config::{ConnectionsConfig, PeersConfig};
 pub use reputation::{Reputation, ReputationChange, ReputationChangeKind, ReputationChangeWeights};
 
 use alloy_eip2124::ForkId;
-use tracing::trace;
+use tracing::debug;
 
 use crate::{
     is_banned_reputation, PeerAddr, PeerConnectionState, PeerKind, ReputationChangeOutcome,
@@ -92,7 +92,7 @@ impl Peer {
         // we add reputation since negative reputation change decrease total reputation
         self.reputation = previous.saturating_add(reputation);
 
-        trace!(target: "net::peers", reputation=%self.reputation, banned=%self.is_banned(), ?kind, "applied reputation change");
+        debug!(target: "net::peers", reputation=%self.reputation, banned=%self.is_banned(), ?kind, "applied reputation change");
 
         if self.state.is_connected() && self.is_banned() {
             self.state.disconnect();

From c9f20728f275a7e3168db56ff42ddc2ae22a1a93 Mon Sep 17 00:00:00 2001
From: Brian Picciano
Date: Mon, 30 Jun 2025 18:11:51 +0200
Subject: [PATCH 010/305] chore: pass provider into SparseTrie and SparseStateTrie via impl argument in update/remove_leaf (#17099)

---
 .../src/tree/payload_processor/sparse_trie.rs      |  49 ++-
 crates/stateless/src/trie.rs                       |  29 +-
 crates/trie/sparse/benches/rlp_node.rs             |  10 +-
 crates/trie/sparse/benches/root.rs                 |   7 +-
 crates/trie/sparse/benches/update.rs               |  12 +-
 crates/trie/sparse/src/state.rs                    | 214 +++++------
 crates/trie/sparse/src/trie.rs                     | 353 ++++++++----------
 crates/trie/trie/src/witness.rs                    |   9 +-
 8 files changed, 348 insertions(+), 335 deletions(-)

diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs
index c8de07c1ec5..8f472cd8c8b 100644
--- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs
+++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs
@@ -38,13 +38,15 @@ where
     /// Sparse Trie initialized with the blinded provider factory.
     ///
     /// It's kept as a field on the struct to prevent blocking on de-allocation in [`Self::run`].
-    pub(super) trie: SparseStateTrie<BPF>,
+    pub(super) trie: SparseStateTrie,
     pub(super) metrics: MultiProofTaskMetrics,
+    /// Blinded node provider factory.
+    blinded_provider_factory: BPF,
 }
 
 impl<BPF> SparseTrieTask<BPF>
 where
-    BPF: BlindedProviderFactory + Send + Sync,
+    BPF: BlindedProviderFactory + Send + Sync + Clone,
     BPF::AccountNodeProvider: BlindedProvider + Send + Sync,
     BPF::StorageNodeProvider: BlindedProvider + Send + Sync,
 {
@@ -59,7 +61,8 @@
             executor,
             updates,
             metrics,
-            trie: SparseStateTrie::new(blinded_provider_factory).with_updates(true),
+            trie: SparseStateTrie::new().with_updates(true),
+            blinded_provider_factory,
         }
     }
 
@@ -94,10 +97,10 @@
         metrics: MultiProofTaskMetrics,
         sparse_trie_state: SparseTrieState,
     ) -> Self {
-        let mut trie = SparseStateTrie::new(blinded_provider_factory).with_updates(true);
+        let mut trie = SparseStateTrie::new().with_updates(true);
         trie.populate_from(sparse_trie_state);
 
-        Self { executor, updates, metrics, trie }
+        Self { executor, updates, metrics, trie, blinded_provider_factory }
     }
 
     /// Runs the sparse trie task to completion.
@@ -129,9 +132,13 @@
                 "Updating sparse trie"
             );
 
-            let elapsed = update_sparse_trie(&mut self.trie, update).map_err(|e| {
-                ParallelStateRootError::Other(format!("could not calculate state root: {e:?}"))
-            })?;
+            let elapsed =
+                update_sparse_trie(&mut self.trie, update, &self.blinded_provider_factory)
+                    .map_err(|e| {
+                        ParallelStateRootError::Other(format!(
+                            "could not calculate state root: {e:?}"
+                        ))
+                    })?;
             self.metrics.sparse_trie_update_duration_histogram.record(elapsed);
             trace!(target: "engine::root", ?elapsed, num_iterations, "Root calculation completed");
         }
@@ -139,9 +146,10 @@
         debug!(target: "engine::root", num_iterations, "All proofs processed, ending calculation");
         let start = Instant::now();
-        let (state_root, trie_updates) = self.trie.root_with_updates().map_err(|e| {
-            ParallelStateRootError::Other(format!("could not calculate state root: {e:?}"))
-        })?;
+        let (state_root, trie_updates) =
+            self.trie.root_with_updates(&self.blinded_provider_factory).map_err(|e| {
+                ParallelStateRootError::Other(format!("could not calculate state root: {e:?}"))
+            })?;
 
         self.metrics.sparse_trie_final_update_duration_histogram.record(start.elapsed());
         self.metrics.sparse_trie_total_duration_histogram.record(now.elapsed());
@@ -167,8 +175,9 @@ pub struct StateRootComputeOutcome {
 
 /// Updates the sparse trie with the given proofs and state, and returns the elapsed time.
 pub(crate) fn update_sparse_trie<BPF>(
-    trie: &mut SparseStateTrie<BPF>,
+    trie: &mut SparseStateTrie,
     SparseTrieUpdate { mut state, multiproof }: SparseTrieUpdate,
+    blinded_provider_factory: &BPF,
 ) -> SparseStateTrieResult<Duration>
 where
     BPF: BlindedProviderFactory + Send + Sync,
@@ -198,6 +207,7 @@
             let span = trace_span!(target: "engine::root::sparse", "Storage trie", ?address);
             let _enter = span.enter();
             trace!(target: "engine::root::sparse", "Updating storage");
+            let storage_provider = blinded_provider_factory.storage_node_provider(address);
             let mut storage_trie = storage_trie.ok_or(SparseTrieErrorKind::Blind)?;
 
             if storage.wiped {
@@ -208,11 +218,14 @@
                 let slot_nibbles = Nibbles::unpack(slot);
                 if value.is_zero() {
                     trace!(target: "engine::root::sparse", ?slot, "Removing storage slot");
-                    storage_trie.remove_leaf(&slot_nibbles)?;
+                    storage_trie.remove_leaf(&slot_nibbles, &storage_provider)?;
                 } else {
                     trace!(target: "engine::root::sparse", ?slot, "Updating storage slot");
-                    storage_trie
-                        .update_leaf(slot_nibbles, alloy_rlp::encode_fixed_size(&value).to_vec())?;
+                    storage_trie.update_leaf(
+                        slot_nibbles,
+                        alloy_rlp::encode_fixed_size(&value).to_vec(),
+                        &storage_provider,
+                    )?;
                 }
             }
 
@@ -232,18 +245,18 @@
             // If the account itself has an update, remove it from the state update and update in
             // one go instead of doing it down below.
             trace!(target: "engine::root::sparse", ?address, "Updating account and its storage root");
-            trie.update_account(address, account.unwrap_or_default())?;
+            trie.update_account(address, account.unwrap_or_default(), blinded_provider_factory)?;
         } else if trie.is_account_revealed(address) {
             // Otherwise, if the account is revealed, only update its storage root.
            trace!(target: "engine::root::sparse", ?address, "Updating account storage root");
-            trie.update_account_storage_root(address)?;
+            trie.update_account_storage_root(address, blinded_provider_factory)?;
         }
     }
 
     // Update accounts
     for (address, account) in state.accounts {
         trace!(target: "engine::root::sparse", ?address, "Updating account");
-        trie.update_account(address, account.unwrap_or_default())?;
+        trie.update_account(address, account.unwrap_or_default(), blinded_provider_factory)?;
     }
 
     let elapsed_before = started_at.elapsed();
diff --git a/crates/stateless/src/trie.rs b/crates/stateless/src/trie.rs
index 5a35e52a7f3..c8c6e652209 100644
--- a/crates/stateless/src/trie.rs
+++ b/crates/stateless/src/trie.rs
@@ -9,8 +9,9 @@ use reth_errors::ProviderError;
 use reth_revm::state::Bytecode;
 use reth_trie_common::{HashedPostState, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE};
 use reth_trie_sparse::{
-    blinded::DefaultBlindedProviderFactory, errors::SparseStateTrieResult, SparseStateTrie,
-    SparseTrie,
+    blinded::{DefaultBlindedProvider, DefaultBlindedProviderFactory},
+    errors::SparseStateTrieResult,
+    SparseStateTrie, SparseTrie,
 };
 
 /// Trait for stateless trie implementations that can be used for stateless validation.
@@ -174,7 +175,8 @@ fn verify_execution_witness(
     witness: &ExecutionWitness,
     pre_state_root: B256,
 ) -> Result<(SparseStateTrie, B256Map<Bytes>), StatelessValidationError> {
-    let mut trie = SparseStateTrie::new(DefaultBlindedProviderFactory);
+    let provider_factory = DefaultBlindedProviderFactory;
+    let mut trie = SparseStateTrie::new();
     let mut state_witness = B256Map::default();
     let mut bytecode = B256Map::default();
 
@@ -200,7 +202,7 @@ fn verify_execution_witness(
 
     // Calculate the root
     let computed_root = trie
-        .root()
+        .root(&provider_factory)
         .map_err(|_e| StatelessValidationError::StatelessPreStateRootCalculationFailed)?;
 
     if computed_root == pre_state_root {
@@ -235,6 +237,11 @@ fn calculate_state_root(
     // borrowing issues.
     let mut storage_results = Vec::with_capacity(state.storages.len());
 
+    // In `verify_execution_witness` a `DefaultBlindedProviderFactory` is used, so we use the same
+    // again in here.
+    let provider_factory = DefaultBlindedProviderFactory;
+    let storage_provider = DefaultBlindedProvider;
+
     for (address, storage) in state.storages.into_iter().sorted_unstable_by_key(|(addr, _)| *addr) {
         // Take the existing storage trie (or create an empty, “revealed” one)
         let mut storage_trie =
@@ -250,9 +257,13 @@
         {
             let nibbles = Nibbles::unpack(hashed_slot);
             if value.is_zero() {
-                storage_trie.remove_leaf(&nibbles)?;
+                storage_trie.remove_leaf(&nibbles, &storage_provider)?;
             } else {
-                storage_trie.update_leaf(nibbles, alloy_rlp::encode_fixed_size(&value).to_vec())?;
+                storage_trie.update_leaf(
+                    nibbles,
+                    alloy_rlp::encode_fixed_size(&value).to_vec(),
+                    &storage_provider,
+                )?;
             }
         }
 
@@ -288,14 +299,14 @@
 
         // Decide whether to remove or update the account leaf
         if account.is_empty() && storage_root == EMPTY_ROOT_HASH {
-            trie.remove_account_leaf(&nibbles)?;
+            trie.remove_account_leaf(&nibbles, &provider_factory)?;
         } else {
             account_rlp_buf.clear();
             account.into_trie_account(storage_root).encode(&mut account_rlp_buf);
-            trie.update_account_leaf(nibbles, account_rlp_buf.clone())?;
+            trie.update_account_leaf(nibbles, account_rlp_buf.clone(), &provider_factory)?;
         }
     }
 
     // Return new state root
-    trie.root()
+    trie.root(&provider_factory)
 }
diff --git a/crates/trie/sparse/benches/rlp_node.rs b/crates/trie/sparse/benches/rlp_node.rs
index 113392fca54..2b6fadeda1f 100644
--- a/crates/trie/sparse/benches/rlp_node.rs
+++ b/crates/trie/sparse/benches/rlp_node.rs
@@ -7,7 +7,7 @@ use proptest::{prelude::*, test_runner::TestRunner};
 use rand::{seq::IteratorRandom, Rng};
 use reth_testing_utils::generators;
 use reth_trie::Nibbles;
-use reth_trie_sparse::RevealedSparseTrie;
+use reth_trie_sparse::{blinded::DefaultBlindedProvider, RevealedSparseTrie};
 
 fn update_rlp_node_level(c: &mut Criterion) {
     let mut rng = generators::rng();
@@ -22,10 +22,15 @@ fn update_rlp_node_level(c: &mut Criterion) {
         .current();
 
     // Create a sparse trie with `size` leaves
+    let provider = DefaultBlindedProvider;
     let mut sparse = RevealedSparseTrie::default();
     for (key, value) in &state {
         sparse
-            .update_leaf(Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec())
+            .update_leaf(
+                Nibbles::unpack(key),
+                alloy_rlp::encode_fixed_size(value).to_vec(),
+                &provider,
+            )
             .unwrap();
     }
     sparse.root();
@@ -39,6 +44,7 @@
                 .update_leaf(
                     Nibbles::unpack(key),
                     alloy_rlp::encode_fixed_size(&rng.random::<U256>()).to_vec(),
+                    &provider,
                 )
                 .unwrap();
         }
diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs
index 
f4d461ae51a..e34718ffc5a 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -13,7 +13,7 @@ use reth_trie::{ HashedStorage, }; use reth_trie_common::{HashBuilder, Nibbles}; -use reth_trie_sparse::SparseTrie; +use reth_trie_sparse::{blinded::DefaultBlindedProvider, SparseTrie}; fn calculate_root_from_leaves(c: &mut Criterion) { let mut group = c.benchmark_group("calculate root from leaves"); @@ -40,6 +40,7 @@ fn calculate_root_from_leaves(c: &mut Criterion) { }); // sparse trie + let provider = DefaultBlindedProvider; group.bench_function(BenchmarkId::new("sparse trie", size), |b| { b.iter_with_setup(SparseTrie::revealed_empty, |mut sparse| { for (key, value) in &state { @@ -47,6 +48,7 @@ fn calculate_root_from_leaves(c: &mut Criterion) { .update_leaf( Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec(), + &provider, ) .unwrap(); } @@ -177,6 +179,7 @@ fn calculate_root_from_leaves_repeated(c: &mut Criterion) { }); // sparse trie + let provider = DefaultBlindedProvider; let benchmark_id = BenchmarkId::new( "sparse trie", format!( @@ -192,6 +195,7 @@ fn calculate_root_from_leaves_repeated(c: &mut Criterion) { .update_leaf( Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec(), + &provider, ) .unwrap(); } @@ -205,6 +209,7 @@ fn calculate_root_from_leaves_repeated(c: &mut Criterion) { .update_leaf( Nibbles::unpack(key), alloy_rlp::encode_fixed_size(value).to_vec(), + &provider, ) .unwrap(); } diff --git a/crates/trie/sparse/benches/update.rs b/crates/trie/sparse/benches/update.rs index 4b2971c1e05..d230d51c58b 100644 --- a/crates/trie/sparse/benches/update.rs +++ b/crates/trie/sparse/benches/update.rs @@ -5,7 +5,7 @@ use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criteri use proptest::{prelude::*, strategy::ValueTree}; use rand::seq::IteratorRandom; use reth_trie_common::Nibbles; -use reth_trie_sparse::SparseTrie; +use reth_trie_sparse::{blinded::DefaultBlindedProvider, SparseTrie}; const LEAF_COUNTS: [usize; 2] = [1_000, 5_000]; @@ -16,10 +16,11 @@ fn update_leaf(c: &mut Criterion) { group.bench_function(BenchmarkId::from_parameter(leaf_count), |b| { let leaves = generate_leaves(leaf_count); // Start with an empty trie + let provider = DefaultBlindedProvider; let mut trie = SparseTrie::revealed_empty(); // Pre-populate with data for (path, value) in leaves.iter().cloned() { - trie.update_leaf(path, value).unwrap(); + trie.update_leaf(path, value, &provider).unwrap(); } b.iter_batched( @@ -41,7 +42,7 @@ fn update_leaf(c: &mut Criterion) { }, |(mut trie, new_leaves)| { for (path, new_value) in new_leaves { - trie.update_leaf(*path, new_value).unwrap(); + trie.update_leaf(*path, new_value, &provider).unwrap(); } trie }, @@ -58,10 +59,11 @@ fn remove_leaf(c: &mut Criterion) { group.bench_function(BenchmarkId::from_parameter(leaf_count), |b| { let leaves = generate_leaves(leaf_count); // Start with an empty trie + let provider = DefaultBlindedProvider; let mut trie = SparseTrie::revealed_empty(); // Pre-populate with data for (path, value) in leaves.iter().cloned() { - trie.update_leaf(path, value).unwrap(); + trie.update_leaf(path, value, &provider).unwrap(); } b.iter_batched( @@ -76,7 +78,7 @@ fn remove_leaf(c: &mut Criterion) { }, |(mut trie, delete_leaves)| { for path in delete_leaves { - trie.remove_leaf(path).unwrap(); + trie.remove_leaf(path, &provider).unwrap(); } trie }, diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 66c3596363c..49a31921335 100644 --- 
a/crates/trie/sparse/src/state.rs
+++ b/crates/trie/sparse/src/state.rs
@@ -1,16 +1,15 @@
 use crate::{
-    blinded::{BlindedProvider, BlindedProviderFactory, DefaultBlindedProviderFactory},
+    blinded::{BlindedProvider, BlindedProviderFactory},
     LeafLookup, RevealedSparseTrie, SparseTrie, SparseTrieState, TrieMasks,
 };
 use alloc::{collections::VecDeque, vec::Vec};
 use alloy_primitives::{
-    hex,
     map::{B256Map, HashMap, HashSet},
     Bytes, B256,
 };
 use alloy_rlp::{Decodable, Encodable};
 use alloy_trie::proof::DecodedProofNodes;
-use core::{fmt, iter::Peekable};
+use core::iter::Peekable;
 use reth_execution_errors::{SparseStateTrieErrorKind, SparseStateTrieResult, SparseTrieErrorKind};
 use reth_primitives_traits::Account;
 use reth_trie_common::{
@@ -21,14 +20,13 @@ use reth_trie_common::{
 };
 use tracing::trace;
 
+#[derive(Debug)]
 /// Sparse state trie representing lazy-loaded Ethereum state trie.
-pub struct SparseStateTrie<F: BlindedProviderFactory = DefaultBlindedProviderFactory> {
-    /// Blinded node provider factory.
-    provider_factory: F,
+pub struct SparseStateTrie {
     /// Sparse account trie.
-    state: SparseTrie<F::AccountNodeProvider>,
+    state: SparseTrie,
     /// Sparse storage tries.
-    storages: B256Map<SparseTrie<F::StorageNodeProvider>>,
+    storages: B256Map<SparseTrie>,
     /// Collection of revealed account trie paths.
     revealed_account_paths: HashSet<Nibbles>,
     /// Collection of revealed storage trie paths, per account.
@@ -42,11 +40,9 @@ pub struct SparseStateTrie<F: BlindedProviderFactory = DefaultBlindedProviderFactory> {
 impl Default for SparseStateTrie {
     fn default() -> Self {
         Self {
-            provider_factory: DefaultBlindedProviderFactory,
             state: Default::default(),
             storages: Default::default(),
             revealed_account_paths: Default::default(),
@@ -59,19 +55,6 @@ impl Default for SparseStateTrie {
     }
 }
 
-impl<F: BlindedProviderFactory> fmt::Debug for SparseStateTrie<F> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_struct("SparseStateTrie")
-            .field("state", &self.state)
-            .field("storages", &self.storages)
-            .field("revealed_account_paths", &self.revealed_account_paths)
-            .field("revealed_storage_paths", &self.revealed_storage_paths)
-            .field("retain_updates", &self.retain_updates)
-            .field("account_rlp_buf", &hex::encode(&self.account_rlp_buf))
-            .finish_non_exhaustive()
-    }
-}
-
 #[cfg(test)]
 impl SparseStateTrie {
     /// Create state trie from state trie.
@@ -80,20 +63,10 @@ impl SparseStateTrie {
     }
 }
 
-impl<F: BlindedProviderFactory> SparseStateTrie<F> {
-    /// Create new [`SparseStateTrie`] with blinded node provider factory.
-    pub fn new(provider_factory: F) -> Self {
-        Self {
-            provider_factory,
-            state: Default::default(),
-            storages: Default::default(),
-            revealed_account_paths: Default::default(),
-            revealed_storage_paths: Default::default(),
-            retain_updates: false,
-            account_rlp_buf: Vec::with_capacity(TRIE_ACCOUNT_RLP_MAX_SIZE),
-            #[cfg(feature = "metrics")]
-            metrics: Default::default(),
-        }
+impl SparseStateTrie {
+    /// Create new [`SparseStateTrie`]
+    pub fn new() -> Self {
+        Self::default()
     }
 
     /// Set the retention of branch node updates and deletions.
@@ -163,40 +136,27 @@ impl<F: BlindedProviderFactory> SparseStateTrie<F> {
     }
 
     /// Returns reference to state trie if it was revealed.
-    pub const fn state_trie_ref(&self) -> Option<&RevealedSparseTrie<F::AccountNodeProvider>> {
+    pub const fn state_trie_ref(&self) -> Option<&RevealedSparseTrie> {
         self.state.as_revealed_ref()
     }
 
     /// Returns reference to storage trie if it was revealed.
-    pub fn storage_trie_ref(
-        &self,
-        address: &B256,
-    ) -> Option<&RevealedSparseTrie<F::StorageNodeProvider>> {
+    pub fn storage_trie_ref(&self, address: &B256) -> Option<&RevealedSparseTrie> {
         self.storages.get(address).and_then(|e| e.as_revealed_ref())
     }
 
     /// Returns mutable reference to storage sparse trie if it was revealed.
-    pub fn storage_trie_mut(
-        &mut self,
-        address: &B256,
-    ) -> Option<&mut RevealedSparseTrie<F::StorageNodeProvider>> {
+    pub fn storage_trie_mut(&mut self, address: &B256) -> Option<&mut RevealedSparseTrie> {
         self.storages.get_mut(address).and_then(|e| e.as_revealed_mut())
     }
 
     /// Takes the storage trie for the provided address.
-    pub fn take_storage_trie(
-        &mut self,
-        address: &B256,
-    ) -> Option<SparseTrie<F::StorageNodeProvider>> {
+    pub fn take_storage_trie(&mut self, address: &B256) -> Option<SparseTrie> {
        self.storages.remove(address)
    }
 
     /// Inserts storage trie for the provided address.
-    pub fn insert_storage_trie(
-        &mut self,
-        address: B256,
-        storage_trie: SparseTrie<F::StorageNodeProvider>,
-    ) {
+    pub fn insert_storage_trie(&mut self, address: B256, storage_trie: SparseTrie) {
         self.storages.insert(address, storage_trie);
     }
 
@@ -221,12 +181,7 @@
         let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) };
 
         // Reveal root node if it wasn't already.
-        let trie = self.state.reveal_root_with_provider(
-            self.provider_factory.account_node_provider(),
-            root_node,
-            TrieMasks::none(),
-            self.retain_updates,
-        )?;
+        let trie = self.state.reveal_root(root_node, TrieMasks::none(), self.retain_updates)?;
 
         // Reveal the remaining proof nodes.
         for (path, bytes) in proof {
@@ -265,8 +220,7 @@
         let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) };
 
         // Reveal root node if it wasn't already.
-        let trie = self.storages.entry(account).or_default().reveal_root_with_provider(
-            self.provider_factory.storage_node_provider(account),
+        let trie = self.storages.entry(account).or_default().reveal_root(
             root_node,
             TrieMasks::none(),
             self.retain_updates,
@@ -366,8 +320,7 @@
 
         if let Some(root_node) = Self::validate_root_node_decoded(&mut account_nodes)? {
             // Reveal root node if it wasn't already.
-            let trie = self.state.reveal_root_with_provider(
-                self.provider_factory.account_node_provider(),
+            let trie = self.state.reveal_root(
                 root_node,
                 TrieMasks {
                     hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(),
@@ -432,8 +385,7 @@
 
         if let Some(root_node) = Self::validate_root_node_decoded(&mut nodes)? {
             // Reveal root node if it wasn't already.
-            let trie = self.storages.entry(account).or_default().reveal_root_with_provider(
-                self.provider_factory.storage_node_provider(account),
+            let trie = self.storages.entry(account).or_default().reveal_root(
                 root_node,
                 TrieMasks {
                     hash_mask: storage_subtree
@@ -537,8 +489,7 @@
             let storage_trie_entry = self.storages.entry(account).or_default();
             if path.is_empty() {
                 // Handle special storage state root node case.
-                storage_trie_entry.reveal_root_with_provider(
-                    self.provider_factory.storage_node_provider(account),
+                storage_trie_entry.reveal_root(
                     trie_node,
                     TrieMasks::none(),
                     self.retain_updates,
@@ -559,12 +510,7 @@
         else if !self.revealed_account_paths.contains(&path) {
             if path.is_empty() {
                 // Handle special state root node case.
-                self.state.reveal_root_with_provider(
-                    self.provider_factory.account_node_provider(),
-                    trie_node,
-                    TrieMasks::none(),
-                    self.retain_updates,
-                )?;
+                self.state.reveal_root(trie_node, TrieMasks::none(), self.retain_updates)?;
             } else {
                 // Reveal non-root state trie node.
                 self.state.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?.reveal_node(
@@ -655,11 +601,11 @@
     /// If the trie is not revealed yet, its root will be revealed using the blinded node provider.
     fn revealed_trie_mut(
         &mut self,
-    ) -> SparseStateTrieResult<&mut RevealedSparseTrie<F::AccountNodeProvider>> {
+        provider_factory: impl BlindedProviderFactory,
+    ) -> SparseStateTrieResult<&mut RevealedSparseTrie> {
         match self.state {
             SparseTrie::Blind | SparseTrie::AllocatedEmpty { .. } => {
-                let (root_node, hash_mask, tree_mask) = self
-                    .provider_factory
+                let (root_node, hash_mask, tree_mask) = provider_factory
                     .account_node_provider()
                     .blinded_node(&Nibbles::default())?
                     .map(|node| {
@@ -669,12 +615,7 @@
                     .transpose()?
                     .unwrap_or((TrieNode::EmptyRoot, None, None));
                 self.state
-                    .reveal_root_with_provider(
-                        self.provider_factory.account_node_provider(),
-                        root_node,
-                        TrieMasks { hash_mask, tree_mask },
-                        self.retain_updates,
-                    )
+                    .reveal_root(root_node, TrieMasks { hash_mask, tree_mask }, self.retain_updates)
                     .map_err(Into::into)
             }
             SparseTrie::Revealed(ref mut trie) => Ok(trie),
@@ -684,22 +625,28 @@
     /// Returns sparse trie root.
     ///
     /// If the trie has not been revealed, this function reveals the root node and returns its hash.
-    pub fn root(&mut self) -> SparseStateTrieResult<B256> {
+    pub fn root(
+        &mut self,
+        provider_factory: impl BlindedProviderFactory,
+    ) -> SparseStateTrieResult<B256> {
         // record revealed node metrics
         #[cfg(feature = "metrics")]
         self.metrics.record();
 
-        Ok(self.revealed_trie_mut()?.root())
+        Ok(self.revealed_trie_mut(provider_factory)?.root())
     }
 
     /// Returns sparse trie root and trie updates if the trie has been revealed.
-    pub fn root_with_updates(&mut self) -> SparseStateTrieResult<(B256, TrieUpdates)> {
+    pub fn root_with_updates(
+        &mut self,
+        provider_factory: impl BlindedProviderFactory,
+    ) -> SparseStateTrieResult<(B256, TrieUpdates)> {
         // record revealed node metrics
         #[cfg(feature = "metrics")]
         self.metrics.record();
 
         let storage_tries = self.storage_trie_updates();
        let revealed = self.revealed_trie_mut(provider_factory)?;
 
         let (root, updates) = (revealed.root(), revealed.take_updates());
 
         let updates = TrieUpdates {
@@ -750,12 +697,14 @@
         &mut self,
         path: Nibbles,
         value: Vec<u8>,
+        provider_factory: impl BlindedProviderFactory,
     ) -> SparseStateTrieResult<()> {
         if !self.revealed_account_paths.contains(&path) {
             self.revealed_account_paths.insert(path);
         }
 
-        self.state.update_leaf(path, value)?;
+        let provider = provider_factory.account_node_provider();
+        self.state.update_leaf(path, value, provider)?;
         Ok(())
     }
 
@@ -765,13 +714,16 @@
         address: B256,
         slot: Nibbles,
         value: Vec<u8>,
+        provider_factory: impl BlindedProviderFactory,
     ) -> SparseStateTrieResult<()> {
         if !self.revealed_storage_paths.get(&address).is_some_and(|slots| slots.contains(&slot)) {
             self.revealed_storage_paths.entry(address).or_default().insert(slot);
         }
 
         let storage_trie = self.storages.get_mut(&address).ok_or(SparseTrieErrorKind::Blind)?;
-        storage_trie.update_leaf(slot, value)?;
+
+        let provider = provider_factory.storage_node_provider(address);
+        storage_trie.update_leaf(slot, value, provider)?;
         Ok(())
     }
 
@@ -779,7 +731,12 @@
     /// the storage root based on update storage trie or look it up from existing leaf value.
     ///
     /// If the new account info and storage trie are empty, the account leaf will be removed.
-    pub fn update_account(&mut self, address: B256, account: Account) -> SparseStateTrieResult<()> {
+    pub fn update_account(
+        &mut self,
+        address: B256,
+        account: Account,
+        provider_factory: impl BlindedProviderFactory,
+    ) -> SparseStateTrieResult<()> {
         let nibbles = Nibbles::unpack(address);
 
         let storage_root = if let Some(storage_trie) = self.storages.get_mut(&address) {
@@ -801,12 +758,12 @@
 
         if account.is_empty() && storage_root == EMPTY_ROOT_HASH {
             trace!(target: "trie::sparse", ?address, "Removing account");
-            self.remove_account_leaf(&nibbles)
+            self.remove_account_leaf(&nibbles, provider_factory)
         } else {
             trace!(target: "trie::sparse", ?address, "Updating account");
             self.account_rlp_buf.clear();
             account.into_trie_account(storage_root).encode(&mut self.account_rlp_buf);
-            self.update_account_leaf(nibbles, self.account_rlp_buf.clone())
+            self.update_account_leaf(nibbles, self.account_rlp_buf.clone(), provider_factory)
         }
     }
 
@@ -816,7 +773,11 @@
     ///
     /// If the new storage root is empty, and the account info was already empty, the account leaf
     /// will be removed.
- pub fn update_account_storage_root(&mut self, address: B256) -> SparseStateTrieResult<()> { + pub fn update_account_storage_root( + &mut self, + address: B256, + provider_factory: impl BlindedProviderFactory, + ) -> SparseStateTrieResult<()> { if !self.is_account_revealed(address) { return Err(SparseTrieErrorKind::Blind.into()) } @@ -847,21 +808,26 @@ impl SparseStateTrie { if trie_account == TrieAccount::default() { // If the account is empty, remove it. trace!(target: "trie::sparse", ?address, "Removing account because the storage root is empty"); - self.remove_account_leaf(&nibbles)?; + self.remove_account_leaf(&nibbles, provider_factory)?; } else { // Otherwise, update the account leaf. trace!(target: "trie::sparse", ?address, "Updating account with the new storage root"); self.account_rlp_buf.clear(); trie_account.encode(&mut self.account_rlp_buf); - self.update_account_leaf(nibbles, self.account_rlp_buf.clone())?; + self.update_account_leaf(nibbles, self.account_rlp_buf.clone(), provider_factory)?; } Ok(()) } /// Remove the account leaf node. - pub fn remove_account_leaf(&mut self, path: &Nibbles) -> SparseStateTrieResult<()> { - self.state.remove_leaf(path)?; + pub fn remove_account_leaf( + &mut self, + path: &Nibbles, + provider_factory: impl BlindedProviderFactory, + ) -> SparseStateTrieResult<()> { + let provider = provider_factory.account_node_provider(); + self.state.remove_leaf(path, provider)?; Ok(()) } @@ -870,9 +836,12 @@ impl SparseStateTrie { &mut self, address: B256, slot: &Nibbles, + provider_factory: impl BlindedProviderFactory, ) -> SparseStateTrieResult<()> { let storage_trie = self.storages.get_mut(&address).ok_or(SparseTrieErrorKind::Blind)?; - storage_trie.remove_leaf(slot)?; + + let provider = provider_factory.storage_node_provider(address); + storage_trie.remove_leaf(slot, provider)?; Ok(()) } @@ -935,6 +904,7 @@ fn filter_revealed_nodes( #[cfg(test)] mod tests { use super::*; + use crate::blinded::DefaultBlindedProviderFactory; use alloy_primitives::{ b256, map::{HashMap, HashSet}, @@ -1011,6 +981,7 @@ mod tests { #[test] fn reveal_account_path_twice() { + let provider_factory = DefaultBlindedProviderFactory; let mut sparse = SparseStateTrie::default(); let leaf_value = alloy_rlp::encode(TrieAccount::default()); @@ -1053,7 +1024,7 @@ mod tests { // Remove the leaf node and check that the state trie does not contain the leaf node and // value - sparse.remove_account_leaf(&Nibbles::from_nibbles([0x0])).unwrap(); + sparse.remove_account_leaf(&Nibbles::from_nibbles([0x0]), &provider_factory).unwrap(); assert!(!sparse .state_trie_ref() .unwrap() @@ -1082,6 +1053,7 @@ mod tests { #[test] fn reveal_storage_path_twice() { + let provider_factory = DefaultBlindedProviderFactory; let mut sparse = SparseStateTrie::default(); let leaf_value = alloy_rlp::encode(TrieAccount::default()); @@ -1135,7 +1107,9 @@ mod tests { // Remove the leaf node and check that the storage trie does not contain the leaf node and // value - sparse.remove_storage_leaf(B256::ZERO, &Nibbles::from_nibbles([0x0])).unwrap(); + sparse + .remove_storage_leaf(B256::ZERO, &Nibbles::from_nibbles([0x0]), &provider_factory) + .unwrap(); assert!(!sparse .storage_trie_ref(&B256::ZERO) .unwrap() @@ -1211,6 +1185,7 @@ mod tests { let root = hash_builder.root(); let proof_nodes = hash_builder.take_proof_nodes(); + let provider_factory = DefaultBlindedProviderFactory; let mut sparse = SparseStateTrie::default().with_updates(true); sparse .reveal_decoded_multiproof( @@ -1247,24 +1222,49 @@ mod tests { ) .unwrap(); 
- assert_eq!(sparse.root().unwrap(), root); + assert_eq!(sparse.root(&provider_factory).unwrap(), root); let address_3 = b256!("0x2000000000000000000000000000000000000000000000000000000000000000"); let address_path_3 = Nibbles::unpack(address_3); let account_3 = Account { nonce: account_1.nonce + 1, ..account_1 }; let trie_account_3 = account_3.into_trie_account(EMPTY_ROOT_HASH); - sparse.update_account_leaf(address_path_3, alloy_rlp::encode(trie_account_3)).unwrap(); + sparse + .update_account_leaf( + address_path_3, + alloy_rlp::encode(trie_account_3), + &provider_factory, + ) + .unwrap(); - sparse.update_storage_leaf(address_1, slot_path_3, alloy_rlp::encode(value_3)).unwrap(); + sparse + .update_storage_leaf( + address_1, + slot_path_3, + alloy_rlp::encode(value_3), + &provider_factory, + ) + .unwrap(); trie_account_1.storage_root = sparse.storage_root(address_1).unwrap(); - sparse.update_account_leaf(address_path_1, alloy_rlp::encode(trie_account_1)).unwrap(); + sparse + .update_account_leaf( + address_path_1, + alloy_rlp::encode(trie_account_1), + &provider_factory, + ) + .unwrap(); sparse.wipe_storage(address_2).unwrap(); trie_account_2.storage_root = sparse.storage_root(address_2).unwrap(); - sparse.update_account_leaf(address_path_2, alloy_rlp::encode(trie_account_2)).unwrap(); + sparse + .update_account_leaf( + address_path_2, + alloy_rlp::encode(trie_account_2), + &provider_factory, + ) + .unwrap(); - sparse.root().unwrap(); + sparse.root(&provider_factory).unwrap(); let sparse_updates = sparse.take_trie_updates().unwrap(); // TODO(alexey): assert against real state root calculation updates diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index e2f28c2417f..7dbb611a1fe 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,4 +1,4 @@ -use crate::blinded::{BlindedProvider, DefaultBlindedProvider, RevealedNode}; +use crate::blinded::{BlindedProvider, RevealedNode}; use alloc::{ borrow::Cow, boxed::Box, @@ -78,7 +78,7 @@ pub struct SparseTrieState { /// 3. Incremental operations - nodes can be revealed as needed without loading the entire trie. /// This is what gives rise to the notion of a "sparse" trie. #[derive(PartialEq, Eq, Default, Clone)] -pub enum SparseTrie
<P = DefaultBlindedProvider>
{ +pub enum SparseTrie { /// This is a variant that can be used to store a previously allocated trie. In these cases, /// the trie will still be treated as blind, but the allocated trie will be reused if the trie /// becomes revealed. @@ -97,10 +97,10 @@ pub enum SparseTrie
<P = DefaultBlindedProvider>
{ /// In this state, the trie can be queried and modified for the parts /// that have been revealed. Other parts remain blind and require revealing /// before they can be accessed. - Revealed(Box>), + Revealed(Box), } -impl
<P> fmt::Debug for SparseTrie<P>
{ +impl fmt::Debug for SparseTrie { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::AllocatedEmpty { .. } => write!(f, "AllocatedEmpty"), @@ -118,9 +118,9 @@ impl SparseTrie { /// ``` /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, SparseTrie}; /// - /// let trie: SparseTrie = SparseTrie::blind(); + /// let trie = SparseTrie::blind(); /// assert!(trie.is_blind()); - /// let trie: SparseTrie = SparseTrie::default(); + /// let trie = SparseTrie::default(); /// assert!(trie.is_blind()); /// ``` pub const fn blind() -> Self { @@ -134,7 +134,7 @@ impl SparseTrie { /// ``` /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, SparseTrie}; /// - /// let trie: SparseTrie = SparseTrie::revealed_empty(); + /// let trie = SparseTrie::revealed_empty(); /// assert!(!trie.is_blind()); /// ``` pub fn revealed_empty() -> Self { @@ -158,11 +158,25 @@ impl SparseTrie { masks: TrieMasks, retain_updates: bool, ) -> SparseTrieResult<&mut RevealedSparseTrie> { - self.reveal_root_with_provider(Default::default(), root, masks, retain_updates) + // we take the allocated state here, which will make sure we are either `Blind` or + // `Revealed`, and giving us the allocated state if we were `AllocatedEmpty`. + let allocated = self.take_allocated_state(); + + // if `Blind`, we initialize the revealed trie + if self.is_blind() { + let mut revealed = RevealedSparseTrie::from_root(root, masks, retain_updates)?; + + // If we had an allocated state, we use its maps internally. use_allocated_state copies + // over any information we had from revealing. + if let Some(allocated) = allocated { + revealed.use_allocated_state(allocated); + } + + *self = Self::Revealed(Box::new(revealed)); + } + Ok(self.as_revealed_mut().unwrap()) } -} -impl
<P: BlindedProvider> SparseTrie<P>
{ /// Returns `true` if the sparse trie has no revealed nodes. pub const fn is_blind(&self) -> bool { matches!(self, Self::Blind) @@ -171,7 +185,7 @@ impl
<P: BlindedProvider> SparseTrie<P>
{ /// Returns an immutable reference to the underlying revealed sparse trie. /// /// Returns `None` if the trie is blinded. - pub const fn as_revealed_ref(&self) -> Option<&RevealedSparseTrie
<P>
> { + pub const fn as_revealed_ref(&self) -> Option<&RevealedSparseTrie> { if let Self::Revealed(revealed) = self { Some(revealed) } else { @@ -182,7 +196,7 @@ impl
<P: BlindedProvider> SparseTrie<P>
{ /// Returns a mutable reference to the underlying revealed sparse trie. /// /// Returns `None` if the trie is blinded. - pub fn as_revealed_mut(&mut self) -> Option<&mut RevealedSparseTrie
<P>
> { + pub fn as_revealed_mut(&mut self) -> Option<&mut RevealedSparseTrie> { if let Self::Revealed(revealed) = self { Some(revealed) } else { @@ -190,41 +204,6 @@ impl
<P: BlindedProvider> SparseTrie<P>
{ } } - /// Reveals the root node using a specified provider. - /// - /// This function is similar to [`Self::reveal_root`] but allows the caller to provide - /// a custom provider for fetching blinded nodes. - /// - /// # Returns - /// - /// Mutable reference to [`RevealedSparseTrie`]. - pub fn reveal_root_with_provider( - &mut self, - provider: P, - root: TrieNode, - masks: TrieMasks, - retain_updates: bool, - ) -> SparseTrieResult<&mut RevealedSparseTrie
<P>
> { - // we take the allocated state here, which will make sure we are either `Blind` or - // `Revealed`, and giving us the allocated state if we were `AllocatedEmpty`. - let allocated = self.take_allocated_state(); - - // if `Blind`, we initialize the revealed trie - if self.is_blind() { - let mut revealed = - RevealedSparseTrie::from_provider_and_root(provider, root, masks, retain_updates)?; - - // If we had an allocated state, we use its maps internally. use_allocated_state copies - // over any information we had from revealing. - if let Some(allocated) = allocated { - revealed.use_allocated_state(allocated); - } - - *self = Self::Revealed(Box::new(revealed)); - } - Ok(self.as_revealed_mut().unwrap()) - } - /// Take the allocated state if this is `AllocatedEmpty`, otherwise returns `None`. /// /// Converts this `SparseTrie` into `Blind` if this was `AllocatedEmpty`. @@ -290,15 +269,20 @@ impl
<P: BlindedProvider> SparseTrie<P>
{ } } -impl SparseTrie
<P>
{ +impl SparseTrie { /// Updates (or inserts) a leaf at the given key path with the specified RLP-encoded value. /// /// # Errors /// /// Returns an error if the trie is still blind, or if the update fails. - pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { + pub fn update_leaf( + &mut self, + path: Nibbles, + value: Vec, + provider: impl BlindedProvider, + ) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; - revealed.update_leaf(path, value)?; + revealed.update_leaf(path, value, provider)?; Ok(()) } @@ -307,9 +291,13 @@ impl SparseTrie
<P>
{ /// # Errors /// /// Returns an error if the trie is still blind, or if the leaf cannot be removed - pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { + pub fn remove_leaf( + &mut self, + path: &Nibbles, + provider: impl BlindedProvider, + ) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; - revealed.remove_leaf(path)?; + revealed.remove_leaf(path, provider)?; Ok(()) } } @@ -328,10 +316,7 @@ impl SparseTrie
<P>
{ /// The opposite is also true. /// - All keys in `values` collection are full leaf paths. #[derive(Clone, PartialEq, Eq)] -pub struct RevealedSparseTrie
<P = DefaultBlindedProvider>
{ - /// Provider used for retrieving blinded nodes. - /// This allows lazily loading parts of the trie from an external source. - provider: P, +pub struct RevealedSparseTrie { /// Map from a path (nibbles) to its corresponding sparse trie node. /// This contains all of the revealed nodes in trie. nodes: HashMap, @@ -351,7 +336,7 @@ pub struct RevealedSparseTrie
<P = DefaultBlindedProvider>
{ rlp_buf: Vec, } -impl
<P> fmt::Debug for RevealedSparseTrie<P>
{ +impl fmt::Debug for RevealedSparseTrie { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RevealedSparseTrie") .field("nodes", &self.nodes) @@ -371,7 +356,7 @@ fn encode_nibbles(nibbles: &Nibbles) -> String { encoded[..nibbles.len()].to_string() } -impl fmt::Display for RevealedSparseTrie
<P>
{ +impl fmt::Display for RevealedSparseTrie { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // This prints the trie in preorder traversal, using a stack let mut stack = Vec::new(); @@ -440,7 +425,6 @@ impl fmt::Display for RevealedSparseTrie
<P>
{ impl Default for RevealedSparseTrie { fn default() -> Self { Self { - provider: Default::default(), nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]), branch_node_tree_masks: HashMap::default(), branch_node_hash_masks: HashMap::default(), @@ -468,7 +452,6 @@ impl RevealedSparseTrie { retain_updates: bool, ) -> SparseTrieResult { let mut this = Self { - provider: Default::default(), nodes: HashMap::default(), branch_node_tree_masks: HashMap::default(), branch_node_hash_masks: HashMap::default(), @@ -483,57 +466,7 @@ impl RevealedSparseTrie { } } -impl
<P: BlindedProvider> RevealedSparseTrie<P>
{ - /// Creates a new revealed sparse trie from the given provider and root node. - /// - /// Similar to `from_root`, but allows specifying a custom provider for - /// retrieving blinded nodes. - /// - /// # Returns - /// - /// A [`RevealedSparseTrie`] if successful, or an error if revealing fails. - pub fn from_provider_and_root( - provider: P, - node: TrieNode, - masks: TrieMasks, - retain_updates: bool, - ) -> SparseTrieResult { - let mut this = Self { - provider, - nodes: HashMap::default(), - branch_node_tree_masks: HashMap::default(), - branch_node_hash_masks: HashMap::default(), - values: HashMap::default(), - prefix_set: PrefixSetMut::default(), - updates: None, - rlp_buf: Vec::new(), - } - .with_updates(retain_updates); - this.reveal_node(Nibbles::default(), node, masks)?; - Ok(this) - } - - /// Replaces the current provider with a new provider. - /// - /// This allows changing how blinded nodes are retrieved without - /// rebuilding the entire trie structure. - /// - /// # Returns - /// - /// A new [`RevealedSparseTrie`] with the updated provider. - pub fn with_provider(self, provider: BP) -> RevealedSparseTrie { - RevealedSparseTrie { - provider, - nodes: self.nodes, - branch_node_tree_masks: self.branch_node_tree_masks, - branch_node_hash_masks: self.branch_node_hash_masks, - values: self.values, - prefix_set: self.prefix_set, - updates: self.updates, - rlp_buf: self.rlp_buf, - } - } - +impl RevealedSparseTrie { /// Sets the fields of this `RevealedSparseTrie` to the fields of the input /// `SparseTrieState`. /// @@ -560,11 +493,6 @@ impl
<P: BlindedProvider> RevealedSparseTrie<P>
{ self.values = other.values; } - /// Set the provider for the trie. - pub fn set_provider(&mut self, provider: P) { - self.provider = provider; - } - /// Configures the trie to retain information about updates. /// /// If `retain_updates` is true, the trie will record branch node updates and deletions. @@ -1431,7 +1359,7 @@ pub enum LeafLookup { }, } -impl RevealedSparseTrie
<P>
{ +impl RevealedSparseTrie { /// Attempts to find a leaf node at the specified path. /// /// This method traverses the trie from the root down to the given path, checking @@ -1592,7 +1520,12 @@ impl RevealedSparseTrie
<P>
{ /// /// Note: If an update requires revealing a blinded node, an error is returned if the blinded /// provider returns an error. - pub fn update_leaf(&mut self, path: Nibbles, value: Vec) -> SparseTrieResult<()> { + pub fn update_leaf( + &mut self, + path: Nibbles, + value: Vec, + provider: impl BlindedProvider, + ) -> SparseTrieResult<()> { self.prefix_set.insert(path); let existing = self.values.insert(path, value); if existing.is_some() { @@ -1660,7 +1593,7 @@ impl RevealedSparseTrie
<P>
{ // Check if the extension node child is a hash that needs to be revealed if self.nodes.get(¤t).unwrap().is_hash() { if let Some(RevealedNode { node, tree_mask, hash_mask }) = - self.provider.blinded_node(¤t)? + provider.blinded_node(¤t)? { let decoded = TrieNode::decode(&mut &node[..])?; trace!( @@ -1727,7 +1660,11 @@ impl RevealedSparseTrie
<P>
{ /// /// Returns `Ok(())` if the leaf is successfully removed, otherwise returns an error /// if the leaf is not present or if a blinded node prevents removal. - pub fn remove_leaf(&mut self, path: &Nibbles) -> SparseTrieResult<()> { + pub fn remove_leaf( + &mut self, + path: &Nibbles, + provider: impl BlindedProvider, + ) -> SparseTrieResult<()> { if self.values.remove(path).is_none() { if let Some(&SparseNode::Hash(hash)) = self.nodes.get(path) { // Leaf is present in the trie, but it's blinded. @@ -1833,7 +1770,7 @@ impl RevealedSparseTrie
<P>
{ if self.nodes.get(&child_path).unwrap().is_hash() { trace!(target: "trie::sparse", ?child_path, "Retrieving remaining blinded branch child"); if let Some(RevealedNode { node, tree_mask, hash_mask }) = - self.provider.blinded_node(&child_path)? + provider.blinded_node(&child_path)? { let decoded = TrieNode::decode(&mut &node[..])?; trace!( @@ -2215,11 +2152,12 @@ mod find_leaf_tests { #[test] fn find_leaf_existing_leaf() { // Create a simple trie with one leaf + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::default(); let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); let value = b"test_value".to_vec(); - sparse.update_leaf(path, value.clone()).unwrap(); + sparse.update_leaf(path, value.clone(), &provider).unwrap(); // Check that the leaf exists let result = sparse.find_leaf(&path, None); @@ -2233,12 +2171,13 @@ mod find_leaf_tests { #[test] fn find_leaf_value_mismatch() { // Create a simple trie with one leaf + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::default(); let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); let value = b"test_value".to_vec(); let wrong_value = b"wrong_value".to_vec(); - sparse.update_leaf(path, value).unwrap(); + sparse.update_leaf(path, value, &provider).unwrap(); // Check with wrong expected value let result = sparse.find_leaf(&path, Some(&wrong_value)); @@ -2264,7 +2203,7 @@ mod find_leaf_tests { #[test] fn find_leaf_empty_trie() { - let sparse = RevealedSparseTrie::::default(); + let sparse = RevealedSparseTrie::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let result = sparse.find_leaf(&path, None); @@ -2275,9 +2214,10 @@ mod find_leaf_tests { #[test] fn find_leaf_exists_no_value_check() { - let mut sparse = RevealedSparseTrie::::default(); + let provider = DefaultBlindedProvider; + let mut sparse = RevealedSparseTrie::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); - sparse.update_leaf(path, VALUE_A()).unwrap(); + sparse.update_leaf(path, VALUE_A(), &provider).unwrap(); let result = sparse.find_leaf(&path, None); assert_matches!(result, Ok(LeafLookup::Exists)); @@ -2285,10 +2225,11 @@ mod find_leaf_tests { #[test] fn find_leaf_exists_with_value_check_ok() { - let mut sparse = RevealedSparseTrie::::default(); + let provider = DefaultBlindedProvider; + let mut sparse = RevealedSparseTrie::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let value = VALUE_A(); - sparse.update_leaf(path, value.clone()).unwrap(); + sparse.update_leaf(path, value.clone(), &provider).unwrap(); let result = sparse.find_leaf(&path, Some(&value)); assert_matches!(result, Ok(LeafLookup::Exists)); @@ -2296,13 +2237,14 @@ mod find_leaf_tests { #[test] fn find_leaf_exclusion_branch_divergence() { - let mut sparse = RevealedSparseTrie::::default(); + let provider = DefaultBlindedProvider; + let mut sparse = RevealedSparseTrie::default(); let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); // Belongs to same branch let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x7, 0x8]); // Diverges at nibble 7 - sparse.update_leaf(path1, VALUE_A()).unwrap(); - sparse.update_leaf(path2, VALUE_B()).unwrap(); + sparse.update_leaf(path1, VALUE_A(), &provider).unwrap(); + sparse.update_leaf(path2, VALUE_B(), &provider).unwrap(); let result = sparse.find_leaf(&search_path, None); @@ -2313,13 +2255,14 @@ mod find_leaf_tests { #[test] fn 
find_leaf_exclusion_extension_divergence() { - let mut sparse = RevealedSparseTrie::::default(); + let provider = DefaultBlindedProvider; + let mut sparse = RevealedSparseTrie::default(); // This will create an extension node at root with key 0x12 let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); // This path diverges from the extension key let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x7, 0x8]); - sparse.update_leaf(path1, VALUE_A()).unwrap(); + sparse.update_leaf(path1, VALUE_A(), &provider).unwrap(); let result = sparse.find_leaf(&search_path, None); @@ -2330,11 +2273,12 @@ mod find_leaf_tests { #[test] fn find_leaf_exclusion_leaf_divergence() { - let mut sparse = RevealedSparseTrie::::default(); + let provider = DefaultBlindedProvider; + let mut sparse = RevealedSparseTrie::default(); let existing_leaf_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); - sparse.update_leaf(existing_leaf_path, VALUE_A()).unwrap(); + sparse.update_leaf(existing_leaf_path, VALUE_A(), &provider).unwrap(); let result = sparse.find_leaf(&search_path, None); @@ -2347,13 +2291,14 @@ mod find_leaf_tests { #[test] fn find_leaf_exclusion_path_ends_at_branch() { - let mut sparse = RevealedSparseTrie::::default(); + let provider = DefaultBlindedProvider; + let mut sparse = RevealedSparseTrie::default(); let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2]); // Path of the branch itself - sparse.update_leaf(path1, VALUE_A()).unwrap(); - sparse.update_leaf(path2, VALUE_B()).unwrap(); + sparse.update_leaf(path1, VALUE_A(), &provider).unwrap(); + sparse.update_leaf(path2, VALUE_B(), &provider).unwrap(); let result = sparse.find_leaf(&search_path, None); @@ -2386,7 +2331,6 @@ mod find_leaf_tests { nodes.insert(leaf_path, SparseNode::Hash(blinded_hash)); // Blinded node at 0x1234 let sparse = RevealedSparseTrie { - provider: DefaultBlindedProvider, nodes, branch_node_tree_masks: Default::default(), branch_node_hash_masks: Default::default(), @@ -2430,7 +2374,6 @@ mod find_leaf_tests { values.insert(path_revealed_leaf, VALUE_A()); let sparse = RevealedSparseTrie { - provider: DefaultBlindedProvider, nodes, branch_node_tree_masks: Default::default(), branch_node_hash_masks: Default::default(), @@ -2510,6 +2453,7 @@ mod find_leaf_tests { #[cfg(test)] mod tests { use super::*; + use crate::blinded::DefaultBlindedProvider; use alloy_primitives::{map::B256Set, U256}; use alloy_rlp::Encodable; use assert_matches::assert_matches; @@ -2694,8 +2638,9 @@ mod tests { [key], ); + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::default().with_updates(true); - sparse.update_leaf(key, value_encoded()).unwrap(); + sparse.update_leaf(key, value_encoded(), &provider).unwrap(); let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2724,9 +2669,10 @@ mod tests { paths.clone(), ); + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(*path, value_encoded()).unwrap(); + sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2754,9 +2700,10 @@ mod tests { paths.clone(), ); + let provider = 
DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(*path, value_encoded()).unwrap(); + sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2792,9 +2739,10 @@ mod tests { paths.clone(), ); + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(*path, value_encoded()).unwrap(); + sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2831,9 +2779,10 @@ mod tests { paths.clone(), ); + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::default().with_updates(true); for path in &paths { - sparse.update_leaf(*path, old_value_encoded.clone()).unwrap(); + sparse.update_leaf(*path, old_value_encoded.clone(), &provider).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.updates_ref(); @@ -2851,7 +2800,7 @@ mod tests { ); for path in &paths { - sparse.update_leaf(*path, new_value_encoded.clone()).unwrap(); + sparse.update_leaf(*path, new_value_encoded.clone(), &provider).unwrap(); } let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2865,26 +2814,29 @@ mod tests { fn sparse_trie_remove_leaf() { reth_tracing::init_test_tracing(); + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone(), &provider) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value, &provider) .unwrap(); - sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1011) @@ -2940,7 +2892,7 @@ mod tests { ]) ); - sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), &provider).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -2991,7 +2943,7 @@ mod tests { ]) ); - sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), &provider).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -3027,7 +2979,7 @@ mod tests { ]) ); - sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), 
&provider).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -3060,7 +3012,7 @@ mod tests { ]) ); - sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), &provider).unwrap(); // Extension (Key = 5) // └── Branch (Mask = 1001) @@ -3082,7 +3034,7 @@ mod tests { ]) ); - sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), &provider).unwrap(); // Leaf (Key = 53302) pretty_assertions::assert_eq!( @@ -3093,7 +3045,7 @@ mod tests { ),]) ); - sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])).unwrap(); + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), &provider).unwrap(); // Empty pretty_assertions::assert_eq!( @@ -3116,6 +3068,7 @@ mod tests { TrieMask::new(0b11), )); + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::from_root( branch.clone(), TrieMasks { hash_mask: Some(TrieMask::new(0b01)), tree_mask: None }, @@ -3141,7 +3094,7 @@ mod tests { // Removing a blinded leaf should result in an error assert_matches!( - sparse.remove_leaf(&Nibbles::from_nibbles([0x0])).map_err(|e| e.into_kind()), + sparse.remove_leaf(&Nibbles::from_nibbles([0x0]), &provider).map_err(|e| e.into_kind()), Err(SparseTrieErrorKind::BlindedNode { path, hash }) if path == Nibbles::from_nibbles([0x0]) && hash == B256::repeat_byte(1) ); } @@ -3160,6 +3113,7 @@ mod tests { TrieMask::new(0b11), )); + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::from_root( branch.clone(), TrieMasks { hash_mask: Some(TrieMask::new(0b01)), tree_mask: None }, @@ -3185,7 +3139,7 @@ mod tests { // Removing a non-existent leaf should be a noop let sparse_old = sparse.clone(); - assert_matches!(sparse.remove_leaf(&Nibbles::from_nibbles([0x2])), Ok(())); + assert_matches!(sparse.remove_leaf(&Nibbles::from_nibbles([0x2]), &provider), Ok(())); assert_eq!(sparse, sparse_old); } @@ -3199,6 +3153,7 @@ mod tests { fn test(updates: Vec<(BTreeMap, BTreeSet)>) { { let mut state = BTreeMap::default(); + let default_provider = DefaultBlindedProvider; let provider_factory = create_test_provider_factory(); let mut sparse = RevealedSparseTrie::default().with_updates(true); @@ -3208,7 +3163,7 @@ mod tests { let account = account.into_trie_account(EMPTY_ROOT_HASH); let mut account_rlp = Vec::new(); account.encode(&mut account_rlp); - sparse.update_leaf(key, account_rlp).unwrap(); + sparse.update_leaf(key, account_rlp, &default_provider).unwrap(); } // We need to clone the sparse trie, so that all updated branch nodes are // preserved, and not only those that were changed after the last call to @@ -3248,7 +3203,7 @@ mod tests { // that the sparse trie root still matches the hash builder root for key in &keys_to_delete { state.remove(key).unwrap(); - sparse.remove_leaf(key).unwrap(); + sparse.remove_leaf(key, &default_provider).unwrap(); } // We need to clone the sparse trie, so that all updated branch nodes are @@ -3358,6 +3313,8 @@ mod tests { Default::default(), [Nibbles::default()], ); + + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { @@ -3395,7 +3352,7 @@ mod tests { ); // Insert the leaf for the second key - sparse.update_leaf(key2(), value_encoded()).unwrap(); + sparse.update_leaf(key2(), value_encoded(), &provider).unwrap(); 
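At the single-trie level the same discipline applies: `RevealedSparseTrie::update_leaf` and `remove_leaf` now borrow a `BlindedProvider` for exactly one call. A condensed sketch of the round trip, assuming the same re-exports the tests above rely on (the path and value are illustrative):

```rust
use reth_trie_common::Nibbles;
use reth_trie_sparse::{blinded::DefaultBlindedProvider, RevealedSparseTrie};

fn leaf_roundtrip() {
    let provider = DefaultBlindedProvider;
    let mut sparse = RevealedSparseTrie::default();
    let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); // illustrative path
    // Insert, then delete; each call borrows the provider only transiently,
    // so a single provider value can serve many mutations.
    sparse.update_leaf(path, b"value".to_vec(), &provider).unwrap();
    sparse.remove_leaf(&path, &provider).unwrap();
}
```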
// Check that the branch node was updated and another nibble was set assert_eq!( @@ -3466,6 +3423,8 @@ mod tests { Default::default(), [Nibbles::default()], ); + + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { @@ -3504,7 +3463,7 @@ mod tests { ); // Remove the leaf for the first key - sparse.remove_leaf(&key1()).unwrap(); + sparse.remove_leaf(&key1(), &provider).unwrap(); // Check that the branch node was turned into an extension node assert_eq!( @@ -3567,6 +3526,8 @@ mod tests { Default::default(), [Nibbles::default()], ); + + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { @@ -3584,7 +3545,7 @@ mod tests { ); // Insert the leaf with a different prefix - sparse.update_leaf(key3(), value_encoded()).unwrap(); + sparse.update_leaf(key3(), value_encoded(), &provider).unwrap(); // Check that the extension node was turned into a branch node assert_matches!( @@ -3621,6 +3582,7 @@ mod tests { #[test] fn sparse_trie_get_changed_nodes_at_depth() { + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -3638,21 +3600,23 @@ mod tests { // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 4 // └── 2 -> Leaf (Key = 3320, Path = 53320) – Level 4 sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone(), &provider) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value, &provider) .unwrap(); - sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); assert_eq!( sparse.get_changed_nodes_at_depth(&mut PrefixSet::default(), 0), @@ -3732,9 +3696,11 @@ mod tests { Default::default(), [Nibbles::default()], ); + + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::default(); - sparse.update_leaf(key1(), value_encoded()).unwrap(); - sparse.update_leaf(key2(), value_encoded()).unwrap(); + sparse.update_leaf(key1(), value_encoded(), &provider).unwrap(); + sparse.update_leaf(key2(), value_encoded(), &provider).unwrap(); let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -3744,6 +3710,7 @@ mod tests { #[test] fn sparse_trie_wipe() { + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::default().with_updates(true); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -3761,21 +3728,23 @@ mod tests { // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 
4 // └── 2 -> Leaf (Key = 3320, Path = 53320) – Level 4 sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone(), &provider) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value, &provider) .unwrap(); - sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); sparse.wipe(); @@ -3786,18 +3755,21 @@ mod tests { fn sparse_trie_clear() { // tests that if we fill a sparse trie with some nodes and then clear it, it has the same // contents as an empty sparse trie + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone(), &provider) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value, &provider) .unwrap(); - sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value).unwrap(); sparse.clear(); @@ -3813,6 +3785,7 @@ mod tests { #[test] fn sparse_trie_display() { + let provider = DefaultBlindedProvider; let mut sparse = RevealedSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -3830,21 +3803,23 @@ mod tests { // ├── 0 -> Leaf (Key = 3302, Path = 53302) – Level 4 // └── 2 -> Leaf (Key = 3320, Path = 53320) – Level 4 sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone(), &provider) + .unwrap(); + sparse + .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone(), &provider) .unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone(), &provider) 
.unwrap(); sparse - .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()) + .update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value, &provider) .unwrap(); - sparse.update_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value).unwrap(); let normal_printed = format!("{sparse}"); let expected = "\ diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index ce40a01e1c0..8417b6875a9 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -153,7 +153,7 @@ where ), tx, ); - let mut sparse_trie = SparseStateTrie::new(blinded_provider_factory); + let mut sparse_trie = SparseStateTrie::new(); sparse_trie.reveal_multiproof(multiproof)?; // Attempt to update state trie to gather additional information for the witness. @@ -161,6 +161,7 @@ where proof_targets.into_iter().sorted_unstable_by_key(|(ha, _)| *ha) { // Update storage trie first. + let provider = blinded_provider_factory.storage_node_provider(hashed_address); let storage = state.storages.get(&hashed_address); let storage_trie = sparse_trie.storage_trie_mut(&hashed_address).ok_or( SparseStateTrieErrorKind::SparseStorageTrie( @@ -176,11 +177,11 @@ where .map(|v| alloy_rlp::encode_fixed_size(v).to_vec()); if let Some(value) = maybe_leaf_value { - storage_trie.update_leaf(storage_nibbles, value).map_err(|err| { + storage_trie.update_leaf(storage_nibbles, value, &provider).map_err(|err| { SparseStateTrieErrorKind::SparseStorageTrie(hashed_address, err.into_kind()) })?; } else { - storage_trie.remove_leaf(&storage_nibbles).map_err(|err| { + storage_trie.remove_leaf(&storage_nibbles, &provider).map_err(|err| { SparseStateTrieErrorKind::SparseStorageTrie(hashed_address, err.into_kind()) })?; } @@ -194,7 +195,7 @@ where .get(&hashed_address) .ok_or(TrieWitnessError::MissingAccount(hashed_address))? .unwrap_or_default(); - sparse_trie.update_account(hashed_address, account)?; + sparse_trie.update_account(hashed_address, account, &blinded_provider_factory)?; while let Ok(node) = rx.try_recv() { self.witness.insert(keccak256(&node), node); From 22d271a7148bf352fa1f8b52cfffdf48e78b2fa3 Mon Sep 17 00:00:00 2001 From: youyyytrok Date: Mon, 30 Jun 2025 22:14:58 +0200 Subject: [PATCH 011/305] chore: fixed dead link in docs/.../sync-op-mainnet.mdx (#17146) --- docs/vocs/docs/pages/run/faq/sync-op-mainnet.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/vocs/docs/pages/run/faq/sync-op-mainnet.mdx b/docs/vocs/docs/pages/run/faq/sync-op-mainnet.mdx index e895331288e..58fe9a2babe 100644 --- a/docs/vocs/docs/pages/run/faq/sync-op-mainnet.mdx +++ b/docs/vocs/docs/pages/run/faq/sync-op-mainnet.mdx @@ -11,7 +11,7 @@ To sync OP mainnet, Bedrock state needs to be imported as a starting point. Ther ## Minimal bootstrap (recommended) -**The state snapshot at Bedrock block is required.** It can be exported from [op-geth](https://github.com/testinprod-io/op-erigon/blob/pcw109550/bedrock-db-migration/bedrock-migration#export-state) (**.jsonl**) or downloaded directly from [here](https://mega.nz/file/GdZ1xbAT#a9cBv3AqzsTGXYgX7nZc_3fl--tcBmOAIwIA5ND6kwc). +**The state snapshot at Bedrock block is required.** It can be exported from [op-geth](https://github.com/testinprod-io/op-erigon/blob/pcw109550/bedrock-db-migration/bedrock-migration.md#export-state) (**.jsonl**) or downloaded directly from [here](https://mega.nz/file/GdZ1xbAT#a9cBv3AqzsTGXYgX7nZc_3fl--tcBmOAIwIA5ND6kwc). 
Import the state snapshot From 7276dae4eeaa8fa448a4f13a154f1b482444631a Mon Sep 17 00:00:00 2001 From: Niran Babalola Date: Mon, 30 Jun 2025 15:44:28 -0500 Subject: [PATCH 012/305] feat: introduce max_tx_gas_limit feature to enforce per-transaction gas limits (#17028) Co-authored-by: Matthias Seitz --- crates/ethereum/node/src/node.rs | 1 + crates/node/core/src/args/txpool.rs | 6 ++ crates/optimism/node/src/node.rs | 1 + crates/rpc/rpc-eth-types/src/error/mod.rs | 5 ++ crates/transaction-pool/src/error.rs | 7 ++ crates/transaction-pool/src/validate/eth.rs | 94 +++++++++++++++++++++ docs/vocs/docs/pages/cli/reth/node.mdx | 3 + 7 files changed, 117 insertions(+) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index f1c238aaa95..c0aeba68b3b 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -411,6 +411,7 @@ where .kzg_settings(ctx.kzg_settings()?) .with_local_transactions_config(pool_config.local_transactions_config.clone()) .set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap) + .with_max_tx_gas_limit(ctx.config().txpool.max_tx_gas_limit) .with_additional_tasks(ctx.config().txpool.additional_validation_tasks) .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()); diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index 59b920cc604..cae968f2d7e 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -69,6 +69,11 @@ pub struct TxPoolArgs { #[arg(long = "txpool.gas-limit", default_value_t = ETHEREUM_BLOCK_GAS_LIMIT_30M)] pub enforced_gas_limit: u64, + /// Maximum gas limit for individual transactions. Transactions exceeding this limit will be + /// rejected by the transaction pool + #[arg(long = "txpool.max-tx-gas")] + pub max_tx_gas_limit: Option, + /// Price bump percentage to replace an already existing blob transaction #[arg(long = "blobpool.pricebump", default_value_t = REPLACE_BLOB_PRICE_BUMP)] pub blob_transaction_price_bump: u128, @@ -140,6 +145,7 @@ impl Default for TxPoolArgs { price_bump: DEFAULT_PRICE_BUMP, minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE, enforced_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M, + max_tx_gas_limit: None, blob_transaction_price_bump: REPLACE_BLOB_PRICE_BUMP, max_tx_input_bytes: DEFAULT_MAX_TX_INPUT_BYTES, max_cached_entries: DEFAULT_MAX_CACHED_BLOBS, diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 2d33f05f4ae..117adde1d46 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -806,6 +806,7 @@ where .with_max_tx_input_bytes(ctx.config().txpool.max_tx_input_bytes) .kzg_settings(ctx.kzg_settings()?) 
.set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap) + .with_max_tx_gas_limit(ctx.config().txpool.max_tx_gas_limit) .with_additional_tasks( pool_config_overrides .additional_validation_tasks diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index 96adc4e67b2..e598ea3df76 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -801,6 +801,9 @@ pub enum RpcPoolError { /// When the transaction exceeds the block gas limit #[error("exceeds block gas limit")] ExceedsGasLimit, + /// When the transaction gas limit exceeds the maximum transaction gas limit + #[error("exceeds max transaction gas limit")] + MaxTxGasLimitExceeded, /// Thrown when a new transaction is added to the pool, but then immediately discarded to /// respect the tx fee exceeds the configured cap #[error("tx fee ({max_tx_fee_wei} wei) exceeds the configured cap ({tx_fee_cap_wei} wei)")] @@ -854,6 +857,7 @@ impl From for jsonrpsee_types::error::ErrorObject<'static> { RpcPoolError::Underpriced | RpcPoolError::ReplaceUnderpriced | RpcPoolError::ExceedsGasLimit | + RpcPoolError::MaxTxGasLimitExceeded | RpcPoolError::ExceedsFeeCap { .. } | RpcPoolError::NegativeValue | RpcPoolError::OversizedData | @@ -890,6 +894,7 @@ impl From for RpcPoolError { match err { InvalidPoolTransactionError::Consensus(err) => Self::Invalid(err.into()), InvalidPoolTransactionError::ExceedsGasLimit(_, _) => Self::ExceedsGasLimit, + InvalidPoolTransactionError::MaxTxGasLimitExceeded(_, _) => Self::MaxTxGasLimitExceeded, InvalidPoolTransactionError::ExceedsFeeCap { max_tx_fee_wei, tx_fee_cap_wei } => { Self::ExceedsFeeCap { max_tx_fee_wei, tx_fee_cap_wei } } diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 686c9456d39..09aec26bd1e 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -218,6 +218,9 @@ pub enum InvalidPoolTransactionError { /// respect the size limits of the pool. #[error("transaction's gas limit {0} exceeds block's gas limit {1}")] ExceedsGasLimit(u64, u64), + /// Thrown when a transaction's gas limit exceeds the configured maximum per-transaction limit. + #[error("transaction's gas limit {0} exceeds maximum per-transaction gas limit {1}")] + MaxTxGasLimitExceeded(u64, u64), /// Thrown when a new transaction is added to the pool, but then immediately discarded to /// respect the tx fee exceeds the configured cap #[error("tx fee ({max_tx_fee_wei} wei) exceeds the configured cap ({tx_fee_cap_wei} wei)")] @@ -320,6 +323,10 @@ impl InvalidPoolTransactionError { } } Self::ExceedsGasLimit(_, _) => true, + Self::MaxTxGasLimitExceeded(_, _) => { + // local setting + false + } Self::ExceedsFeeCap { max_tx_fee_wei: _, tx_fee_cap_wei: _ } => true, Self::ExceedsMaxInitCodeSize(_, _) => true, Self::OversizedData(_, _) => true, diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 2c5803a735a..deea3598013 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -231,6 +231,8 @@ pub(crate) struct EthTransactionValidatorInner { local_transactions_config: LocalTransactionConfig, /// Maximum size in bytes a single transaction can have in order to be accepted into the pool. 
max_tx_input_bytes: usize, + /// Maximum gas limit for individual transactions + max_tx_gas_limit: Option, /// Marker for the transaction type _marker: PhantomData, /// Metrics for tsx pool validation @@ -387,6 +389,19 @@ where )) } + // Check individual transaction gas limit if configured + if let Some(max_tx_gas_limit) = self.max_tx_gas_limit { + if transaction_gas_limit > max_tx_gas_limit { + return Err(TransactionValidationOutcome::Invalid( + transaction, + InvalidPoolTransactionError::MaxTxGasLimitExceeded( + transaction_gas_limit, + max_tx_gas_limit, + ), + )) + } + } + // Ensure max_priority_fee_per_gas (if EIP1559) is less than max_fee_per_gas if any. if transaction.max_priority_fee_per_gas() > Some(transaction.max_fee_per_gas()) { return Err(TransactionValidationOutcome::Invalid( @@ -771,6 +786,8 @@ pub struct EthTransactionValidatorBuilder { local_transactions_config: LocalTransactionConfig, /// Max size in bytes of a single transaction allowed max_tx_input_bytes: usize, + /// Maximum gas limit for individual transactions + max_tx_gas_limit: Option, } impl EthTransactionValidatorBuilder { @@ -793,6 +810,7 @@ impl EthTransactionValidatorBuilder { local_transactions_config: Default::default(), max_tx_input_bytes: DEFAULT_MAX_TX_INPUT_BYTES, tx_fee_cap: Some(1e18 as u128), + max_tx_gas_limit: None, // by default all transaction types are allowed eip2718: true, eip1559: true, @@ -962,6 +980,12 @@ impl EthTransactionValidatorBuilder { self } + /// Sets the maximum gas limit for individual transactions + pub const fn with_max_tx_gas_limit(mut self, max_tx_gas_limit: Option) -> Self { + self.max_tx_gas_limit = max_tx_gas_limit; + self + } + /// Builds a the [`EthTransactionValidator`] without spawning validator tasks. pub fn build(self, blob_store: S) -> EthTransactionValidator where @@ -983,6 +1007,7 @@ impl EthTransactionValidatorBuilder { kzg_settings, local_transactions_config, max_tx_input_bytes, + max_tx_gas_limit, .. 
} = self; @@ -1017,6 +1042,7 @@ impl EthTransactionValidatorBuilder { kzg_settings, local_transactions_config, max_tx_input_bytes, + max_tx_gas_limit, _marker: Default::default(), validation_metrics: TxPoolValidationMetrics::default(), }; @@ -1315,4 +1341,72 @@ mod tests { let outcome = validator.validate_one(TransactionOrigin::Local, transaction); assert!(outcome.is_valid()); } + + #[tokio::test] + async fn invalid_on_max_tx_gas_limit_exceeded() { + let transaction = get_transaction(); + let provider = MockEthProvider::default(); + provider.add_account( + transaction.sender(), + ExtendedAccount::new(transaction.nonce(), U256::MAX), + ); + + let blob_store = InMemoryBlobStore::default(); + let validator = EthTransactionValidatorBuilder::new(provider) + .with_max_tx_gas_limit(Some(500_000)) // Set limit lower than transaction gas limit (1_015_288) + .build(blob_store.clone()); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); + assert!(outcome.is_invalid()); + + let pool = + Pool::new(validator, CoinbaseTipOrdering::default(), blob_store, Default::default()); + + let res = pool.add_external_transaction(transaction.clone()).await; + assert!(res.is_err()); + assert!(matches!( + res.unwrap_err().kind, + PoolErrorKind::InvalidTransaction(InvalidPoolTransactionError::MaxTxGasLimitExceeded( + 1_015_288, 500_000 + )) + )); + let tx = pool.get(transaction.hash()); + assert!(tx.is_none()); + } + + #[tokio::test] + async fn valid_on_max_tx_gas_limit_disabled() { + let transaction = get_transaction(); + let provider = MockEthProvider::default(); + provider.add_account( + transaction.sender(), + ExtendedAccount::new(transaction.nonce(), U256::MAX), + ); + + let blob_store = InMemoryBlobStore::default(); + let validator = EthTransactionValidatorBuilder::new(provider) + .with_max_tx_gas_limit(None) // disabled + .build(blob_store); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction); + assert!(outcome.is_valid()); + } + + #[tokio::test] + async fn valid_on_max_tx_gas_limit_within_limit() { + let transaction = get_transaction(); + let provider = MockEthProvider::default(); + provider.add_account( + transaction.sender(), + ExtendedAccount::new(transaction.nonce(), U256::MAX), + ); + + let blob_store = InMemoryBlobStore::default(); + let validator = EthTransactionValidatorBuilder::new(provider) + .with_max_tx_gas_limit(Some(2_000_000)) // Set limit higher than transaction gas limit (1_015_288) + .build(blob_store); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction); + assert!(outcome.is_valid()); + } } diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index efd96c90028..8d53c821fc3 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -494,6 +494,9 @@ TxPool: [default: 30000000] + --txpool.max-tx-gas + Maximum gas limit for individual transactions. 
Transactions exceeding this limit will be rejected by the transaction pool + --blobpool.pricebump Price bump percentage to replace an already existing blob transaction From fcf58cb5acc2825e7c046f6741e90a8c5dab7847 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 1 Jul 2025 00:14:17 +0200 Subject: [PATCH 013/305] fix: use safe math for withdrawals check (#17150) --- crates/rpc/rpc/src/validation.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index cb64e10e047..e2d5a553d54 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -307,7 +307,7 @@ where } } - if balance_after >= balance_before + message.value { + if balance_after >= balance_before.saturating_add(message.value) { return Ok(()) } From 06b542c556eb8c2d31f86c421b2a7311e587fa74 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 1 Jul 2025 12:30:57 +0200 Subject: [PATCH 014/305] docs: fix broken links and typos (#17149) --- crates/e2e-test-utils/src/node.rs | 2 +- crates/net/eth-wire/src/eth_snap_stream.rs | 2 +- crates/net/network/src/session/conn.rs | 2 +- docs/vocs/docs/pages/overview.mdx | 2 +- docs/vocs/docs/pages/run/system-requirements.mdx | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/e2e-test-utils/src/node.rs b/crates/e2e-test-utils/src/node.rs index 4a096ac5a7f..080304ca0c8 100644 --- a/crates/e2e-test-utils/src/node.rs +++ b/crates/e2e-test-utils/src/node.rs @@ -27,7 +27,7 @@ use std::pin::Pin; use tokio_stream::StreamExt; use url::Url; -/// An helper struct to handle node actions +/// A helper struct to handle node actions #[expect(missing_debug_implementations)] pub struct NodeTestContext where diff --git a/crates/net/eth-wire/src/eth_snap_stream.rs b/crates/net/eth-wire/src/eth_snap_stream.rs index 000e1615103..82260186593 100644 --- a/crates/net/eth-wire/src/eth_snap_stream.rs +++ b/crates/net/eth-wire/src/eth_snap_stream.rs @@ -44,7 +44,7 @@ pub enum EthSnapStreamError { StatusNotInHandshake, } -/// Combined message type that include either eth or snao protocol messages +/// Combined message type that include either eth or snap protocol messages #[derive(Debug)] pub enum EthSnapMessage { /// An Ethereum protocol message diff --git a/crates/net/network/src/session/conn.rs b/crates/net/network/src/session/conn.rs index 1b262430f14..ea13cef4f01 100644 --- a/crates/net/network/src/session/conn.rs +++ b/crates/net/network/src/session/conn.rs @@ -65,7 +65,7 @@ impl EthRlpxConnection { } } - /// Returns access to the underlying stream. + /// Returns access to the underlying stream. #[inline] pub(crate) const fn inner(&self) -> &P2PStream> { match self { diff --git a/docs/vocs/docs/pages/overview.mdx b/docs/vocs/docs/pages/overview.mdx index e467dacc03f..33bc607bd45 100644 --- a/docs/vocs/docs/pages/overview.mdx +++ b/docs/vocs/docs/pages/overview.mdx @@ -111,4 +111,4 @@ You can contribute to the docs on [GitHub][gh-docs]. 
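One detail worth pausing on from the safe-math fix above: `saturating_add` keeps the balance comparison meaningful even when `message.value` is large enough to overflow the addition. A self-contained illustration of the difference, using only `alloy_primitives::U256`:

```rust
use alloy_primitives::U256;

fn main() {
    let balance_before = U256::MAX - U256::from(10u64);
    let value = U256::from(100u64);
    // Checked addition reports the overflow outright...
    assert_eq!(balance_before.checked_add(value), None);
    // ...while saturating addition clamps at U256::MAX, so a
    // `balance_after >= balance_before + value` style check cannot be
    // sidestepped by supplying an overflowing `value`.
    assert_eq!(balance_before.saturating_add(value), U256::MAX);
}
```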
[tg-badge]: https://img.shields.io/endpoint?color=neon&logo=telegram&label=chat&url=https%3A%2F%2Ftg.sumanjay.workers.dev%2Fparadigm%5Freth [tg-url]: https://t.me/paradigm_reth -[gh-docs]: https://github.com/paradigmxyz/reth/tree/main/book +[gh-docs]: https://github.com/paradigmxyz/reth/tree/main/docs diff --git a/docs/vocs/docs/pages/run/system-requirements.mdx b/docs/vocs/docs/pages/run/system-requirements.mdx index 60e30189f6a..9db3294f68e 100644 --- a/docs/vocs/docs/pages/run/system-requirements.mdx +++ b/docs/vocs/docs/pages/run/system-requirements.mdx @@ -55,7 +55,7 @@ TLC (Triple-Level Cell) NVMe drives, on the other hand, use three bits of data p Most of the time during syncing is spent executing transactions, which is a single-threaded operation due to potential state dependencies of a transaction on previous ones. -As a result, the number of cores matters less, but in general higher clock speeds are better. More cores are better for parallelizable [stages](https://github.com/paradigmxyz/reth/blob/main/docs/crates/stages) (like sender recovery or bodies downloading), but these stages are not the primary bottleneck for syncing. +As a result, the number of cores matters less, but in general higher clock speeds are better. More cores are better for parallelizable [stages](https://github.com/paradigmxyz/reth/blob/main/docs/crates/stages.md) (like sender recovery or bodies downloading), but these stages are not the primary bottleneck for syncing. ## Memory From 1bd5761b324f2ba45ff416a4f0c2e95192792995 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 1 Jul 2025 12:51:16 +0200 Subject: [PATCH 015/305] chore: bump revm (#17153) Co-authored-by: Arsenii Kulikov --- Cargo.lock | 62 ++++++++++--------- Cargo.toml | 26 ++++---- .../engine/tree/src/tree/precompile_cache.rs | 10 ++- crates/ress/provider/src/recorder.rs | 1 + crates/revm/src/database.rs | 8 ++- crates/rpc/rpc-eth-api/src/helpers/call.rs | 8 ++- crates/rpc/rpc-eth-types/src/cache/db.rs | 7 ++- 7 files changed, 72 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fad2c21f36d..984db80c7be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -259,9 +259,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.12.3" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff5aae4c6dc600734b206b175f3200085ee82dcdaa388760358830a984ca9869" +checksum = "a198edb5172413c2300bdc591b4dec1caa643398bd7facc21d0925487dffcd8f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -372,9 +372,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.12.3" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588a87b77b30452991151667522d2f2f724cec9c2ec6602e4187bc97f66d8095" +checksum = "de31eff0ae512dcca4fa0a58d158aa6d68e3b8b4a4e50ca5d6aff09c248a0aa2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6072,9 +6072,9 @@ dependencies = [ [[package]] name = "op-revm" -version = "7.0.1" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b97d2b54651fcd2955b454e86b2336c031e17925a127f4c44e2b63b2eeda923" +checksum = "84de364c50baff786d09ab18d3cdd4f5ff23612e96c00a96b65de3c470f553df" dependencies = [ "auto_impl", "once_cell", @@ -10629,9 +10629,9 @@ dependencies = [ [[package]] name = "revm" -version = "26.0.1" +version = "27.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2a493c73054a0f6635bad6e840cdbef34838e6e6186974833c901dff7dd709" +checksum = 
"0eff49cb058b1100aba529a048655594d89f6b86cefd1b50b63facd2465b6a0e" dependencies = [ "revm-bytecode", "revm-context", @@ -10648,9 +10648,9 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "5.0.0" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b395ee2212d44fcde20e9425916fee685b5440c3f8e01fabae8b0f07a2fd7f08" +checksum = "b6a7d034cdf74c5f952ffc26e9667dd4285c86379ce1b1190b5d597c398a7565" dependencies = [ "bitvec", "once_cell", @@ -10661,9 +10661,9 @@ dependencies = [ [[package]] name = "revm-context" -version = "7.0.1" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b97b69d05651509b809eb7215a6563dc64be76a941666c40aabe597ab544d38" +checksum = "199000545a2516f3fef7241e33df677275f930f56203ec4a586f7815e7fb5598" dependencies = [ "cfg-if", "derive-where", @@ -10677,9 +10677,9 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "7.0.1" +version = "8.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f8f4f06a1c43bf8e6148509aa06a6c4d28421541944842b9b11ea1a6e53468f" +checksum = "47db30cb6579fddb974462ea385d297ea57d0d13750fc1086d65166c4fb281eb" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -10693,9 +10693,9 @@ dependencies = [ [[package]] name = "revm-database" -version = "6.0.0" +version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "763eb5867a109a85f8e47f548b9d88c9143c0e443ec056742052f059fa32f4f1" +checksum = "bbe1906ae0f5f83153a6d46da8791405eb30385b9deb4845c27b4a6802e342e8" dependencies = [ "alloy-eips", "revm-bytecode", @@ -10707,11 +10707,12 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "6.0.0" +version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf5ecd19a5b75b862841113b9abdd864ad4b22e633810e11e6d620e8207e361d" +checksum = "faffdc496bad90183f31a144ed122caefa4e74ffb02f57137dc8a94d20611550" dependencies = [ "auto_impl", + "either", "revm-primitives", "revm-state", "serde", @@ -10719,9 +10720,9 @@ dependencies = [ [[package]] name = "revm-handler" -version = "7.0.1" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b61f992beaa7a5fc3f5fcf79f1093624fa1557dc42d36baa42114c2d836b59" +checksum = "844ecdeb61f8067a7ccb61e32c69d303fe9081b5f1e21e09a337c883f4dda1ad" dependencies = [ "auto_impl", "derive-where", @@ -10738,9 +10739,9 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "7.0.1" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7e4400a109a2264f4bf290888ac6d02432b6d5d070492b9dcf134b0c7d51354" +checksum = "ee95fd546963e456ab9b615adc3564f64a801a49d9ebcdc31ff63ce3a601069c" dependencies = [ "auto_impl", "either", @@ -10756,9 +10757,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aabdffc06bdb434d9163e2d63b6fae843559afd300ea3fbeb113b8a0d8ec728" +checksum = "8c42441fb05ac958e69262bd86841f8a91220e6794f9a0b99db1e1af51d8013e" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -10776,9 +10777,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "22.0.1" +version = "23.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2481ef059708772cec0ce6bc4c84b796a40111612efb73b01adf1caed7ff9ac" +checksum = 
"1776f996bb79805b361badd8b6326ac04a8580764aebf72b145620a6e21cf1c3" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -10788,15 +10789,16 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "23.0.0" +version = "24.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d581e78c8f132832bd00854fb5bf37efd95a52582003da35c25cd2cbfc63849" +checksum = "5c35a987086055a5cb368e080d1300ea853a3185b7bb9cdfebb8c05852cda24f" dependencies = [ "ark-bls12-381", "ark-bn254", "ark-ec", "ark-ff 0.5.0", "ark-serialize 0.5.0", + "arrayref", "aurora-engine-modexp", "blst", "c-kzg", @@ -10825,9 +10827,9 @@ dependencies = [ [[package]] name = "revm-state" -version = "6.0.0" +version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d6274928dd78f907103740b10800d3c0db6caeca391e75a159c168a1e5c78f8" +checksum = "7f7bc9492e94ad3280c4540879d28d3fdbfbc432ebff60f17711740ebb4309ff" dependencies = [ "bitflags 2.9.1", "revm-bytecode", diff --git a/Cargo.toml b/Cargo.toml index fedef0f26ca..0cb3085eda2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -450,24 +450,24 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { version = "26.0.1", default-features = false } -revm-bytecode = { version = "5.0.0", default-features = false } -revm-database = { version = "6.0.0", default-features = false } -revm-state = { version = "6.0.0", default-features = false } +revm = { version = "27.0.1", default-features = false } +revm-bytecode = { version = "6.0.0", default-features = false } +revm-database = { version = "7.0.0", default-features = false } +revm-state = { version = "7.0.0", default-features = false } revm-primitives = { version = "20.0.0", default-features = false } -revm-interpreter = { version = "22.0.1", default-features = false } -revm-inspector = { version = "7.0.1", default-features = false } -revm-context = { version = "7.0.1", default-features = false } -revm-context-interface = { version = "7.0.0", default-features = false } -revm-database-interface = { version = "6.0.0", default-features = false } -op-revm = { version = "7.0.1", default-features = false } -revm-inspectors = "0.25.0" +revm-interpreter = { version = "23.0.0", default-features = false } +revm-inspector = { version = "8.0.1", default-features = false } +revm-context = { version = "8.0.1", default-features = false } +revm-context-interface = { version = "8.0.0", default-features = false } +revm-database-interface = { version = "7.0.0", default-features = false } +op-revm = { version = "8.0.1", default-features = false } +revm-inspectors = "0.26.0" # eth alloy-chains = { version = "0.2.0", default-features = false } alloy-dyn-abi = "1.2.0" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.12", default-features = false } +alloy-evm = { version = "0.13", default-features = false } alloy-primitives = { version = "1.2.0", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.2.0" @@ -505,7 +505,7 @@ alloy-transport-ipc = { version = "1.0.16", default-features = false } alloy-transport-ws = { version = "1.0.16", default-features = false } # op -alloy-op-evm = { version = "0.12", default-features = false } +alloy-op-evm = { version = "0.13", default-features = false } alloy-op-hardforks = "0.2.2" op-alloy-rpc-types = { version = "0.18.7", 
default-features = false } op-alloy-rpc-types-engine = { version = "0.18.7", default-features = false } diff --git a/crates/engine/tree/src/tree/precompile_cache.rs b/crates/engine/tree/src/tree/precompile_cache.rs index a3eb3a5ba2b..9d59ccbce49 100644 --- a/crates/engine/tree/src/tree/precompile_cache.rs +++ b/crates/engine/tree/src/tree/precompile_cache.rs @@ -191,11 +191,12 @@ where } } + let calldata = input.data; let result = self.precompile.call(input); match &result { Ok(output) => { - let key = CacheKey::new(self.spec_id.clone(), Bytes::copy_from_slice(input.data)); + let key = CacheKey::new(self.spec_id.clone(), Bytes::copy_from_slice(calldata)); let size = self.cache.insert(key, CacheEntry(output.clone())); self.set_precompile_cache_size_metric(size as f64); self.increment_by_one_precompile_cache_misses(); @@ -240,7 +241,9 @@ mod tests { use std::hash::DefaultHasher; use super::*; - use revm::precompile::PrecompileOutput; + use reth_evm::EvmInternals; + use reth_revm::db::EmptyDB; + use revm::{context::JournalTr, precompile::PrecompileOutput, Journal}; use revm_primitives::{hardfork::SpecId, U256}; #[test] @@ -341,6 +344,7 @@ mod tests { gas: gas_limit, caller: Address::ZERO, value: U256::ZERO, + internals: EvmInternals::new(&mut Journal::<_>::new(EmptyDB::new())), }) .unwrap(); assert_eq!(result1.bytes.as_ref(), b"output_from_precompile_1"); @@ -353,6 +357,7 @@ mod tests { gas: gas_limit, caller: Address::ZERO, value: U256::ZERO, + internals: EvmInternals::new(&mut Journal::<_>::new(EmptyDB::new())), }) .unwrap(); assert_eq!(result2.bytes.as_ref(), b"output_from_precompile_2"); @@ -364,6 +369,7 @@ mod tests { gas: gas_limit, caller: Address::ZERO, value: U256::ZERO, + internals: EvmInternals::new(&mut Journal::<_>::new(EmptyDB::new())), }) .unwrap(); assert_eq!(result3.bytes.as_ref(), b"output_from_precompile_1"); diff --git a/crates/ress/provider/src/recorder.rs b/crates/ress/provider/src/recorder.rs index b692dd9a4d1..ec5afacbf0c 100644 --- a/crates/ress/provider/src/recorder.rs +++ b/crates/ress/provider/src/recorder.rs @@ -8,6 +8,7 @@ use reth_trie::{HashedPostState, HashedStorage}; /// The state witness recorder that records all state accesses during execution. /// It does so by implementing the [`reth_revm::Database`] and recording accesses of accounts and /// slots. +#[derive(Debug)] pub(crate) struct StateWitnessRecorderDatabase { database: D, state: HashedPostState, diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 50415815759..6b829c3d734 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -61,7 +61,7 @@ impl EvmStateProvider for T { /// A [Database] and [`DatabaseRef`] implementation that uses [`EvmStateProvider`] as the underlying /// data source. -#[derive(Debug, Clone)] +#[derive(Clone)] pub struct StateProviderDatabase(pub DB); impl StateProviderDatabase { @@ -76,6 +76,12 @@ impl StateProviderDatabase { } } +impl core::fmt::Debug for StateProviderDatabase { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("StateProviderDatabase").finish_non_exhaustive() + } +} + impl AsRef for StateProviderDatabase { fn as_ref(&self) -> &DB { self diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 12d63243f1c..8659721d457 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -1,6 +1,8 @@ //! Loads a pending block from database. 
Helper trait for `eth_` transaction, call and trace RPC //! methods. +use core::fmt; + use super::{LoadBlock, LoadPendingBlock, LoadState, LoadTransaction, SpawnBlocking, Trace}; use crate::{ helpers::estimate::EstimateCall, FromEvmError, FullEthApiTypes, RpcBlock, RpcNodeCore, @@ -495,7 +497,7 @@ pub trait Call: tx_env: TxEnvFor, ) -> Result>, Self::Error> where - DB: Database, + DB: Database + fmt::Debug, { let mut evm = self.evm_config().evm_with_env(db, evm_env); let res = evm.transact(tx_env).map_err(Self::Error::from_evm_err)?; @@ -513,7 +515,7 @@ pub trait Call: inspector: I, ) -> Result>, Self::Error> where - DB: Database, + DB: Database + fmt::Debug, I: InspectorFor, { let mut evm = self.evm_config().evm_with_env_and_inspector(db, evm_env, inspector); @@ -675,7 +677,7 @@ pub trait Call: target_tx_hash: B256, ) -> Result where - DB: Database + DatabaseCommit, + DB: Database + DatabaseCommit + core::fmt::Debug, I: IntoIterator>>, { let mut evm = self.evm_config().evm_with_env(db, evm_env); diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index 7c1bedb8224..abb8983485a 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -182,9 +182,14 @@ impl BytecodeReader for StateProviderTraitObjWrapper<'_> { /// Hack to get around 'higher-ranked lifetime error', see /// -#[expect(missing_debug_implementations)] pub struct StateCacheDbRefMutWrapper<'a, 'b>(pub &'b mut StateCacheDb<'a>); +impl<'a, 'b> core::fmt::Debug for StateCacheDbRefMutWrapper<'a, 'b> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("StateCacheDbRefMutWrapper").finish_non_exhaustive() + } +} + impl<'a> Database for StateCacheDbRefMutWrapper<'a, '_> { type Error = as Database>::Error; fn basic(&mut self, address: Address) -> Result, Self::Error> { From 7350c0151e557b567db032fa303ef84015533f1b Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Tue, 1 Jul 2025 13:00:50 +0200 Subject: [PATCH 016/305] fix(trie): correct ParallelSparseTrie lower subtrie path management (#17143) --- crates/trie/sparse-parallel/src/trie.rs | 214 ++++++++++++++++++++---- 1 file changed, 177 insertions(+), 37 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index b2d8d147f8c..b397fdb3493 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -59,14 +59,19 @@ impl ParallelSparseTrie { /// Returns a mutable reference to the lower `SparseSubtrie` for the given path, or None if the /// path belongs to the upper trie. /// - /// This method will create a new lower subtrie if one doesn't exist for the given path. + /// This method will create a new lower subtrie if one doesn't exist for the given path. If one + /// does exist, but its path field is longer than the given path, then the field will be set + /// to the given path. 
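+    ///
+    /// Keeping the `path` field at the shortest revealed path ensures it always points at the
+    /// subtrie's root node.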
fn lower_subtrie_for_path(&mut self, path: &Nibbles) -> Option<&mut Box> { match SparseSubtrieType::from_path(path) { SparseSubtrieType::Upper => None, SparseSubtrieType::Lower(idx) => { - if self.lower_subtries[idx].is_none() { - let upper_path = path.slice(..UPPER_TRIE_MAX_DEPTH); - self.lower_subtries[idx] = Some(Box::new(SparseSubtrie::new(upper_path))); + if let Some(subtrie) = self.lower_subtries[idx].as_mut() { + if path.len() < subtrie.path.len() { + subtrie.path = *path; + } + } else { + self.lower_subtries[idx] = Some(Box::new(SparseSubtrie::new(*path))); } self.lower_subtries[idx].as_mut() @@ -77,18 +82,16 @@ impl ParallelSparseTrie { /// Returns a mutable reference to either the lower or upper `SparseSubtrie` for the given path, /// depending on the path's length. /// - /// This method will create a new lower subtrie if one doesn't exist for the given path. + /// This method will create a new lower subtrie if one doesn't exist for the given path. If one + /// does exist, but its path field is longer than the given path, then the field will be set + /// to the given path. fn subtrie_for_path(&mut self, path: &Nibbles) -> &mut Box { - match SparseSubtrieType::from_path(path) { - SparseSubtrieType::Upper => &mut self.upper_subtrie, - SparseSubtrieType::Lower(idx) => { - if self.lower_subtries[idx].is_none() { - let upper_path = path.slice(..UPPER_TRIE_MAX_DEPTH); - self.lower_subtries[idx] = Some(Box::new(SparseSubtrie::new(upper_path))); - } - - self.lower_subtries[idx].as_mut().unwrap() - } + // We can't just call `lower_subtrie_for_path` and return `upper_subtrie` if it returns + // None, because Rust complains about double mutable borrowing `self`. + if SparseSubtrieType::path_len_is_upper(path.len()) { + &mut self.upper_subtrie + } else { + self.lower_subtrie_for_path(path).unwrap() } } @@ -282,6 +285,47 @@ impl ParallelSparseTrie { } } + /// Used by `remove_leaf` to ensure that when a node is removed from a lower subtrie that any + /// externalities are handled. These can include: + /// - Removing the lower subtrie completely, if it is now empty. + /// - Updating the `path` field of the lower subtrie to indicate that its root node has changed. + /// + /// This method assumes that the caller will deal with putting all other nodes in the trie into + /// a consistent state after the removal of this one. + /// + /// ## Panics + /// + /// - If the removed node was not a leaf or extension. + fn remove_node(&mut self, path: &Nibbles) { + let subtrie = self.subtrie_for_path(path); + let node = subtrie.nodes.remove(path); + + let Some(idx) = SparseSubtrieType::from_path(path).lower_index() else { + // When removing a node from the upper trie there's nothing special we need to do to fix + // its path field; the upper trie's path is always empty. + return; + }; + + match node { + Some(SparseNode::Leaf { .. }) => { + // If the leaf was the final node in its lower subtrie then we can remove the lower + // subtrie completely. + if subtrie.nodes.is_empty() { + self.lower_subtries[idx] = None; + } + } + Some(SparseNode::Extension { key, .. }) => { + // If the removed extension was the root node of a lower subtrie then the lower + // subtrie's `path` needs to be updated to be whatever node the extension used to + // point to. 
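+                // For example, if this extension was the subtrie root at path 0x12 with key
+                // 0x34, removing it makes the node at 0x1234 the new subtrie root.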
+ if &subtrie.path == path { + subtrie.path.extend(&key); + } + } + _ => panic!("Expected to remove a leaf or extension, but removed {node:?}"), + } + } + /// Given the path to a parent branch node and a child node which is the sole remaining child on /// that branch after removing a leaf, returns a node to replace the parent branch node and a /// boolean indicating if the child should be deleted. @@ -483,7 +527,7 @@ impl ParallelSparseTrie { // from its SparseSubtrie. self.prefix_set.insert(*leaf_full_path); leaf_subtrie.inner.values.remove(leaf_full_path); - leaf_subtrie.nodes.remove(&leaf_path); + self.remove_node(&leaf_path); // If the leaf was at the root replace its node with the empty value. We can stop execution // here, all remaining logic is related to the ancestors of the leaf. @@ -566,12 +610,12 @@ impl ParallelSparseTrie { ); if remove_child { - remaining_child_subtrie.nodes.remove(&remaining_child_path); self.move_value_on_leaf_removal( branch_path, &new_branch_node, &remaining_child_path, ); + self.remove_node(&remaining_child_path); } if let Some(updates) = self.updates.as_mut() { @@ -607,8 +651,8 @@ impl ParallelSparseTrie { branch_parent_node.as_ref().unwrap(), ) { ext_subtrie.nodes.insert(ext_path, new_ext_node.clone()); - self.subtrie_for_path(branch_path).nodes.remove(branch_path); self.move_value_on_leaf_removal(&ext_path, &new_ext_node, branch_path); + self.remove_node(branch_path); } } @@ -1955,13 +1999,31 @@ mod tests { let idx = path_subtrie_index_unchecked(&path); assert!(trie.lower_subtries[idx].is_some()); + // Check that the lower subtrie's path was correctly set let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); + assert_eq!(lower_subtrie.path, path); + assert_matches!( lower_subtrie.nodes.get(&path), Some(SparseNode::Leaf { key, hash: None }) if key == &Nibbles::from_nibbles([0x3, 0x4]) ); } + + // Reveal leaf in a lower trie with a longer path, shouldn't result in the subtrie's root + // path changing. 
+ { + let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + let node = create_leaf_node([0x4, 0x5], 42); + let masks = TrieMasks::none(); + + trie.reveal_node(path, node, masks).unwrap(); + + // Check that the lower subtrie's path hasn't changed + let idx = path_subtrie_index_unchecked(&path); + let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); + assert_eq!(lower_subtrie.path, Nibbles::from_nibbles([0x1, 0x2])); + } } #[test] @@ -2008,6 +2070,7 @@ mod tests { assert!(trie.lower_subtries[idx].is_some()); let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); + assert_eq!(lower_subtrie.path, child_path); assert_eq!(lower_subtrie.nodes.get(&child_path), Some(&SparseNode::Hash(child_hash))); } @@ -2034,6 +2097,7 @@ mod tests { assert!(trie.lower_subtries[idx].is_some()); let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); + assert_eq!(lower_subtrie.path, child_path); assert_eq!(lower_subtrie.nodes.get(&child_path), Some(&SparseNode::Hash(child_hash))); } @@ -2101,6 +2165,7 @@ mod tests { for (i, child_path) in child_paths.iter().enumerate() { let idx = path_subtrie_index_unchecked(child_path); let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); + assert_eq!(&lower_subtrie.path, child_path); assert_eq!( lower_subtrie.nodes.get(child_path), Some(&SparseNode::Hash(child_hashes[i].as_hash().unwrap())), @@ -2334,10 +2399,10 @@ mod tests { let upper_subtrie = &trie.upper_subtrie; let lower_subtrie_50 = trie.lower_subtries[0x50].as_ref().unwrap(); - let lower_subtrie_53 = trie.lower_subtries[0x53].as_ref().unwrap(); - // Check that the leaf value was removed from the appropriate `SparseSubtrie`. - assert_matches!(lower_subtrie_53.inner.values.get(&leaf_full_path), None); + // Check that the `SparseSubtrie` the leaf was removed from was itself removed, as it is now + // empty. + assert_matches!(trie.lower_subtries[0x53].as_ref(), None); // Check that the leaf node was removed, and that its parent/grandparent were modified // appropriately. @@ -2353,7 +2418,6 @@ mod tests { Some(SparseNode::Branch{ state_mask, .. }) if *state_mask == 0b0101.into() ); - assert_matches!(lower_subtrie_53.nodes.get(&Nibbles::from_nibbles([0x5, 0x3])), None); } #[test] @@ -2453,15 +2517,15 @@ mod tests { trie.remove_leaf(&leaf_full_path, provider).unwrap(); let upper_subtrie = &trie.upper_subtrie; - let lower_subtrie_50 = trie.lower_subtries[0x50].as_ref().unwrap(); - let lower_subtrie_51 = trie.lower_subtries[0x51].as_ref().unwrap(); - // Check that the full key was removed - assert_matches!(lower_subtrie_50.inner.values.get(&leaf_full_path), None); + // Check that both lower subtries were removed. 0x50 should have been removed because + // removing its leaf made it empty. 0x51 should have been removed after its own leaf was + // collapsed into the upper trie, leaving it also empty. 
+ assert_matches!(trie.lower_subtries[0x50].as_ref(), None); + assert_matches!(trie.lower_subtries[0x51].as_ref(), None); // Check that the other leaf's value was moved to the upper trie let other_leaf_full_value = Nibbles::from_nibbles([0x5, 0x1, 0x3, 0x4]); - assert_matches!(lower_subtrie_51.inner.values.get(&other_leaf_full_value), None); assert_matches!(upper_subtrie.inner.values.get(&other_leaf_full_value), Some(_)); // Check that the extension node collapsed into a leaf node @@ -2473,8 +2537,6 @@ mod tests { // Check that intermediate nodes were removed assert_matches!(upper_subtrie.nodes.get(&Nibbles::from_nibbles([0x5])), None); - assert_matches!(lower_subtrie_50.nodes.get(&Nibbles::from_nibbles([0x5, 0x0])), None); - assert_matches!(lower_subtrie_51.nodes.get(&Nibbles::from_nibbles([0x5, 0x1])), None); } #[test] @@ -2515,15 +2577,15 @@ mod tests { trie.remove_leaf(&leaf_full_path, provider).unwrap(); let upper_subtrie = &trie.upper_subtrie; - let lower_subtrie_20 = trie.lower_subtries[0x20].as_ref().unwrap(); - let lower_subtrie_21 = trie.lower_subtries[0x21].as_ref().unwrap(); - // Check that the leaf's value was removed - assert_matches!(lower_subtrie_20.inner.values.get(&leaf_full_path), None); + // Check that both lower subtries were removed. 0x20 should have been removed because + // removing its leaf made it empty. 0x21 should have been removed after its own leaf was + // collapsed into the upper trie, leaving it also empty. + assert_matches!(trie.lower_subtries[0x20].as_ref(), None); + assert_matches!(trie.lower_subtries[0x21].as_ref(), None); // Check that the other leaf's value was moved to the upper trie let other_leaf_full_value = Nibbles::from_nibbles([0x2, 0x1, 0x5, 0x6]); - assert_matches!(lower_subtrie_21.inner.values.get(&other_leaf_full_value), None); assert_matches!(upper_subtrie.inner.values.get(&other_leaf_full_value), Some(_)); // Check that the root branch still exists unchanged @@ -2539,10 +2601,88 @@ mod tests { Some(SparseNode::Leaf{ key, ..}) if key == &Nibbles::from_nibbles([0x1, 0x5, 0x6]) ); + } + + #[test] + fn test_remove_leaf_lower_subtrie_root_path_update() { + // + // 0x: Extension (Key = 123, root of lower subtrie) + // 0x123: └── Branch (Mask = 0011000) + // 0x1233: ├── 3 -> Leaf (Key = []) + // 0x1234: └── 4 -> Extension (Key = 5) + // 0x12345: └── Branch (Mask = 0011) + // 0x123450: ├── 0 -> Leaf (Key = []) + // 0x123451: └── 1 -> Leaf (Key = []) + // + // After removing leaf at 0x1233, the branch at 0x123 becomes an extension to 0x12345, which + // then gets merged with the root extension at 0x. The lower subtrie's `path` field should + // be updated from 0x123 to 0x12345. 
+ // + let mut trie = new_test_trie( + [ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x1, 0x2, 0x3]))), + ( + Nibbles::from_nibbles([0x1, 0x2, 0x3]), + SparseNode::new_branch(TrieMask::new(0b0011000)), + ), + ( + Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x3]), + SparseNode::new_leaf(Nibbles::default()), + ), + ( + Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]), + SparseNode::new_ext(Nibbles::from_nibbles([0x5])), + ), + ( + Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5]), + SparseNode::new_branch(TrieMask::new(0b0011)), + ), + ( + Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5, 0x0]), + SparseNode::new_leaf(Nibbles::default()), + ), + ( + Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5, 0x1]), + SparseNode::new_leaf(Nibbles::default()), + ), + ] + .into_iter(), + ); + + let provider = MockBlindedProvider::new(); + + // Verify initial state - the lower subtrie's path should be 0x123 + let lower_subtrie_root_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + assert_matches!( + trie.lower_subtrie_for_path(&lower_subtrie_root_path), + Some(subtrie) + if subtrie.path == lower_subtrie_root_path + ); + + // Remove the leaf at 0x1233 + let leaf_full_path = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x3]); + trie.remove_leaf(&leaf_full_path, provider).unwrap(); - // Check that the branch's child nodes were removed - assert_matches!(lower_subtrie_20.nodes.get(&Nibbles::from_nibbles([0x2, 0x0])), None); - assert_matches!(lower_subtrie_21.nodes.get(&Nibbles::from_nibbles([0x2, 0x1])), None); + // After removal: + // 1. The branch at 0x123 should become an extension to 0x12345 + // 2. That extension should merge with the root extension at 0x + // 3. The lower subtrie's path should be updated to 0x12345 + let lower_subtrie = trie.lower_subtries[0x12].as_ref().unwrap(); + assert_eq!(lower_subtrie.path, Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5])); + + // Verify the root extension now points all the way to 0x12345 + assert_matches!( + trie.upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Extension { key, .. }) + if key == &Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5]) + ); + + // Verify the branch at 0x12345 hasn't been modified + assert_matches!( + lower_subtrie.nodes.get(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5])), + Some(SparseNode::Branch { state_mask, .. }) + if state_mask == &TrieMask::new(0b0011) + ); } #[test] From 1c169257b61943162fb1702525e68262d3e7fd82 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 1 Jul 2025 15:31:46 +0200 Subject: [PATCH 017/305] chore: add debug for forkid mismatch (#17157) --- crates/net/network/src/session/mod.rs | 5 +++++ crates/net/network/src/swarm.rs | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/net/network/src/session/mod.rs b/crates/net/network/src/session/mod.rs index 5aad90cbb6f..e94376948c6 100644 --- a/crates/net/network/src/session/mod.rs +++ b/crates/net/network/src/session/mod.rs @@ -174,6 +174,11 @@ impl SessionManager { } } + /// Returns the currently tracked [`ForkId`]. + pub(crate) const fn fork_id(&self) -> ForkId { + self.fork_filter.current() + } + /// Check whether the provided [`ForkId`] is compatible based on the validation rules in /// `EIP-2124`. 
pub fn is_valid_fork_id(&self, fork_id: ForkId) -> bool { diff --git a/crates/net/network/src/swarm.rs b/crates/net/network/src/swarm.rs index fbb7b0bf941..229d149a2f9 100644 --- a/crates/net/network/src/swarm.rs +++ b/crates/net/network/src/swarm.rs @@ -20,7 +20,7 @@ use std::{ sync::Arc, task::{Context, Poll}, }; -use tracing::trace; +use tracing::{debug, trace}; #[cfg_attr(doc, aquamarine::aquamarine)] /// Contains the connectivity related state of the network. @@ -259,6 +259,7 @@ impl Swarm { if self.sessions.is_valid_fork_id(fork_id) { self.state_mut().peers_mut().set_discovered_fork_id(peer_id, fork_id); } else { + debug!(target: "net", ?peer_id, remote_fork_id=?fork_id, our_fork_id=?self.sessions.fork_id(), "fork id mismatch, removing peer"); self.state_mut().peers_mut().remove_peer(peer_id); } } From 4199dd46767af78ac9c4b360c3c218e80d54b2e1 Mon Sep 17 00:00:00 2001 From: Aliaksei Misiukevich Date: Tue, 1 Jul 2025 18:18:24 +0200 Subject: [PATCH 018/305] feat: eth addons' middleware setter (#17159) Signed-off-by: Aliaksei Misiukevich --- crates/ethereum/node/src/node.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index c0aeba68b3b..4eefecb34a0 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -27,7 +27,7 @@ use reth_node_builder::{ node::{FullNodeTypes, NodeTypes}, rpc::{ BasicEngineApiBuilder, EngineApiBuilder, EngineValidatorAddOn, EngineValidatorBuilder, - EthApiBuilder, EthApiCtx, RethRpcAddOns, RpcAddOns, RpcHandle, + EthApiBuilder, EthApiCtx, Identity, RethRpcAddOns, RpcAddOns, RpcHandle, }, BuilderContext, DebugNode, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, PayloadTypes, @@ -166,8 +166,9 @@ pub struct EthereumAddOns< EthB: EthApiBuilder, EV, EB = BasicEngineApiBuilder, + RpcMiddleware = Identity, > { - inner: RpcAddOns, + inner: RpcAddOns, } impl Default for EthereumAddOns @@ -212,6 +213,15 @@ where let Self { inner } = self; EthereumAddOns { inner: inner.with_engine_validator(engine_validator_builder) } } + + /// Sets rpc middleware + pub fn with_rpc_middleware(self, rpc_middleware: T) -> EthereumAddOns + where + T: Send, + { + let Self { inner } = self; + EthereumAddOns { inner: inner.with_rpc_middleware(rpc_middleware) } + } } impl NodeAddOns for EthereumAddOns From a37917dd7a4b781315cedb0b3c7a0140536e3b23 Mon Sep 17 00:00:00 2001 From: Rebustron Date: Wed, 2 Jul 2025 03:42:28 +0300 Subject: [PATCH 019/305] chore: removed link for book `repo/layout.md` (#17164) --- docs/repo/layout.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 8626d264432..22aae4c3512 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -29,7 +29,7 @@ The supporting crates are split into two categories: [primitives](#primitives) a ### Documentation -Contributor documentation is in [`docs`](../../docs) and end-user documentation is in [`book`](../../book). +Contributor documentation is in [`docs`](../../docs). 
### Binaries From b1f9f716a8218205912c78019a60a3504f41199f Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Wed, 2 Jul 2025 13:36:16 +0200 Subject: [PATCH 020/305] chore(trie): factor out SparseTrieState (#17166) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- .../tree/src/tree/payload_processor/mod.rs | 10 +- .../src/tree/payload_processor/sparse_trie.rs | 27 ++- crates/trie/sparse/src/state.rs | 33 ++- crates/trie/sparse/src/trie.rs | 204 +++++++----------- 4 files changed, 104 insertions(+), 170 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 055d4622d1e..21c34d952ec 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -28,7 +28,7 @@ use reth_trie_parallel::{ proof_task::{ProofTaskCtx, ProofTaskManager}, root::ParallelStateRootError, }; -use reth_trie_sparse::SparseTrieState; +use reth_trie_sparse::SparseTrie; use std::{ collections::VecDeque, sync::{ @@ -68,9 +68,9 @@ where precompile_cache_disabled: bool, /// Precompile cache map. precompile_cache_map: PrecompileCacheMap>, - /// A sparse trie, kept around to be used for the state root computation so that allocations - /// can be minimized. - sparse_trie: Option, + /// A cleared sparse trie, kept around to be re-used for the state root computation so that + /// allocations can be minimized. + sparse_trie: Option, _marker: std::marker::PhantomData, } @@ -251,7 +251,7 @@ where } /// Sets the sparse trie to be kept around for the state root computation. - pub(super) fn set_sparse_trie(&mut self, sparse_trie: SparseTrieState) { + pub(super) fn set_sparse_trie(&mut self, sparse_trie: SparseTrie) { self.sparse_trie = Some(sparse_trie); } diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index 8f472cd8c8b..92115b40d94 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -11,7 +11,7 @@ use reth_trie_parallel::root::ParallelStateRootError; use reth_trie_sparse::{ blinded::{BlindedProvider, BlindedProviderFactory}, errors::{SparseStateTrieResult, SparseTrieErrorKind}, - SparseStateTrie, SparseTrieState, + SparseStateTrie, SparseTrie, }; use std::{ sync::mpsc, @@ -66,40 +66,39 @@ where } } - /// Creates a new sparse trie, populating the accounts trie with the given cleared - /// `SparseTrieState` if it exists. + /// Creates a new sparse trie, populating the accounts trie with the given `SparseTrie`, if it + /// exists. pub(super) fn new_with_stored_trie( executor: WorkloadExecutor, updates: mpsc::Receiver, blinded_provider_factory: BPF, trie_metrics: MultiProofTaskMetrics, - sparse_trie_state: Option, + sparse_trie: Option, ) -> Self { - if let Some(sparse_trie_state) = sparse_trie_state { + if let Some(sparse_trie) = sparse_trie { Self::with_accounts_trie( executor, updates, blinded_provider_factory, trie_metrics, - sparse_trie_state, + sparse_trie, ) } else { Self::new(executor, updates, blinded_provider_factory, trie_metrics) } } - /// Creates a new sparse trie task, using the given cleared `SparseTrieState` for the accounts + /// Creates a new sparse trie task, using the given [`SparseTrie::Blind`] for the accounts /// trie. 
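+    ///
+    /// The given trie is expected to be blind; this is debug-asserted before it is installed
+    /// as the accounts trie.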
pub(super) fn with_accounts_trie( executor: WorkloadExecutor, updates: mpsc::Receiver, blinded_provider_factory: BPF, metrics: MultiProofTaskMetrics, - sparse_trie_state: SparseTrieState, + sparse_trie: SparseTrie, ) -> Self { - let mut trie = SparseStateTrie::new().with_updates(true); - trie.populate_from(sparse_trie_state); - + debug_assert!(sparse_trie.is_blind()); + let trie = SparseStateTrie::new().with_updates(true).with_accounts_trie(sparse_trie); Self { executor, updates, metrics, trie, blinded_provider_factory } } @@ -154,8 +153,8 @@ where self.metrics.sparse_trie_final_update_duration_histogram.record(start.elapsed()); self.metrics.sparse_trie_total_duration_histogram.record(now.elapsed()); - // take the account trie - let trie = self.trie.take_cleared_account_trie_state(); + // take the account trie so that we can re-use its already allocated data structures. + let trie = self.trie.take_cleared_accounts_trie(); Ok(StateRootComputeOutcome { state_root, trie_updates, trie }) } @@ -170,7 +169,7 @@ pub struct StateRootComputeOutcome { /// The trie updates. pub trie_updates: TrieUpdates, /// The account state trie. - pub trie: SparseTrieState, + pub trie: SparseTrie, } /// Updates the sparse trie with the given proofs and state, and returns the elapsed time. diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 49a31921335..7eaa99e500f 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,6 +1,6 @@ use crate::{ blinded::{BlindedProvider, BlindedProviderFactory}, - LeafLookup, RevealedSparseTrie, SparseTrie, SparseTrieState, TrieMasks, + LeafLookup, RevealedSparseTrie, SparseTrie, TrieMasks, }; use alloc::{collections::VecDeque, vec::Vec}; use alloy_primitives::{ @@ -75,21 +75,22 @@ impl SparseStateTrie { self } + /// Set the accounts trie to the given `SparseTrie`. + pub fn with_accounts_trie(mut self, trie: SparseTrie) -> Self { + self.state = trie; + self + } + + /// Takes the `SparseTrie` from within the state root and clears it if it is not blinded. + pub fn take_cleared_accounts_trie(&mut self) -> SparseTrie { + core::mem::take(&mut self.state).clear() + } + /// Returns `true` if account was already revealed. pub fn is_account_revealed(&self, account: B256) -> bool { self.revealed_account_paths.contains(&Nibbles::unpack(account)) } - /// Uses the input `SparseTrieState` to populate the backing data structures in the `state` - /// trie. - pub fn populate_from(&mut self, trie: SparseTrieState) { - if let Some(new_trie) = self.state.as_revealed_mut() { - new_trie.use_allocated_state(trie); - } else { - self.state = SparseTrie::AllocatedEmpty { allocated: trie }; - } - } - /// Was the account witness for `address` complete? pub fn check_valid_account_witness(&self, address: B256) -> bool { let path = Nibbles::unpack(address); @@ -604,7 +605,7 @@ impl SparseStateTrie { provider_factory: impl BlindedProviderFactory, ) -> SparseStateTrieResult<&mut RevealedSparseTrie> { match self.state { - SparseTrie::Blind | SparseTrie::AllocatedEmpty { .. } => { + SparseTrie::Blind(_) => { let (root_node, hash_mask, tree_mask) = provider_factory .account_node_provider() .blinded_node(&Nibbles::default())? @@ -844,12 +845,6 @@ impl SparseStateTrie { storage_trie.remove_leaf(slot, provider)?; Ok(()) } - - /// Clears and takes the account trie. - pub fn take_cleared_account_trie_state(&mut self) -> SparseTrieState { - let trie = core::mem::take(&mut self.state); - trie.cleared() - } } /// Result of [`filter_revealed_nodes`]. 
@@ -953,7 +948,7 @@ mod tests { assert_eq!(proofs.len(), 1); let mut sparse = SparseStateTrie::default(); - assert_eq!(sparse.state, SparseTrie::Blind); + assert_eq!(sparse.state, SparseTrie::Blind(None)); sparse.reveal_account(Default::default(), proofs.into_inner()).unwrap(); assert_eq!(sparse.state, SparseTrie::revealed_empty()); diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 7dbb611a1fe..fbb8b08c2d4 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -52,19 +52,6 @@ impl TrieMasks { } } -/// A struct for keeping the hashmaps from `RevealedSparseTrie`. -#[derive(Debug, Clone, PartialEq, Eq, Default)] -pub struct SparseTrieState { - /// Map from a path (nibbles) to its corresponding sparse trie node. - nodes: HashMap, - /// When a branch is set, the corresponding child subtree is stored in the database. - branch_node_tree_masks: HashMap, - /// When a bit is set, the corresponding child is stored as a hash in the database. - branch_node_hash_masks: HashMap, - /// Map from leaf key paths to their values. - values: HashMap>, -} - /// A sparse trie that is either in a "blind" state (no nodes are revealed, root node hash is /// unknown) or in a "revealed" state (root node has been revealed and the trie can be updated). /// @@ -77,21 +64,16 @@ pub struct SparseTrieState { /// 2. Update tracking - changes to the trie structure can be tracked and selectively persisted /// 3. Incremental operations - nodes can be revealed as needed without loading the entire trie. /// This is what gives rise to the notion of a "sparse" trie. -#[derive(PartialEq, Eq, Default, Clone)] +#[derive(PartialEq, Eq, Clone, Debug)] pub enum SparseTrie { - /// This is a variant that can be used to store a previously allocated trie. In these cases, - /// the trie will still be treated as blind, but the allocated trie will be reused if the trie - /// becomes revealed. - AllocatedEmpty { - /// This is the state of the allocated trie. - allocated: SparseTrieState, - }, /// The trie is blind -- no nodes have been revealed /// - /// This is the default state. In this state, - /// the trie cannot be directly queried or modified until nodes are revealed. - #[default] - Blind, + /// This is the default state. In this state, the trie cannot be directly queried or modified + /// until nodes are revealed. + /// + /// In this state the `SparseTrie` can optionally carry with it a cleared `RevealedSparseTrie`. + /// This allows for re-using the trie's allocations between payload executions. + Blind(Option>), /// Some nodes in the Trie have been revealed. /// /// In this state, the trie can be queried and modified for the parts @@ -100,13 +82,9 @@ pub enum SparseTrie { Revealed(Box), } -impl fmt::Debug for SparseTrie { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::AllocatedEmpty { .. } => write!(f, "AllocatedEmpty"), - Self::Blind => write!(f, "Blind"), - Self::Revealed(revealed) => write!(f, "Revealed({revealed:?})"), - } +impl Default for SparseTrie { + fn default() -> Self { + Self::Blind(None) } } @@ -124,7 +102,7 @@ impl SparseTrie { /// assert!(trie.is_blind()); /// ``` pub const fn blind() -> Self { - Self::Blind + Self::Blind(None) } /// Creates a new revealed but empty sparse trie with `SparseNode::Empty` as root node. 
@@ -158,28 +136,25 @@ impl SparseTrie { masks: TrieMasks, retain_updates: bool, ) -> SparseTrieResult<&mut RevealedSparseTrie> { - // we take the allocated state here, which will make sure we are either `Blind` or - // `Revealed`, and giving us the allocated state if we were `AllocatedEmpty`. - let allocated = self.take_allocated_state(); - - // if `Blind`, we initialize the revealed trie + // if `Blind`, we initialize the revealed trie with the given root node, using a + // pre-allocated trie if available. if self.is_blind() { - let mut revealed = RevealedSparseTrie::from_root(root, masks, retain_updates)?; - - // If we had an allocated state, we use its maps internally. use_allocated_state copies - // over any information we had from revealing. - if let Some(allocated) = allocated { - revealed.use_allocated_state(allocated); - } + let mut revealed_trie = if let Self::Blind(Some(cleared_trie)) = core::mem::take(self) { + cleared_trie + } else { + Box::default() + }; - *self = Self::Revealed(Box::new(revealed)); + *revealed_trie = revealed_trie.with_root(root, masks, retain_updates)?; + *self = Self::Revealed(revealed_trie); } + Ok(self.as_revealed_mut().unwrap()) } /// Returns `true` if the sparse trie has no revealed nodes. pub const fn is_blind(&self) -> bool { - matches!(self, Self::Blind) + matches!(self, Self::Blind(_)) } /// Returns an immutable reference to the underlying revealed sparse trie. @@ -204,19 +179,6 @@ impl SparseTrie { } } - /// Take the allocated state if this is `AllocatedEmpty`, otherwise returns `None`. - /// - /// Converts this `SparseTrie` into `Blind` if this was `AllocatedEmpty`. - pub fn take_allocated_state(&mut self) -> Option { - if let Self::AllocatedEmpty { allocated } = self { - let state = core::mem::take(allocated); - *self = Self::Blind; - Some(state) - } else { - None - } - } - /// Wipes the trie by removing all nodes and values, /// and resetting the trie to only contain an empty root node. /// @@ -227,16 +189,6 @@ impl SparseTrie { Ok(()) } - /// Returns a `SparseTrieState` obtained by clearing the sparse trie state and reusing the - /// allocated state if it was `AllocatedEmpty` or `Revealed`. - pub fn cleared(self) -> SparseTrieState { - match self { - Self::Revealed(revealed) => revealed.cleared_state(), - Self::AllocatedEmpty { allocated } => allocated, - Self::Blind => Default::default(), - } - } - /// Calculates the root hash of the trie. /// /// This will update any remaining dirty nodes before computing the root hash. @@ -267,6 +219,19 @@ impl SparseTrie { let revealed = self.as_revealed_mut()?; Some((revealed.root(), revealed.take_updates())) } + + /// Returns a [`SparseTrie::Blind`] based on this one. If this instance was revealed, or was + /// itself a `Blind` with a pre-allocated [`RevealedSparseTrie`], this will return + /// a `Blind` carrying a cleared pre-allocated [`RevealedSparseTrie`]. 
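+    ///
+    /// This lets the allocations made for one state root computation be reused by the next one
+    /// instead of being dropped and reallocated.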
+ pub fn clear(self) -> Self { + match self { + Self::Blind(_) => self, + Self::Revealed(mut trie) => { + trie.clear(); + Self::Blind(Some(trie)) + } + } + } } impl SparseTrie { @@ -451,46 +416,7 @@ impl RevealedSparseTrie { masks: TrieMasks, retain_updates: bool, ) -> SparseTrieResult { - let mut this = Self { - nodes: HashMap::default(), - branch_node_tree_masks: HashMap::default(), - branch_node_hash_masks: HashMap::default(), - values: HashMap::default(), - prefix_set: PrefixSetMut::default(), - rlp_buf: Vec::new(), - updates: None, - } - .with_updates(retain_updates); - this.reveal_node(Nibbles::default(), root, masks)?; - Ok(this) - } -} - -impl RevealedSparseTrie { - /// Sets the fields of this `RevealedSparseTrie` to the fields of the input - /// `SparseTrieState`. - /// - /// This is meant for reusing the allocated maps contained in the `SparseTrieState`. - /// - /// Copies over any existing nodes, branch masks, and values. - pub fn use_allocated_state(&mut self, mut other: SparseTrieState) { - for (path, node) in self.nodes.drain() { - other.nodes.insert(path, node); - } - for (path, mask) in self.branch_node_tree_masks.drain() { - other.branch_node_tree_masks.insert(path, mask); - } - for (path, mask) in self.branch_node_hash_masks.drain() { - other.branch_node_hash_masks.insert(path, mask); - } - for (path, value) in self.values.drain() { - other.values.insert(path, value); - } - - self.nodes = other.nodes; - self.branch_node_tree_masks = other.branch_node_tree_masks; - self.branch_node_hash_masks = other.branch_node_hash_masks; - self.values = other.values; + Self::default().with_root(root, masks, retain_updates) } /// Configures the trie to retain information about updates. @@ -504,6 +430,29 @@ impl RevealedSparseTrie { self } + /// Configures the trie to have the given root node revealed. + /// + /// ## Panics + /// + /// - If called on a [`RevealedSparseTrie`] which was not newly created or cleared. + pub fn with_root( + mut self, + root: TrieNode, + masks: TrieMasks, + retain_updates: bool, + ) -> SparseTrieResult { + self = self.with_updates(retain_updates); + + // A fresh/cleared `RevealedSparseTrie` has a `SparseNode::Empty` at its root. Delete that + // so we can reveal the new root node. + let path = Nibbles::default(); + let _removed_root = self.nodes.remove(&path).unwrap(); + debug_assert_eq!(_removed_root, SparseNode::Empty); + + self.reveal_node(path, root, masks)?; + Ok(self) + } + /// Returns a reference to the current sparse trie updates. /// /// If no updates have been made/recorded, returns an empty update set. @@ -837,7 +786,10 @@ impl RevealedSparseTrie { } /// Removes all nodes and values from the trie, resetting it to a blank state - /// with only an empty root node. + /// with only an empty root node. This is used when a storage root is deleted. + /// + /// This should not be used when intending to re-use the trie for a fresh account/storage root; + /// use [`Self::clear`] for that. /// /// Note: All previously tracked changes to the trie are also removed. pub fn wipe(&mut self) { @@ -848,32 +800,21 @@ impl RevealedSparseTrie { } /// This clears all data structures in the sparse trie, keeping the backing data structures - /// allocated. + /// allocated. A [`SparseNode::Empty`] is inserted at the root. /// /// This is useful for reusing the trie without needing to reallocate memory. 
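+    ///
+    /// Unlike [`Self::wipe`], this does not record a wiped update; the `updates` set itself is
+    /// reset to `None`.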
pub fn clear(&mut self) { self.nodes.clear(); + self.nodes.insert(Nibbles::default(), SparseNode::Empty); + self.branch_node_tree_masks.clear(); self.branch_node_hash_masks.clear(); self.values.clear(); self.prefix_set.clear(); - if let Some(updates) = self.updates.as_mut() { - updates.clear() - } + self.updates = None; self.rlp_buf.clear(); } - /// Returns the cleared `SparseTrieState` for this `RevealedSparseTrie`. - pub fn cleared_state(mut self) -> SparseTrieState { - self.clear(); - SparseTrieState { - nodes: self.nodes, - branch_node_tree_masks: self.branch_node_tree_masks, - branch_node_hash_masks: self.branch_node_hash_masks, - values: self.values, - } - } - /// Calculates and returns the root hash of the trie. /// /// Before computing the hash, this function processes any remaining (dirty) nodes by @@ -3748,6 +3689,11 @@ mod tests { sparse.wipe(); + assert_matches!( + &sparse.updates, + Some(SparseTrieUpdates{ updated_nodes, removed_nodes, wiped }) + if updated_nodes.is_empty() && removed_nodes.is_empty() && *wiped + ); assert_eq!(sparse.root(), EMPTY_ROOT_HASH); } @@ -3773,12 +3719,6 @@ mod tests { sparse.clear(); - // we have to update the root hash to be an empty one, because the `Default` impl of - // `RevealedSparseTrie` sets the root hash to `EMPTY_ROOT_HASH` in the constructor. - // - // The default impl is only used in tests. - sparse.nodes.insert(Nibbles::default(), SparseNode::Empty); - let empty_trie = RevealedSparseTrie::default(); assert_eq!(empty_trie, sparse); } From 9c045810ada8649b9be225af3f00b8dea7266e49 Mon Sep 17 00:00:00 2001 From: Guro Date: Wed, 2 Jul 2025 13:38:23 +0200 Subject: [PATCH 021/305] docs: update metrics link in ethereum.mdx (#17170) --- docs/vocs/docs/pages/run/ethereum.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/vocs/docs/pages/run/ethereum.mdx b/docs/vocs/docs/pages/run/ethereum.mdx index 9ba16f20c47..885fca1d950 100644 --- a/docs/vocs/docs/pages/run/ethereum.mdx +++ b/docs/vocs/docs/pages/run/ethereum.mdx @@ -90,7 +90,7 @@ In the meantime, consider setting up [observability](/run/monitoring) to monitor [installation]: ./../installation/installation [docs]: https://github.com/paradigmxyz/reth/tree/main/docs -[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics#current-metrics +[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics.md#metrics ## Running without a Consensus Layer From 3a3bc5f795d01209fe565cce4991e19fc8cb52ee Mon Sep 17 00:00:00 2001 From: Aliaksei Misiukevich Date: Wed, 2 Jul 2025 15:00:33 +0200 Subject: [PATCH 022/305] feat: trait impl for dbmock (#17124) Signed-off-by: Aliaksei Misiukevich Co-authored-by: Matthias Seitz --- crates/storage/db-api/src/mock.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/storage/db-api/src/mock.rs b/crates/storage/db-api/src/mock.rs index ece47f81ee5..d37ffa289b9 100644 --- a/crates/storage/db-api/src/mock.rs +++ b/crates/storage/db-api/src/mock.rs @@ -7,6 +7,7 @@ use crate::{ ReverseWalker, Walker, }, database::Database, + database_metrics::DatabaseMetrics, table::{DupSort, Encode, Table, TableImporter}, transaction::{DbTx, DbTxMut}, DatabaseError, @@ -34,6 +35,8 @@ impl Database for DatabaseMock { } } +impl DatabaseMetrics for DatabaseMock {} + /// Mock read only tx #[derive(Debug, Clone, Default)] pub struct TxMock { From 40fd91a06861f58ad2df54975115b5e220cef2e8 Mon Sep 17 00:00:00 2001 From: Rez Date: Wed, 2 Jul 2025 23:55:04 +1000 Subject: [PATCH 023/305] feat: expose chain_spec field in 
LocalPayloadAttributesBuilder (#17151) --- crates/engine/local/src/payload.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index 0c34279d60b..327690197e4 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -11,7 +11,8 @@ use std::sync::Arc; #[derive(Debug)] #[non_exhaustive] pub struct LocalPayloadAttributesBuilder { - chain_spec: Arc, + /// The chainspec + pub chain_spec: Arc, } impl LocalPayloadAttributesBuilder { From f54cef5e287f24e50b8ff47318c8134586205fc2 Mon Sep 17 00:00:00 2001 From: CrazyFrog Date: Wed, 2 Jul 2025 15:58:42 +0200 Subject: [PATCH 024/305] docs: update Grafana repository URL in monitoring documentation (#17175) --- docs/vocs/docs/pages/run/monitoring.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/vocs/docs/pages/run/monitoring.mdx b/docs/vocs/docs/pages/run/monitoring.mdx index d09b795dc4b..6a8a35fcec7 100644 --- a/docs/vocs/docs/pages/run/monitoring.mdx +++ b/docs/vocs/docs/pages/run/monitoring.mdx @@ -57,7 +57,7 @@ cd prometheus-* # Install Grafana sudo apt-get install -y apt-transport-https software-properties-common wget -q -O - https://packages.grafana.com/gpg.key | sudo apt-key add - -echo "deb https://packages.grafana.com/oss/deb stable main" | sudo tee -a /etc/apt/sources.list.d/grafana.list +echo "deb https://packages.grafana.com stable main" | sudo tee -a /etc/apt/sources.list.d/grafana.list sudo apt-get update sudo apt-get install grafana ``` From b286a61db8a46e51649be721d4db7d3f3b3fbddb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 2 Jul 2025 16:52:16 +0200 Subject: [PATCH 025/305] chore: relax rpc middleware generic (#17174) --- crates/ethereum/node/src/node.rs | 15 ++++++++++----- crates/ethereum/node/tests/it/builder.rs | 2 ++ 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 4eefecb34a0..672b427feee 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -35,7 +35,7 @@ use reth_node_builder::{ use reth_provider::{providers::ProviderFactoryBuilder, EthStorage}; use reth_rpc::{eth::core::EthApiFor, ValidationApi}; use reth_rpc_api::{eth::FullEthApiServer, servers::BlockSubmissionValidationApiServer}; -use reth_rpc_builder::config::RethRpcServerConfig; +use reth_rpc_builder::{config::RethRpcServerConfig, middleware::RethRpcMiddleware}; use reth_rpc_eth_types::{error::FromEvmError, EthApiError}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; @@ -188,13 +188,16 @@ where } } -impl EthereumAddOns +impl EthereumAddOns where N: FullNodeComponents, EthB: EthApiBuilder, { /// Replace the engine API builder. 
- pub fn with_engine_api(self, engine_api_builder: T) -> EthereumAddOns + pub fn with_engine_api( + self, + engine_api_builder: T, + ) -> EthereumAddOns where T: Send, { @@ -206,7 +209,7 @@ where pub fn with_engine_validator( self, engine_validator_builder: T, - ) -> EthereumAddOns + ) -> EthereumAddOns where T: Send, { @@ -224,7 +227,8 @@ where } } -impl NodeAddOns for EthereumAddOns +impl NodeAddOns + for EthereumAddOns where N: FullNodeComponents< Types: NodeTypes< @@ -239,6 +243,7 @@ where EB: EngineApiBuilder, EthApiError: FromEvmError, EvmFactoryFor: EvmFactory, + RpcMiddleware: RethRpcMiddleware, { type Handle = RpcHandle; diff --git a/crates/ethereum/node/tests/it/builder.rs b/crates/ethereum/node/tests/it/builder.rs index 91dfd683efe..4e619f5f3d0 100644 --- a/crates/ethereum/node/tests/it/builder.rs +++ b/crates/ethereum/node/tests/it/builder.rs @@ -10,6 +10,7 @@ use reth_node_api::NodeTypesWithDBAdapter; use reth_node_builder::{EngineNodeLauncher, FullNodeComponents, NodeBuilder, NodeConfig}; use reth_node_ethereum::node::{EthereumAddOns, EthereumNode}; use reth_provider::providers::BlockchainProvider; +use reth_rpc_builder::Identity; use reth_tasks::TaskManager; #[test] @@ -33,6 +34,7 @@ fn test_basic_setup() { let _client = handles.rpc.http_client(); Ok(()) }) + .map_add_ons(|addons| addons.with_rpc_middleware(Identity::default())) .extend_rpc_modules(|ctx| { let _ = ctx.config(); let _ = ctx.node().provider(); From 60940dd243918a1ba55f5cce5046a443bc75611d Mon Sep 17 00:00:00 2001 From: Ferran Borreguero Date: Wed, 2 Jul 2025 17:46:56 +0100 Subject: [PATCH 026/305] Add bootnode cmd to cli runner (#17180) Co-authored-by: Matthias Seitz --- crates/cli/commands/src/p2p/mod.rs | 9 +- docs/vocs/docs/pages/cli/SUMMARY.mdx | 1 + docs/vocs/docs/pages/cli/reth/p2p.mdx | 9 +- .../vocs/docs/pages/cli/reth/p2p/bootnode.mdx | 114 ++++++++++++++++++ docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx | 2 +- 5 files changed, 128 insertions(+), 7 deletions(-) create mode 100644 docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index ab07a553c19..3aa7569e9b6 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -18,7 +18,7 @@ use reth_node_core::{ }; pub mod bootnode; -mod rlpx; +pub mod rlpx; /// `reth p2p` command #[derive(Debug, Parser)] @@ -71,8 +71,10 @@ pub enum Subcommands { #[arg(value_parser = hash_or_num_value_parser)] id: BlockHashOrNumber, }, - // RLPx utilities + /// RLPx utilities Rlpx(rlpx::Command), + /// Bootnode command + Bootnode(bootnode::Command), } impl> Command { @@ -162,6 +164,9 @@ impl Subcommands::Rlpx(command) => { command.execute().await?; } + Subcommands::Bootnode(command) => { + command.execute().await?; + } } Ok(()) diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx index 330f32b3fd2..143cded1466 100644 --- a/docs/vocs/docs/pages/cli/SUMMARY.mdx +++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx @@ -36,6 +36,7 @@ - [`reth p2p body`](/cli/reth/p2p/body) - [`reth p2p rlpx`](/cli/reth/p2p/rlpx) - [`reth p2p rlpx ping`](/cli/reth/p2p/rlpx/ping) + - [`reth p2p bootnode`](/cli/reth/p2p/bootnode) - [`reth config`](/cli/reth/config) - [`reth debug`](/cli/reth/debug) - [`reth debug execution`](/cli/reth/debug/execution) diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index a435c916169..53a6f214532 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx 
@@ -9,10 +9,11 @@ $ reth p2p --help Usage: reth p2p [OPTIONS] Commands: - header Download block header - body Download block body - rlpx RLPx commands - help Print this message or the help of the given subcommand(s) + header Download block header + body Download block body + rlpx RLPx utilities + bootnode Bootnode command + help Print this message or the help of the given subcommand(s) Options: --config diff --git a/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx new file mode 100644 index 00000000000..a7edd5b9a53 --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/p2p/bootnode.mdx @@ -0,0 +1,114 @@ +# reth p2p bootnode + +Bootnode command + +```bash +$ reth p2p bootnode --help +``` +```txt +Usage: reth p2p bootnode [OPTIONS] + +Options: + --addr + Listen address for the bootnode (default: ":30301") + + [default: :30301] + + --gen-key + Generate a new node key and save it to the specified file + + [default: ] + + --node-key + Private key filename for the node + + [default: ] + + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --v5 + Run a v5 topic discovery bootnode + + -h, --help + Print help (see a summary with '-h') + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) 
+ + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index 484a8005cbd..145409e767e 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -1,6 +1,6 @@ # reth p2p rlpx -RLPx commands +RLPx utilities ```bash $ reth p2p rlpx --help From f86445e0945987c938de00f039a5aac2d33494d7 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 3 Jul 2025 02:00:41 -0400 Subject: [PATCH 027/305] feat(trie): add ParallelSparseTrie::update_leaf (#16956) Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Co-authored-by: Brian Picciano --- crates/trie/sparse-parallel/src/trie.rs | 1375 ++++++++++++++++++++++- 1 file changed, 1364 insertions(+), 11 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index b397fdb3493..96fccea84b7 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -184,16 +184,108 @@ impl ParallelSparseTrie { /// provider returns an error. pub fn update_leaf( &mut self, - key_path: Nibbles, + full_path: Nibbles, value: Vec, - masks: TrieMasks, provider: impl BlindedProvider, ) -> SparseTrieResult<()> { - let _key_path = key_path; - let _value = value; - let _masks = masks; - let _provider = provider; - todo!() + self.prefix_set.insert(full_path); + let existing = self.upper_subtrie.inner.values.insert(full_path, value.clone()); + if existing.is_some() { + // upper trie structure unchanged, return immediately + return Ok(()) + } + + // Start at the root, traversing until we find either the node to update or a subtrie to + // update. + // + // We first traverse the upper subtrie for two levels, and moving any created nodes to a + // lower subtrie if necessary. + // + // We use `next` to keep track of the next node that we need to traverse to, and + // `new_nodes` to keep track of any nodes that were created during the traversal. + let mut new_nodes = Vec::new(); + let mut next = Some(Nibbles::default()); + + // Traverse the upper subtrie to find the node to update or the subtrie to update. + // + // We stop when the next node to traverse would be in a lower subtrie, or if there are no + // more nodes to traverse. + while let Some(current) = + next.filter(|next| SparseSubtrieType::path_len_is_upper(next.len())) + { + // Traverse the next node, keeping track of any changed nodes and the next step in the + // trie + match self.upper_subtrie.update_next_node(current, &full_path, &provider)? { + LeafUpdateStep::Continue { next_node } => { + next = Some(next_node); + } + LeafUpdateStep::Complete { inserted_nodes } => { + new_nodes.extend(inserted_nodes); + next = None; + } + LeafUpdateStep::NodeNotFound => { + next = None; + } + } + } + + // Move nodes from upper subtrie to lower subtries + for node_path in &new_nodes { + // Skip nodes that belong in the upper subtrie + if SparseSubtrieType::path_len_is_upper(node_path.len()) { + continue + } + + let node = + self.upper_subtrie.nodes.remove(node_path).expect("node belongs to upper subtrie"); + + // If it's a leaf node, extract its value before getting mutable reference to subtrie + let leaf_value = if let SparseNode::Leaf { key, .. 
+                let mut leaf_full_path = *node_path;
+                leaf_full_path.extend(key);
+                Some((
+                    leaf_full_path,
+                    self.upper_subtrie
+                        .inner
+                        .values
+                        .remove(&leaf_full_path)
+                        .expect("leaf nodes have associated values entries"),
+                ))
+            } else {
+                None
+            };
+
+            // Get or create the subtrie with the exact node path (not truncated to 2 nibbles).
+            let subtrie = self.subtrie_for_path(node_path);
+
+            // Insert the leaf value if we have one
+            if let Some((leaf_full_path, value)) = leaf_value {
+                subtrie.inner.values.insert(leaf_full_path, value);
+            }
+
+            // Insert the node into the lower subtrie
+            subtrie.nodes.insert(*node_path, node);
+        }
+
+        // If we reached the max depth of the upper trie, we may have had more nodes to insert.
+        if let Some(next_path) = next.filter(|n| !SparseSubtrieType::path_len_is_upper(n.len())) {
+            // Use subtrie_for_path to ensure the subtrie has the correct path.
+            //
+            // The next_path here represents where we need to continue traversal, which may
+            // be longer than 2 nibbles if we're following an extension node.
+            let subtrie = self.subtrie_for_path(&next_path);
+
+            // Create an empty root at the subtrie path if the subtrie is empty
+            if subtrie.nodes.is_empty() {
+                subtrie.nodes.insert(subtrie.path, SparseNode::Empty);
+            }
+
+            // If we didn't update the target leaf, we need to call update_leaf on the subtrie
+            // to ensure that the leaf is updated correctly.
+            subtrie.update_leaf(full_path, value, provider)?;
+        }
+
+        Ok(())
     }
 
     /// Returns the next node in the traversal path from the given path towards the leaf for the
@@ -865,6 +957,7 @@ enum FindNextToLeafOutcome {
 }
 
 impl SparseSubtrie {
+    /// Creates a new empty subtrie with the specified root path.
     fn new(path: Nibbles) -> Self {
         Self { path, ..Default::default() }
     }
@@ -885,6 +978,205 @@ impl SparseSubtrie {
         current_level == child_level
     }
 
+    /// Updates or inserts a leaf node at the specified key path with the provided RLP-encoded
+    /// value.
+    ///
+    /// If the leaf did not previously exist, this method adjusts the trie structure by inserting
+    /// new leaf nodes, splitting branch nodes, or collapsing extension nodes as needed.
+    ///
+    /// # Returns
+    ///
+    /// Returns `Ok(())` if the update is successful.
+    /// If a split branch was added, this is returned as well, along with its path.
+    ///
+    /// Note: If an update requires revealing a blinded node, an error is returned if the blinded
+    /// provider returns an error.
+    pub fn update_leaf(
+        &mut self,
+        full_path: Nibbles,
+        value: Vec<u8>,
+        provider: impl BlindedProvider,
+    ) -> SparseTrieResult<()> {
+        debug_assert!(full_path.starts_with(&self.path));
+        let existing = self.inner.values.insert(full_path, value);
+        if existing.is_some() {
+            // trie structure unchanged, return immediately
+            return Ok(())
+        }
+
+        // Here we are starting at the root of the subtrie, and traversing from there.
+        let mut current = Some(self.path);
+        while let Some(current_path) = current {
+            match self.update_next_node(current_path, &full_path, &provider)? {
+                LeafUpdateStep::Continue { next_node } => {
+                    current = Some(next_node);
+                }
+                LeafUpdateStep::Complete { .. } | LeafUpdateStep::NodeNotFound => {
+                    current = None;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Processes the current node, returning what to do next in the leaf update process.
+    ///
+    /// This will add or update any nodes in the trie as necessary.
+    ///
+    /// Returns a `LeafUpdateStep` containing the next node to process (if any) and
+    /// the paths of nodes that were inserted during this step.
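+    ///
+    /// # Example
+    ///
+    /// A rough sketch of how callers drive this step machine (mirroring `update_leaf` above;
+    /// `subtrie`, `full_path`, and `provider` are assumed to be in scope):
+    ///
+    /// ```ignore
+    /// let mut current = Some(subtrie.path);
+    /// while let Some(path) = current {
+    ///     current = match subtrie.update_next_node(path, &full_path, &provider)? {
+    ///         LeafUpdateStep::Continue { next_node } => Some(next_node),
+    ///         LeafUpdateStep::Complete { .. } | LeafUpdateStep::NodeNotFound => None,
+    ///     };
+    /// }
+    /// ```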
+    fn update_next_node(
+        &mut self,
+        mut current: Nibbles,
+        path: &Nibbles,
+        provider: impl BlindedProvider,
+    ) -> SparseTrieResult<LeafUpdateStep> {
+        debug_assert!(path.starts_with(&self.path));
+        debug_assert!(current.starts_with(&self.path));
+        debug_assert!(path.starts_with(&current));
+        let Some(node) = self.nodes.get_mut(&current) else {
+            return Ok(LeafUpdateStep::NodeNotFound);
+        };
+        match node {
+            SparseNode::Empty => {
+                // We need to insert the node with a different path and key depending on the path of
+                // the subtrie.
+                let path = path.slice(self.path.len()..);
+                *node = SparseNode::new_leaf(path);
+                Ok(LeafUpdateStep::complete_with_insertions(vec![current]))
+            }
+            SparseNode::Hash(hash) => {
+                Err(SparseTrieErrorKind::BlindedNode { path: current, hash: *hash }.into())
+            }
+            SparseNode::Leaf { key: current_key, .. } => {
+                current.extend(current_key);
+
+                // this leaf is being updated
+                debug_assert!(
+                    &current != path,
+                    "we already checked leaf presence in the beginning"
+                );
+
+                // find the common prefix
+                let common = current.common_prefix_length(path);
+
+                // update existing node
+                let new_ext_key = current.slice(current.len() - current_key.len()..common);
+                *node = SparseNode::new_ext(new_ext_key);
+
+                // create a branch node and corresponding leaves
+                self.nodes.reserve(3);
+                let branch_path = current.slice(..common);
+                let new_leaf_path = path.slice(..=common);
+                let existing_leaf_path = current.slice(..=common);
+
+                self.nodes.insert(
+                    branch_path,
+                    SparseNode::new_split_branch(
+                        current.get_unchecked(common),
+                        path.get_unchecked(common),
+                    ),
+                );
+                self.nodes.insert(new_leaf_path, SparseNode::new_leaf(path.slice(common + 1..)));
+                self.nodes
+                    .insert(existing_leaf_path, SparseNode::new_leaf(current.slice(common + 1..)));
+
+                Ok(LeafUpdateStep::complete_with_insertions(vec![
+                    branch_path,
+                    new_leaf_path,
+                    existing_leaf_path,
+                ]))
+            }
+            SparseNode::Extension { key, .. } => {
+                current.extend(key);
+
+                if !path.starts_with(&current) {
+                    // find the common prefix
+                    let common = current.common_prefix_length(path);
+                    *key = current.slice(current.len() - key.len()..common);
+
+                    // If branch node updates retention is enabled, we need to query the
+                    // extension node child to later set the hash mask for a parent branch node
+                    // correctly.
+                    if self.inner.updates.is_some() {
+                        // Check if the extension node child is a hash that needs to be revealed
+                        if self
+                            .nodes
+                            .get(&current)
+                            .expect(
+                                "node must exist, extension nodes are only created with children",
+                            )
+                            .is_hash()
+                        {
+                            if let Some(RevealedNode { node, tree_mask, hash_mask }) =
+                                provider.blinded_node(&current)?
+                            {
+                                let decoded = TrieNode::decode(&mut &node[..])?;
+                                trace!(
+                                    target: "trie::parallel_sparse",
+                                    ?current,
+                                    ?decoded,
+                                    ?tree_mask,
+                                    ?hash_mask,
+                                    "Revealing extension node child",
+                                );
+                                self.reveal_node(
+                                    current,
+                                    &decoded,
+                                    TrieMasks { hash_mask, tree_mask },
+                                )?;
+                            }
+                        }
+                    }
+
+                    // create state mask for new branch node
+                    // NOTE: this might overwrite the current extension node
+                    self.nodes.reserve(3);
+                    let branch_path = current.slice(..common);
+                    let new_leaf_path = path.slice(..=common);
+                    let branch = SparseNode::new_split_branch(
+                        current.get_unchecked(common),
+                        path.get_unchecked(common),
+                    );
+
+                    self.nodes.insert(branch_path, branch);
+
+                    // create new leaf
+                    let new_leaf = SparseNode::new_leaf(path.slice(common + 1..));
+                    self.nodes.insert(new_leaf_path, new_leaf);
+
+                    let mut inserted_nodes = vec![branch_path, new_leaf_path];
+
+                    // recreate extension to previous child if needed
+                    let key = current.slice(common + 1..);
+                    if !key.is_empty() {
+                        let ext_path = current.slice(..=common);
+                        self.nodes.insert(ext_path, SparseNode::new_ext(key));
+                        inserted_nodes.push(ext_path);
+                    }
+
+                    return Ok(LeafUpdateStep::complete_with_insertions(inserted_nodes))
+                }
+
+                Ok(LeafUpdateStep::continue_with(current))
+            }
+            SparseNode::Branch { state_mask, .. } => {
+                let nibble = path.get_unchecked(current.len());
+                current.push_unchecked(nibble);
+                if !state_mask.is_bit_set(nibble) {
+                    state_mask.set_bit(nibble);
+                    let new_leaf = SparseNode::new_leaf(path.slice(current.len()..));
+                    self.nodes.insert(current, new_leaf);
+                    return Ok(LeafUpdateStep::complete_with_insertions(vec![current]))
+                }
+
+                // If the nibble is set, we can continue traversing the branch.
+                Ok(LeafUpdateStep::continue_with(current))
+            }
+        }
+    }
+
     /// Internal implementation of the method of the same name on `ParallelSparseTrie`.
     fn reveal_node(
         &mut self,
@@ -1467,6 +1759,36 @@ impl SparseSubtrieInner {
     }
 }
 
+/// Represents the outcome of processing a node during leaf insertion
+#[derive(Clone, Debug, PartialEq, Eq, Default)]
+pub enum LeafUpdateStep {
+    /// Continue traversing to the next node
+    Continue {
+        /// The next node path to process
+        next_node: Nibbles,
+    },
+    /// Update is complete with nodes inserted
+    Complete {
+        /// The node paths that were inserted during this step
+        inserted_nodes: Vec<Nibbles>,
+    },
+    /// The node was not found
+    #[default]
+    NodeNotFound,
+}
+
+impl LeafUpdateStep {
+    /// Creates a step to continue with the next node
+    pub const fn continue_with(next_node: Nibbles) -> Self {
+        Self::Continue { next_node }
+    }
+
+    /// Creates a step indicating completion with inserted nodes
+    pub const fn complete_with_insertions(inserted_nodes: Vec<Nibbles>) -> Self {
+        Self::Complete { inserted_nodes }
+    }
+}
+
 /// Sparse Subtrie Type.
 ///
 /// Used to determine the type of subtrie a certain path belongs to:
@@ -1581,16 +1903,17 @@ mod tests {
         node_iter::{TrieElement, TrieNodeIter},
         trie_cursor::{noop::NoopAccountTrieCursor, TrieCursor},
         walker::TrieWalker,
+        HashedPostState,
     };
     use reth_trie_common::{
         prefix_set::PrefixSetMut,
         proof::{ProofNodes, ProofRetainer},
         updates::TrieUpdates,
-        BranchNode, ExtensionNode, HashBuilder, HashedPostState, LeafNode, RlpNode, TrieMask,
-        TrieNode, EMPTY_ROOT_HASH,
+        BranchNode, ExtensionNode, HashBuilder, LeafNode, RlpNode, TrieMask, TrieNode,
+        EMPTY_ROOT_HASH,
     };
     use reth_trie_sparse::{
-        blinded::{BlindedProvider, RevealedNode},
+        blinded::{BlindedProvider, DefaultBlindedProvider, RevealedNode},
         SparseNode, TrieMasks,
     };
@@ -1634,6 +1957,160 @@
         buf
     }
 
+    /// Test context that provides helper methods for trie testing
+    #[derive(Default)]
+    struct ParallelSparseTrieTestContext;
+
+    impl ParallelSparseTrieTestContext {
+        /// Assert that a lower subtrie exists at the given path
+        fn assert_subtrie_exists(&self, trie: &ParallelSparseTrie, path: &Nibbles) {
+            let idx = path_subtrie_index_unchecked(path);
+            assert!(
+                trie.lower_subtries[idx].is_some(),
+                "Expected lower subtrie at path {path:?} to exist",
+            );
+        }
+
+        /// Get a lower subtrie, panicking if it doesn't exist
+        fn get_subtrie<'a>(
+            &self,
+            trie: &'a ParallelSparseTrie,
+            path: &Nibbles,
+        ) -> &'a SparseSubtrie {
+            let idx = path_subtrie_index_unchecked(path);
+            trie.lower_subtries[idx]
+                .as_ref()
+                .unwrap_or_else(|| panic!("Lower subtrie at path {path:?} should exist"))
+        }
+
+        /// Assert that a lower subtrie has a specific path field value
+        fn assert_subtrie_path(
+            &self,
+            trie: &ParallelSparseTrie,
+            subtrie_prefix: impl AsRef<[u8]>,
+            expected_path: impl AsRef<[u8]>,
+        ) {
+            let subtrie_prefix = Nibbles::from_nibbles(subtrie_prefix);
+            let expected_path = Nibbles::from_nibbles(expected_path);
+            let idx = path_subtrie_index_unchecked(&subtrie_prefix);
+
+            let subtrie = trie.lower_subtries[idx].as_ref().unwrap_or_else(|| {
+                panic!("Lower subtrie at prefix {subtrie_prefix:?} should exist")
+            });
+
+            assert_eq!(
+                subtrie.path, expected_path,
+                "Subtrie at prefix {subtrie_prefix:?} should have path {expected_path:?}, but has {:?}",
+                subtrie.path
+            );
+        }
+
+        /// Create test leaves with consecutive account values
+        fn create_test_leaves(&self, paths: &[&[u8]]) -> Vec<(Nibbles, Vec<u8>)> {
+            paths
+                .iter()
+                .enumerate()
+                .map(|(i, path)| (Nibbles::from_nibbles(path), encode_account_value(i as u64 + 1)))
+                .collect()
+        }
+
+        /// Create a single test leaf with the given path and value nonce
+        fn create_test_leaf(&self, path: impl AsRef<[u8]>, value_nonce: u64) -> (Nibbles, Vec<u8>) {
+            (Nibbles::from_nibbles(path), encode_account_value(value_nonce))
+        }
+
+        /// Insert multiple leaves into the trie
+        fn insert_leaves(&self, trie: &mut ParallelSparseTrie, leaves: &[(Nibbles, Vec<u8>)]) {
+            for (path, value) in leaves {
+                trie.update_leaf(*path, value.clone(), DefaultBlindedProvider).unwrap();
+            }
+        }
+
+        /// Create an assertion builder for a subtrie
+        fn assert_subtrie<'a>(
+            &self,
+            trie: &'a ParallelSparseTrie,
+            path: Nibbles,
+        ) -> SubtrieAssertion<'a> {
+            self.assert_subtrie_exists(trie, &path);
+            let subtrie = self.get_subtrie(trie, &path);
+            SubtrieAssertion::new(subtrie)
+        }
+
+        /// Create an assertion builder for the upper subtrie
+        fn assert_upper_subtrie<'a>(&self, trie: &'a ParallelSparseTrie) -> SubtrieAssertion<'a> {
+            SubtrieAssertion::new(&trie.upper_subtrie)
+        }
+    }
+
+    /// Assertion builder for subtrie
structure + struct SubtrieAssertion<'a> { + subtrie: &'a SparseSubtrie, + } + + impl<'a> SubtrieAssertion<'a> { + fn new(subtrie: &'a SparseSubtrie) -> Self { + Self { subtrie } + } + + fn has_branch(self, path: &Nibbles, expected_mask_bits: &[u8]) -> Self { + match self.subtrie.nodes.get(path) { + Some(SparseNode::Branch { state_mask, .. }) => { + for bit in expected_mask_bits { + assert!( + state_mask.is_bit_set(*bit), + "Expected branch at {path:?} to have bit {bit} set, instead mask is: {state_mask:?}", + ); + } + } + node => panic!("Expected branch node at {path:?}, found {node:?}"), + } + self + } + + fn has_leaf(self, path: &Nibbles, expected_key: &Nibbles) -> Self { + match self.subtrie.nodes.get(path) { + Some(SparseNode::Leaf { key, .. }) => { + assert_eq!( + *key, *expected_key, + "Expected leaf at {path:?} to have key {expected_key:?}, found {key:?}", + ); + } + node => panic!("Expected leaf node at {path:?}, found {node:?}"), + } + self + } + + fn has_extension(self, path: &Nibbles, expected_key: &Nibbles) -> Self { + match self.subtrie.nodes.get(path) { + Some(SparseNode::Extension { key, .. }) => { + assert_eq!( + *key, *expected_key, + "Expected extension at {path:?} to have key {expected_key:?}, found {key:?}", + ); + } + node => panic!("Expected extension node at {path:?}, found {node:?}"), + } + self + } + + fn has_value(self, path: &Nibbles, expected_value: &[u8]) -> Self { + let actual = self.subtrie.inner.values.get(path); + assert_eq!( + actual.map(|v| v.as_slice()), + Some(expected_value), + "Expected value at {path:?} to be {expected_value:?}, found {actual:?}", + ); + self + } + + fn has_no_value(self, path: &Nibbles) -> Self { + let actual = self.subtrie.inner.values.get(path); + assert!(actual.is_none(), "Expected no value at {path:?}, but found {actual:?}"); + self + } + } + fn create_leaf_node(key: impl AsRef<[u8]>, value_nonce: u64) -> TrieNode { TrieNode::Leaf(LeafNode::new(Nibbles::from_nibbles(key), encode_account_value(value_nonce))) } @@ -1810,6 +2287,49 @@ mod tests { } } + /// Assert that the sparse subtrie nodes and the proof nodes from the hash builder are equal. + fn assert_eq_sparse_subtrie_proof_nodes(sparse_trie: &SparseSubtrie, proof_nodes: ProofNodes) { + let proof_nodes = proof_nodes + .into_nodes_sorted() + .into_iter() + .map(|(path, node)| (path, TrieNode::decode(&mut node.as_ref()).unwrap())); + + let sparse_nodes = sparse_trie.nodes.iter().sorted_by_key(|(path, _)| *path); + + for ((proof_node_path, proof_node), (sparse_node_path, sparse_node)) in + proof_nodes.zip(sparse_nodes) + { + assert_eq!(&proof_node_path, sparse_node_path); + + let equals = match (&proof_node, &sparse_node) { + // Both nodes are empty + (TrieNode::EmptyRoot, SparseNode::Empty) => true, + // Both nodes are branches and have the same state mask + ( + TrieNode::Branch(BranchNode { state_mask: proof_state_mask, .. }), + SparseNode::Branch { state_mask: sparse_state_mask, .. }, + ) => proof_state_mask == sparse_state_mask, + // Both nodes are extensions and have the same key + ( + TrieNode::Extension(ExtensionNode { key: proof_key, .. }), + SparseNode::Extension { key: sparse_key, .. }, + ) | + // Both nodes are leaves and have the same key + ( + TrieNode::Leaf(LeafNode { key: proof_key, .. }), + SparseNode::Leaf { key: sparse_key, .. 
}, + ) => proof_key == sparse_key, + // Empty and hash nodes are specific to the sparse trie, skip them + (_, SparseNode::Empty | SparseNode::Hash(_)) => continue, + _ => false, + }; + assert!( + equals, + "path: {proof_node_path:?}\nproof node: {proof_node:?}\nsparse node: {sparse_node:?}" + ); + } + } + #[test] fn test_get_changed_subtries_empty() { let mut trie = ParallelSparseTrie::default(); @@ -2316,7 +2836,6 @@ mod tests { ); // Compare hashes between hash builder and subtrie - let hash_builder_branch_1_hash = RlpNode::from_rlp(proof_nodes.get(&branch_1_path).unwrap().as_ref()).as_hash().unwrap(); let subtrie_branch_1_hash = subtrie.nodes.get(&branch_1_path).unwrap().hash().unwrap(); @@ -2975,4 +3494,838 @@ mod tests { assert!(leaf_1_subtrie.nodes.get(&leaf_1_path).unwrap().hash().is_some()); assert!(leaf_2_subtrie.nodes.get(&leaf_2_path).unwrap().hash().is_some()); } + + #[test] + fn sparse_subtrie_empty_update_one() { + let key = Nibbles::unpack(B256::with_last_byte(42)); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + value().into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; + + let (_hash_builder_root, _hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + [(key, value())], + NoopAccountTrieCursor::default(), + Default::default(), + [key], + ); + + let mut sparse = SparseSubtrie::default().with_updates(true); + sparse.update_leaf(key, value_encoded(), DefaultBlindedProvider).unwrap(); + // TODO: enable these and make test pass as we have these implemented + // let sparse_root = sparse.root(); + // let sparse_updates = sparse.take_updates(); + + // assert_eq!(sparse_root, hash_builder_root); + // assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_sparse_subtrie_proof_nodes(&sparse, hash_builder_proof_nodes); + } + + #[test] + fn test_update_leaf_cross_level() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test adding leaves that demonstrate the cross-level behavior + // Based on the example: leaves 0x1234, 0x1245, 0x1334, 0x1345 + // + // Final trie structure: + // Upper trie: + // 0x: Branch { state_mask: 0x10 } + // └── 0x1: Extension { key: 0x } + // └── Subtrie (0x12): pointer to lower subtrie + // └── Subtrie (0x13): pointer to lower subtrie + // + // Lower subtrie (0x12): + // 0x12: Branch { state_mask: 0x8 | 0x10 } + // ├── 0x123: Leaf { key: 0x4 } + // └── 0x124: Leaf { key: 0x5 } + // + // Lower subtrie (0x13): + // 0x13: Branch { state_mask: 0x8 | 0x10 } + // ├── 0x133: Leaf { key: 0x4 } + // └── 0x134: Leaf { key: 0x5 } + + // First add leaf 0x1345 - this should create a leaf in upper trie at 0x + let (leaf1_path, value1) = ctx.create_test_leaf([0x1, 0x3, 0x4, 0x5], 1); + trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap(); + + // Verify upper trie has a leaf at the root with key 1345 + ctx.assert_upper_subtrie(&trie) + .has_leaf(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x3, 0x4, 0x5])); + + // Add leaf 0x1234 - this should go first in the upper subtrie + let (leaf2_path, value2) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 2); + trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); + + // Upper trie should now have a branch at 0x1 + ctx.assert_upper_subtrie(&trie).has_branch(&Nibbles::from_nibbles([0x1]), &[0x2, 0x3]); + + // Add leaf 0x1245 - this should 
cause a branch and create the 0x12 subtrie + let (leaf3_path, value3) = ctx.create_test_leaf([0x1, 0x2, 0x4, 0x5], 3); + trie.update_leaf(leaf3_path, value3.clone(), DefaultBlindedProvider).unwrap(); + + // Verify lower subtrie at 0x12 exists with correct structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2]), &[0x3, 0x4]) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &Nibbles::from_nibbles([0x4])) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x4]), &Nibbles::from_nibbles([0x5])) + .has_value(&leaf2_path, &value2) + .has_value(&leaf3_path, &value3); + + // Add leaf 0x1334 - this should create another lower subtrie + let (leaf4_path, value4) = ctx.create_test_leaf([0x1, 0x3, 0x3, 0x4], 4); + trie.update_leaf(leaf4_path, value4.clone(), DefaultBlindedProvider).unwrap(); + + // Verify lower subtrie at 0x13 exists with correct values + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x3])) + .has_value(&leaf1_path, &value1) + .has_value(&leaf4_path, &value4); + + // Verify the 0x12 subtrie still has its values + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_value(&leaf2_path, &value2) + .has_value(&leaf3_path, &value3); + } + + #[test] + fn test_update_leaf_split_at_level_boundary() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // This test demonstrates what happens when we insert leaves that cause + // splitting exactly at the upper/lower trie boundary (2 nibbles). + // + // Final trie structure: + // Upper trie: + // 0x: Extension { key: 0x12 } + // └── Subtrie (0x12): pointer to lower subtrie + // + // Lower subtrie (0x12): + // 0x12: Branch { state_mask: 0x4 | 0x8 } + // ├── 0x122: Leaf { key: 0x4 } + // └── 0x123: Leaf { key: 0x4 } + + // First insert a leaf that ends exactly at the boundary (2 nibbles) + let (first_leaf_path, first_value) = ctx.create_test_leaf([0x1, 0x2, 0x2, 0x4], 1); + + trie.update_leaf(first_leaf_path, first_value.clone(), DefaultBlindedProvider).unwrap(); + + // In an empty trie, the first leaf becomes the root, regardless of path length + ctx.assert_upper_subtrie(&trie) + .has_leaf(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2, 0x2, 0x4])) + .has_value(&first_leaf_path, &first_value); + + // Now insert another leaf that shares the same 2-nibble prefix + let (second_leaf_path, second_value) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 2); + + trie.update_leaf(second_leaf_path, second_value.clone(), DefaultBlindedProvider).unwrap(); + + // Now both leaves should be in a lower subtrie at index [0x1, 0x2] + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2]), &[0x2, 0x3]) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x2]), &Nibbles::from_nibbles([0x4])) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &Nibbles::from_nibbles([0x4])) + .has_value(&first_leaf_path, &first_value) + .has_value(&second_leaf_path, &second_value); + + // Upper subtrie should no longer have these values + ctx.assert_upper_subtrie(&trie) + .has_no_value(&first_leaf_path) + .has_no_value(&second_leaf_path); + } + + #[test] + fn test_update_subtrie_with_multiple_leaves() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // First, add multiple leaves that will create a subtrie structure + // All leaves share the prefix 
[0x1, 0x2] to ensure they create a subtrie + // + // This should result in a trie with the following structure: + // 0x: Extension { key: 0x12 } + // └── Subtrie (0x12): + // 0x12: Branch { state_mask: 0x3 | 0x4 } + // ├── 0x123: Branch { state_mask: 0x4 | 0x5 } + // │ ├── 0x1234: Leaf { key: 0x } + // │ └── 0x1235: Leaf { key: 0x } + // └── 0x124: Branch { state_mask: 0x6 | 0x7 } + // ├── 0x1246: Leaf { key: 0x } + // └── 0x1247: Leaf { key: 0x } + let leaves = ctx.create_test_leaves(&[ + &[0x1, 0x2, 0x3, 0x4], + &[0x1, 0x2, 0x3, 0x5], + &[0x1, 0x2, 0x4, 0x6], + &[0x1, 0x2, 0x4, 0x7], + ]); + + // Insert all leaves + ctx.insert_leaves(&mut trie, &leaves); + + // Verify the upper subtrie has an extension node at the root with key 0x12 + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2])); + + // Verify the subtrie structure using fluent assertions + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2]), &[0x3, 0x4]) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &[0x4, 0x5]) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x4]), &[0x6, 0x7]) + .has_value(&leaves[0].0, &leaves[0].1) + .has_value(&leaves[1].0, &leaves[1].1) + .has_value(&leaves[2].0, &leaves[2].1) + .has_value(&leaves[3].0, &leaves[3].1); + + // Now update one of the leaves with a new value + let updated_path = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); + let (_, updated_value) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 100); + + trie.update_leaf(updated_path, updated_value.clone(), DefaultBlindedProvider).unwrap(); + + // Verify the subtrie structure is maintained and value is updated + // The branch structure should remain the same and all values should be present + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2]), &[0x3, 0x4]) + .has_value(&updated_path, &updated_value) + .has_value(&leaves[1].0, &leaves[1].1) + .has_value(&leaves[2].0, &leaves[2].1) + .has_value(&leaves[3].0, &leaves[3].1); + + // Add a new leaf that extends an existing branch + let (new_leaf_path, new_leaf_value) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x6], 200); + + trie.update_leaf(new_leaf_path, new_leaf_value.clone(), DefaultBlindedProvider).unwrap(); + + // Verify the branch at [0x1, 0x2, 0x3] now has an additional child + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &[0x4, 0x5, 0x6]) + .has_value(&new_leaf_path, &new_leaf_value); + } + + #[test] + fn test_update_subtrie_extension_node_subtrie() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // All leaves share the prefix [0x1, 0x2] to ensure they create a subtrie + // + // This should result in a trie with the following structure + // 0x: Extension { key: 0x123 } + // └── Subtrie (0x12): + // 0x123: Branch { state_mask: 0x3 | 0x4 } + // ├── 0x123: Leaf { key: 0x4 } + // └── 0x124: Leaf { key: 0x5 } + let leaves = ctx.create_test_leaves(&[&[0x1, 0x2, 0x3, 0x4], &[0x1, 0x2, 0x3, 0x5]]); + + // Insert all leaves + ctx.insert_leaves(&mut trie, &leaves); + + // Verify the upper subtrie has an extension node at the root with key 0x123 + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2, 0x3])); + + // Verify the lower subtrie structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) 
+            .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &[0x4, 0x5])
+            .has_value(&leaves[0].0, &leaves[0].1)
+            .has_value(&leaves[1].0, &leaves[1].1);
+    }
+
+    #[test]
+    fn update_subtrie_extension_node_cross_level() {
+        let ctx = ParallelSparseTrieTestContext;
+        let mut trie =
+            ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap();
+
+        // First, add multiple leaves that will create a subtrie structure
+        // All leaves share the prefix [0x1, 0x2] to ensure they create a branch node and subtrie
+        //
+        // This should result in a trie with the following structure
+        // 0x: Extension { key: 0x12 }
+        // └── Subtrie (0x12):
+        //   0x12: Branch { state_mask: 0x3 | 0x4 }
+        //   ├── 0x123: Leaf { key: 0x4 }
+        //   └── 0x124: Leaf { key: 0x5 }
+        let leaves = ctx.create_test_leaves(&[&[0x1, 0x2, 0x3, 0x4], &[0x1, 0x2, 0x4, 0x5]]);
+
+        // Insert all leaves
+        ctx.insert_leaves(&mut trie, &leaves);
+
+        // Verify the upper subtrie has an extension node at the root with key 0x12
+        ctx.assert_upper_subtrie(&trie)
+            .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2]));
+
+        // Verify the lower subtrie structure
+        ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2]))
+            .has_branch(&Nibbles::from_nibbles([0x1, 0x2]), &[0x3, 0x4])
+            .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &Nibbles::from_nibbles([0x4]))
+            .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x4]), &Nibbles::from_nibbles([0x5]))
+            .has_value(&leaves[0].0, &leaves[0].1)
+            .has_value(&leaves[1].0, &leaves[1].1);
+    }
+
+    #[test]
+    fn test_update_single_nibble_paths() {
+        let ctx = ParallelSparseTrieTestContext;
+        let mut trie =
+            ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap();
+
+        // Test edge case: single nibble paths that create branches in upper trie
+        //
+        // Final trie structure:
+        // Upper trie:
+        //   0x: Branch { state_mask: 0x1 | 0x2 | 0x4 | 0x8 }
+        //   ├── 0x0: Leaf { key: 0x }
+        //   ├── 0x1: Leaf { key: 0x }
+        //   ├── 0x2: Leaf { key: 0x }
+        //   └── 0x3: Leaf { key: 0x }
+
+        // Insert leaves with single nibble paths
+        let (leaf1_path, value1) = ctx.create_test_leaf([0x0], 1);
+        let (leaf2_path, value2) = ctx.create_test_leaf([0x1], 2);
+        let (leaf3_path, value3) = ctx.create_test_leaf([0x2], 3);
+        let (leaf4_path, value4) = ctx.create_test_leaf([0x3], 4);
+
+        trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap();
+        trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap();
+        trie.update_leaf(leaf3_path, value3.clone(), DefaultBlindedProvider).unwrap();
+        trie.update_leaf(leaf4_path, value4.clone(), DefaultBlindedProvider).unwrap();
+
+        // Verify upper trie has a branch at root with 4 children
+        ctx.assert_upper_subtrie(&trie)
+            .has_branch(&Nibbles::default(), &[0x0, 0x1, 0x2, 0x3])
+            .has_leaf(&Nibbles::from_nibbles([0x0]), &Nibbles::default())
+            .has_leaf(&Nibbles::from_nibbles([0x1]), &Nibbles::default())
+            .has_leaf(&Nibbles::from_nibbles([0x2]), &Nibbles::default())
+            .has_leaf(&Nibbles::from_nibbles([0x3]), &Nibbles::default())
+            .has_value(&leaf1_path, &value1)
+            .has_value(&leaf2_path, &value2)
+            .has_value(&leaf3_path, &value3)
+            .has_value(&leaf4_path, &value4);
+    }
+
+    #[test]
+    fn test_update_deep_extension_chain() {
+        let ctx = ParallelSparseTrieTestContext;
+        let mut trie =
+            ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap();
+
+        // Test edge case: deep extension chains that span multiple levels
+        //
+        // Final trie structure:
+        // Upper trie:
+        //   0x: Extension {
key: 0x111111 } + // └── Subtrie (0x11): pointer to lower subtrie + // + // Lower subtrie (0x11): + // 0x111111: Branch { state_mask: 0x1 | 0x2 } + // ├── 0x1111110: Leaf { key: 0x } + // └── 0x1111111: Leaf { key: 0x } + + // Create leaves with a long common prefix + let (leaf1_path, value1) = ctx.create_test_leaf([0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x0], 1); + let (leaf2_path, value2) = ctx.create_test_leaf([0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1], 2); + + trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); + + // Verify upper trie has extension with the full common prefix + ctx.assert_upper_subtrie(&trie).has_extension( + &Nibbles::default(), + &Nibbles::from_nibbles([0x1, 0x1, 0x1, 0x1, 0x1, 0x1]), + ); + + // Verify lower subtrie has branch structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x1])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x1, 0x1, 0x1, 0x1, 0x1]), &[0x0, 0x1]) + .has_leaf( + &Nibbles::from_nibbles([0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x0]), + &Nibbles::default(), + ) + .has_leaf( + &Nibbles::from_nibbles([0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1]), + &Nibbles::default(), + ) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2); + } + + #[test] + fn test_update_branch_with_all_nibbles() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test edge case: branch node with all 16 possible nibble children + // + // Final trie structure: + // Upper trie: + // 0x: Extension { key: 0xA } + // └── Subtrie (0xA0): pointer to lower subtrie + // + // Lower subtrie (0xA0): + // 0xA0: Branch { state_mask: 0xFFFF } (all 16 children) + // ├── 0xA00: Leaf { key: 0x } + // ├── 0xA01: Leaf { key: 0x } + // ├── 0xA02: Leaf { key: 0x } + // ... 
(all nibbles 0x0 through 0xF) + // └── 0xA0F: Leaf { key: 0x } + + // Create leaves for all 16 possible nibbles + let mut leaves = Vec::new(); + for nibble in 0x0..=0xF { + let (path, value) = ctx.create_test_leaf([0xA, 0x0, nibble], nibble as u64 + 1); + leaves.push((path, value)); + } + + // Insert all leaves + for (path, value) in &leaves { + trie.update_leaf(*path, value.clone(), DefaultBlindedProvider).unwrap(); + } + + // Verify upper trie structure + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0xA, 0x0])); + + // Verify lower subtrie has branch with all 16 children + let mut subtrie_assert = + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0xA, 0x0])).has_branch( + &Nibbles::from_nibbles([0xA, 0x0]), + &[0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF], + ); + + // Verify all leaves exist + for (i, (path, value)) in leaves.iter().enumerate() { + subtrie_assert = subtrie_assert + .has_leaf(&Nibbles::from_nibbles([0xA, 0x0, i as u8]), &Nibbles::default()) + .has_value(path, value); + } + } + + #[test] + fn test_update_creates_multiple_subtries() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test edge case: updates that create multiple subtries at once + // + // Final trie structure: + // Upper trie: + // 0x: Extension { key: 0x0 } + // └── 0x0: Branch { state_mask: 0xF } + // ├── Subtrie (0x00): pointer + // ├── Subtrie (0x01): pointer + // ├── Subtrie (0x02): pointer + // └── Subtrie (0x03): pointer + // + // Each lower subtrie has leaves: + // 0xXY: Leaf { key: 0xZ... } + + // Create leaves that will force multiple subtries + let leaves = vec![ + ctx.create_test_leaf([0x0, 0x0, 0x1, 0x2], 1), + ctx.create_test_leaf([0x0, 0x1, 0x3, 0x4], 2), + ctx.create_test_leaf([0x0, 0x2, 0x5, 0x6], 3), + ctx.create_test_leaf([0x0, 0x3, 0x7, 0x8], 4), + ]; + + // Insert all leaves + for (path, value) in &leaves { + trie.update_leaf(*path, value.clone(), DefaultBlindedProvider).unwrap(); + } + + // Verify upper trie has extension then branch + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x0])) + .has_branch(&Nibbles::from_nibbles([0x0]), &[0x0, 0x1, 0x2, 0x3]); + + // Verify each subtrie exists and contains its leaf + for (i, (leaf_path, leaf_value)) in leaves.iter().enumerate() { + let subtrie_path = Nibbles::from_nibbles([0x0, i as u8]); + ctx.assert_subtrie(&trie, subtrie_path) + .has_leaf( + &subtrie_path, + &Nibbles::from_nibbles(match i { + 0 => vec![0x1, 0x2], + 1 => vec![0x3, 0x4], + 2 => vec![0x5, 0x6], + 3 => vec![0x7, 0x8], + _ => unreachable!(), + }), + ) + .has_value(leaf_path, leaf_value); + } + } + + #[test] + fn test_update_extension_to_branch_transformation() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test edge case: extension node transforms to branch when split + // + // Initial state after first two leaves: + // Upper trie: + // 0x: Extension { key: 0xFF0 } + // └── Subtrie (0xFF): pointer + // + // After third leaf (0xF0...): + // Upper trie: + // 0x: Extension { key: 0xF } + // └── 0xF: Branch { state_mask: 0x10 | 0x8000 } + // ├── Subtrie (0xF0): pointer + // └── Subtrie (0xFF): pointer + + // First two leaves share prefix 0xFF0 + let (leaf1_path, value1) = ctx.create_test_leaf([0xF, 0xF, 0x0, 0x1], 1); + let (leaf2_path, 
value2) = ctx.create_test_leaf([0xF, 0xF, 0x0, 0x2], 2); + + trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); + + // Verify initial extension structure + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0xF, 0xF, 0x0])); + + // Add leaf that splits the extension + let (leaf3_path, value3) = ctx.create_test_leaf([0xF, 0x0, 0x0, 0x3], 3); + trie.update_leaf(leaf3_path, value3.clone(), DefaultBlindedProvider).unwrap(); + + // Verify transformed structure + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0xF])) + .has_branch(&Nibbles::from_nibbles([0xF]), &[0x0, 0xF]); + + // Verify subtries + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0xF, 0xF])) + .has_branch(&Nibbles::from_nibbles([0xF, 0xF, 0x0]), &[0x1, 0x2]) + .has_leaf(&Nibbles::from_nibbles([0xF, 0xF, 0x0, 0x1]), &Nibbles::default()) + .has_leaf(&Nibbles::from_nibbles([0xF, 0xF, 0x0, 0x2]), &Nibbles::default()) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2); + + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0xF, 0x0])) + .has_leaf(&Nibbles::from_nibbles([0xF, 0x0]), &Nibbles::from_nibbles([0x0, 0x3])) + .has_value(&leaf3_path, &value3); + } + + #[test] + fn test_update_long_shared_prefix_at_boundary() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test edge case: leaves with long shared prefix that ends exactly at 2-nibble boundary + // + // Final trie structure: + // Upper trie: + // 0x: Extension { key: 0xAB } + // └── Subtrie (0xAB): pointer to lower subtrie + // + // Lower subtrie (0xAB): + // 0xAB: Branch { state_mask: 0x1000 | 0x2000 } + // ├── 0xABC: Leaf { key: 0xDEF } + // └── 0xABD: Leaf { key: 0xEF0 } + + // Create leaves that share exactly 2 nibbles + let (leaf1_path, value1) = ctx.create_test_leaf([0xA, 0xB, 0xC, 0xD, 0xE, 0xF], 1); + let (leaf2_path, value2) = ctx.create_test_leaf([0xA, 0xB, 0xD, 0xE, 0xF, 0x0], 2); + + trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); + + // Verify upper trie structure + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0xA, 0xB])); + + // Verify lower subtrie structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0xA, 0xB])) + .has_branch(&Nibbles::from_nibbles([0xA, 0xB]), &[0xC, 0xD]) + .has_leaf( + &Nibbles::from_nibbles([0xA, 0xB, 0xC]), + &Nibbles::from_nibbles([0xD, 0xE, 0xF]), + ) + .has_leaf( + &Nibbles::from_nibbles([0xA, 0xB, 0xD]), + &Nibbles::from_nibbles([0xE, 0xF, 0x0]), + ) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2); + } + + #[test] + fn test_update_branch_to_extension_collapse() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test creating a trie with leaves that share a long common prefix + // + // Initial state with 3 leaves (0x1234, 0x2345, 0x2356): + // Upper trie: + // 0x: Branch { state_mask: 0x6 } + // ├── 0x1: Leaf { key: 0x234 } + // └── 0x2: Extension { key: 0x3 } + // └── Subtrie (0x23): pointer + // Lower subtrie (0x23): + // 0x23: Branch { state_mask: 0x30 } + // ├── 0x234: Leaf { key: 0x5 } + // └── 0x235: Leaf { key: 0x6 } + // 
+ // Then we create a new trie with leaves (0x1234, 0x1235, 0x1236): + // Expected structure: + // Upper trie: + // 0x: Extension { key: 0x123 } + // └── Subtrie (0x12): pointer + // Lower subtrie (0x12): + // 0x123: Branch { state_mask: 0x70 } // bits 4, 5, 6 set + // ├── 0x1234: Leaf { key: 0x } + // ├── 0x1235: Leaf { key: 0x } + // └── 0x1236: Leaf { key: 0x } + + // Create initial leaves + let (leaf1_path, value1) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 1); + let (leaf2_path, value2) = ctx.create_test_leaf([0x2, 0x3, 0x4, 0x5], 2); + let (leaf3_path, value3) = ctx.create_test_leaf([0x2, 0x3, 0x5, 0x6], 3); + + trie.update_leaf(leaf1_path, value1, DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf2_path, value2, DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf3_path, value3, DefaultBlindedProvider).unwrap(); + + // Verify initial structure has branch at root + ctx.assert_upper_subtrie(&trie).has_branch(&Nibbles::default(), &[0x1, 0x2]); + + // Now update to create a pattern where extension is more efficient + // Replace leaves to all share prefix 0x123 + let (new_leaf1_path, new_value1) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 10); + let (new_leaf2_path, new_value2) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x5], 11); + let (new_leaf3_path, new_value3) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x6], 12); + + // Clear and add new leaves + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + trie.update_leaf(new_leaf1_path, new_value1.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(new_leaf2_path, new_value2.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(new_leaf3_path, new_value3.clone(), DefaultBlindedProvider).unwrap(); + + // Verify new structure has extension + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2, 0x3])); + + // Verify lower subtrie path was correctly updated to 0x123 + ctx.assert_subtrie_path(&trie, [0x1, 0x2], [0x1, 0x2, 0x3]); + + // Verify lower subtrie - all three leaves should be properly inserted + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &[0x4, 0x5, 0x6]) // All three children + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]), &Nibbles::default()) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x5]), &Nibbles::default()) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x6]), &Nibbles::default()) + .has_value(&new_leaf1_path, &new_value1) + .has_value(&new_leaf2_path, &new_value2) + .has_value(&new_leaf3_path, &new_value3); + } + + #[test] + fn test_update_shared_prefix_patterns() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test edge case: different patterns of shared prefixes + // + // Final trie structure: + // Upper trie: + // 0x: Branch { state_mask: 0x6 } + // ├── 0x1: Leaf { key: 0x234 } + // └── 0x2: Extension { key: 0x3 } + // └── Subtrie (0x23): pointer + // + // Lower subtrie (0x23): + // 0x23: Branch { state_mask: 0x10 | 0x20 } + // ├── 0x234: Leaf { key: 0x5 } + // └── 0x235: Leaf { key: 0x6 } + + // Create leaves with different shared prefix patterns + let (leaf1_path, value1) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 1); + let (leaf2_path, value2) = ctx.create_test_leaf([0x2, 0x3, 0x4, 0x5], 2); + let (leaf3_path, value3) = ctx.create_test_leaf([0x2, 0x3, 0x5, 0x6], 3); + + 
trie.update_leaf(leaf1_path, value1, DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf3_path, value3.clone(), DefaultBlindedProvider).unwrap(); + + // Verify upper trie structure + ctx.assert_upper_subtrie(&trie) + .has_branch(&Nibbles::default(), &[0x1, 0x2]) + .has_leaf(&Nibbles::from_nibbles([0x1]), &Nibbles::from_nibbles([0x2, 0x3, 0x4])) + .has_extension(&Nibbles::from_nibbles([0x2]), &Nibbles::from_nibbles([0x3])); + + // Verify lower subtrie structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x2, 0x3])) + .has_branch(&Nibbles::from_nibbles([0x2, 0x3]), &[0x4, 0x5]) + .has_leaf(&Nibbles::from_nibbles([0x2, 0x3, 0x4]), &Nibbles::from_nibbles([0x5])) + .has_leaf(&Nibbles::from_nibbles([0x2, 0x3, 0x5]), &Nibbles::from_nibbles([0x6])) + .has_value(&leaf2_path, &value2) + .has_value(&leaf3_path, &value3); + } + + #[test] + fn test_progressive_branch_creation() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test starting with a single leaf and progressively adding leaves + // that create branch nodes at shorter and shorter paths + // + // Step 1: Add leaf at 0x12345 + // Upper trie: + // 0x: Leaf { key: 0x12345 } + // + // Step 2: Add leaf at 0x12346 + // Upper trie: + // 0x: Extension { key: 0x1234 } + // └── Subtrie (0x12): pointer + // Lower subtrie (0x12): + // 0x1234: Branch { state_mask: 0x60 } // bits 5 and 6 set + // ├── 0x12345: Leaf { key: 0x } + // └── 0x12346: Leaf { key: 0x } + // + // Step 3: Add leaf at 0x1235 + // Lower subtrie (0x12) updates to: + // 0x123: Branch { state_mask: 0x30 } // bits 4 and 5 set + // ├── 0x1234: Branch { state_mask: 0x60 } + // │ ├── 0x12345: Leaf { key: 0x } + // │ └── 0x12346: Leaf { key: 0x } + // └── 0x1235: Leaf { key: 0x } + // + // Step 4: Add leaf at 0x124 + // Lower subtrie (0x12) updates to: + // 0x12: Branch { state_mask: 0x18 } // bits 3 and 4 set + // ├── 0x123: Branch { state_mask: 0x30 } + // │ ├── 0x1234: Branch { state_mask: 0x60 } + // │ │ ├── 0x12345: Leaf { key: 0x } + // │ │ └── 0x12346: Leaf { key: 0x } + // │ └── 0x1235: Leaf { key: 0x } + // └── 0x124: Leaf { key: 0x } + + // Step 1: Add first leaf - initially stored as leaf in upper trie + let (leaf1_path, value1) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4, 0x5], 1); + trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap(); + + // Verify leaf node in upper trie (optimized single-leaf case) + ctx.assert_upper_subtrie(&trie) + .has_leaf(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5])) + .has_value(&leaf1_path, &value1); + + // Step 2: Add leaf at 0x12346 - creates branch at 0x1234 + let (leaf2_path, value2) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4, 0x6], 2); + trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); + + // Verify extension now goes to 0x1234 + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4])); + + // Verify subtrie path updated to 0x1234 + ctx.assert_subtrie_path(&trie, [0x1, 0x2], [0x1, 0x2, 0x3, 0x4]); + + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]), &[0x5, 0x6]) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5]), &Nibbles::default()) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x6]), &Nibbles::default()) + 
.has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2); + + // Step 3: Add leaf at 0x1235 - creates branch at 0x123 + let (leaf3_path, value3) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x5], 3); + trie.update_leaf(leaf3_path, value3.clone(), DefaultBlindedProvider).unwrap(); + + // Verify extension now goes to 0x123 + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2, 0x3])); + + // Verify subtrie path updated to 0x123 + ctx.assert_subtrie_path(&trie, [0x1, 0x2], [0x1, 0x2, 0x3]); + + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &[0x4, 0x5]) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]), &[0x5, 0x6]) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x5]), &Nibbles::default()) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2) + .has_value(&leaf3_path, &value3); + + // Step 4: Add leaf at 0x124 - creates branch at 0x12 (subtrie root) + let (leaf4_path, value4) = ctx.create_test_leaf([0x1, 0x2, 0x4], 4); + trie.update_leaf(leaf4_path, value4.clone(), DefaultBlindedProvider).unwrap(); + + // Verify extension now goes to 0x12 + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x2])); + + // Verify subtrie path updated to 0x12 + ctx.assert_subtrie_path(&trie, [0x1, 0x2], [0x1, 0x2]); + + // Verify final structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2]), &[0x3, 0x4]) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3]), &[0x4, 0x5]) + .has_branch(&Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]), &[0x5, 0x6]) + .has_leaf(&Nibbles::from_nibbles([0x1, 0x2, 0x4]), &Nibbles::default()) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2) + .has_value(&leaf3_path, &value3) + .has_value(&leaf4_path, &value4); + } + + #[test] + fn test_update_max_depth_paths() { + let ctx = ParallelSparseTrieTestContext; + let mut trie = + ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); + + // Test edge case: very long paths (64 nibbles - max for addresses/storage) + // + // Final trie structure: + // Upper trie: + // 0x: Extension { key: 0xFF } + // └── Subtrie (0xFF): pointer + // + // Lower subtrie (0xFF): + // Has very long paths with slight differences at the end + + // Create two 64-nibble paths that differ only in the last nibble + let mut path1_nibbles = vec![0xF; 63]; + path1_nibbles.push(0x0); + let mut path2_nibbles = vec![0xF; 63]; + path2_nibbles.push(0x1); + + let (leaf1_path, value1) = ctx.create_test_leaf(&path1_nibbles, 1); + let (leaf2_path, value2) = ctx.create_test_leaf(&path2_nibbles, 2); + + trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); + + // The common prefix of 63 F's will create a very long extension + let extension_key = vec![0xF; 63]; + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles(&extension_key)); + + // Verify the subtrie has the branch at the end + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0xF, 0xF])) + .has_branch(&Nibbles::from_nibbles(&path1_nibbles[..63]), &[0x0, 0x1]) + .has_value(&leaf1_path, &value1) + .has_value(&leaf2_path, &value2); + } } From c2737957d7d3663f995d54f1c42049301e4a3947 Mon Sep 17 00:00:00 2001 From: PixelPilot <161360836+PixelPil0t1@users.noreply.github.com> Date: 
Thu, 3 Jul 2025 11:30:26 +0200 Subject: [PATCH 028/305] docs: update snapshot URL from downloads.merkle.io to snapshots.merkle.io (#17190) Co-authored-by: Matthias Seitz From c6e6a54d5bce914467b04b33b3322ad6d4112788 Mon Sep 17 00:00:00 2001 From: crStiv Date: Thu, 3 Jul 2025 12:46:32 +0300 Subject: [PATCH 029/305] docs: typos (#17168) --- docs/crates/stages.md | 4 ++-- docs/design/review.md | 4 ++-- docs/repo/labels.md | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/crates/stages.md b/docs/crates/stages.md index cfa2d5012d5..2c35e065c56 100644 --- a/docs/crates/stages.md +++ b/docs/crates/stages.md @@ -1,6 +1,6 @@ # Stages -The `stages` lib plays a central role in syncing the node, maintaining state, updating the database and more. The stages involved in the Reth pipeline are the `HeaderStage`, `BodyStage`, `SenderRecoveryStage`, and `ExecutionStage` (note that this list is non-exhaustive, and more pipeline stages will be added in the near future). Each of these stages are queued up and stored within the Reth pipeline. +The `stages` lib plays a central role in syncing the node, maintaining state, updating the database and more. The stages involved in the Reth pipeline are the `HeaderStage`, `BodyStage`, `SenderRecoveryStage`, and `ExecutionStage` (note that this list is non-exhaustive, and more pipeline stages will be added in the near future). Each of these stages is queued up and stored within the Reth pipeline. When the node is first started, a new `Pipeline` is initialized and all of the stages are added into `Pipeline.stages`. Then, the `Pipeline::run` function is called, which starts the pipeline, executing all of the stages continuously in an infinite loop. This process syncs the chain, keeping everything up to date with the chain tip. @@ -108,7 +108,7 @@ The `IndexAccountHistoryStage` builds indices for account history, tracking how ## FinishStage -The `FinishStage` is the final stage in the pipeline that performs cleanup and verification tasks. It ensures that all previous stages have completed successfully and that the node's state is consistent. This stage may also update various metrics and status indicators to reflect the completion of a sync cycle. +The `FinishStage` is the final stage in the pipeline that performs cleanup and verification tasks. It ensures that all previous stages have been completed successfully and that the node's state is consistent. This stage may also update various metrics and status indicators to reflect the completion of a sync cycle.
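A minimal sketch of the `Pipeline::run` loop described in this file, using simplified stand-in types rather than the real reth `Pipeline` and `Stage` APIs (which additionally carry checkpoints and typed stage input/output):

```rust
/// Simplified stand-in for a sync stage; the real reth trait is richer.
trait Stage {
    fn execute(&mut self) -> Result<(), String>;
}

/// Simplified stand-in for the pipeline holding its queued stages.
struct Pipeline {
    stages: Vec<Box<dyn Stage>>,
}

impl Pipeline {
    /// Runs every queued stage in order, repeatedly, keeping the node at the chain tip.
    fn run(&mut self) {
        loop {
            for stage in self.stages.iter_mut() {
                if let Err(err) = stage.execute() {
                    eprintln!("stage failed: {err}");
                    return;
                }
            }
        }
    }
}
```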
diff --git a/docs/design/review.md b/docs/design/review.md index 702ab7722f8..22a32ef904f 100644 --- a/docs/design/review.md +++ b/docs/design/review.md @@ -1,6 +1,6 @@ # Review of other codebases -This document contains some of our research in how other codebases designed various parts of their stack. +This document contains some of our research on how other codebases designed various parts of their stack. ## P2P @@ -18,7 +18,7 @@ This document contains some of our research in how other codebases designed vari ## Database -* [Erigon's DB walkthrough](https://github.com/ledgerwatch/erigon/blob/12ee33a492f5d240458822d052820d9998653a63/docs/programmers_guide/db_walkthrough.MD) contains an overview. They made the most noticeable improvements on storage reduction. +* [Erigon's DB walkthrough](https://github.com/ledgerwatch/erigon/blob/12ee33a492f5d240458822d052820d9998653a63/docs/programmers_guide/db_walkthrough.MD) contains an overview. They made the most noticeable improvements in storage reduction. * [Gio's erigon-db table macros](https://github.com/gio256/erigon-db) + [Akula's macros](https://github.com/akula-bft/akula/blob/74b172ee1d2d2a4f04ce057b5a76679c1b83df9c/src/kv/tables.rs#L61). ## Header Downloaders diff --git a/docs/repo/labels.md b/docs/repo/labels.md index 6772b828ffc..2c830194415 100644 --- a/docs/repo/labels.md +++ b/docs/repo/labels.md @@ -4,7 +4,7 @@ Each label in the repository has a description attached that describes what the There are 7 label categories in the repository: -- **Area labels**: These labels denote the general area of the project an issue or PR affects. These start with [`A-`][area]. +- **Area labels**: These labels denote the general area of the project that an issue or PR affects. These start with [`A-`][area]. - **Category labels**: These labels denote the type of issue or change being made, for example https://github.com/paradigmxyz/reth/labels/C-bug or https://github.com/paradigmxyz/reth/labels/C-enhancement. These start with [`C-`][category]. - **Difficulty labels**: These are reserved for the very easy or very hard issues. Any issue without one of these labels can be considered to be of "average difficulty". They start with [`D-`][difficulty]. - **Meta labels**: These start with [`M-`][meta] and convey meaning to the core contributors, usually about the release process. 
From c2a2d7d44927fb379d08577238618a2fac750149 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Thu, 3 Jul 2025 12:03:34 +0200 Subject: [PATCH 030/305] feat(trie): ParallelSparseTrie: Compute lower subtrie hashes in parallel (#17173) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/trie/sparse-parallel/Cargo.toml | 16 ++++++++++++++++ crates/trie/sparse-parallel/src/trie.rs | 19 +++++++++++++++++-- 3 files changed, 34 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 984db80c7be..b040ce6d09e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10611,6 +10611,7 @@ dependencies = [ "proptest-arbitrary-interop", "rand 0.8.5", "rand 0.9.1", + "rayon", "reth-execution-errors", "reth-primitives-traits", "reth-trie", diff --git a/crates/trie/sparse-parallel/Cargo.toml b/crates/trie/sparse-parallel/Cargo.toml index 21764ff429f..039f6d82a5f 100644 --- a/crates/trie/sparse-parallel/Cargo.toml +++ b/crates/trie/sparse-parallel/Cargo.toml @@ -25,6 +25,7 @@ alloy-rlp.workspace = true # misc smallvec.workspace = true +rayon = { workspace = true, optional = true } [dev-dependencies] # reth @@ -33,6 +34,7 @@ reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } reth-trie.workspace = true reth-trie-sparse = { workspace = true, features = ["test-utils"] } +# misc arbitrary.workspace = true assert_matches.workspace = true itertools.workspace = true @@ -40,3 +42,17 @@ proptest-arbitrary-interop.workspace = true proptest.workspace = true rand.workspace = true rand_08.workspace = true + +[features] +default = ["std"] +std = [ + "dep:rayon", + "alloy-primitives/std", + "alloy-rlp/std", + "alloy-trie/std", + "reth-execution-errors/std", + "reth-primitives-traits/std", + "reth-trie-common/std", + "reth-trie-sparse/std", + "tracing/std", +] diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 96fccea84b7..e4951b9550b 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -769,13 +769,28 @@ impl ParallelSparseTrie { // Update the prefix set with the keys that didn't have matching subtries self.prefix_set = unchanged_prefix_set; - // Update subtrie hashes in parallel - // TODO: call `update_hashes` on each subtrie in parallel let (tx, rx) = mpsc::channel(); + + #[cfg(not(feature = "std"))] + // Update subtrie hashes serially if nostd for ChangedSubtrie { index, mut subtrie, mut prefix_set } in subtries { subtrie.update_hashes(&mut prefix_set); tx.send((index, subtrie)).unwrap(); } + + #[cfg(feature = "std")] + // Update subtrie hashes in parallel + { + use rayon::iter::{IntoParallelIterator, ParallelIterator}; + subtries + .into_par_iter() + .map(|ChangedSubtrie { index, mut subtrie, mut prefix_set }| { + subtrie.update_hashes(&mut prefix_set); + (index, subtrie) + }) + .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap()); + } + drop(tx); // Return updated subtries back to the trie From d949061fc02390ee28ae098de5e915e12968f60a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 3 Jul 2025 15:09:29 +0200 Subject: [PATCH 031/305] chore: bump inspectors (#17198) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b040ce6d09e..e363630d5d5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10758,9 +10758,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.26.0" +version = "0.26.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8c42441fb05ac958e69262bd86841f8a91220e6794f9a0b99db1e1af51d8013e" +checksum = "c7b99a2332cf8eed9e9a22fffbf76dfadc99d2c45de6ae6431a1eb9f657dd97a" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", diff --git a/Cargo.toml b/Cargo.toml index 0cb3085eda2..0c9204feb8e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -461,7 +461,7 @@ revm-context = { version = "8.0.1", default-features = false } revm-context-interface = { version = "8.0.0", default-features = false } revm-database-interface = { version = "7.0.0", default-features = false } op-revm = { version = "8.0.1", default-features = false } -revm-inspectors = "0.26.0" +revm-inspectors = "0.26.5" # eth alloy-chains = { version = "0.2.0", default-features = false } From 8c38c8b33afec8f300f83c0ac0a6fbca5d3e70f3 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 3 Jul 2025 15:19:57 +0100 Subject: [PATCH 032/305] perf(trie): sparse trie trait (#17181) Co-authored-by: Claude --- crates/trie/sparse/src/lib.rs | 3 + crates/trie/sparse/src/traits.rs | 307 +++++++++++++++++++++++++++++++ crates/trie/sparse/src/trie.rs | 84 +-------- 3 files changed, 314 insertions(+), 80 deletions(-) create mode 100644 crates/trie/sparse/src/traits.rs diff --git a/crates/trie/sparse/src/lib.rs b/crates/trie/sparse/src/lib.rs index 617622d194f..220a712d8c8 100644 --- a/crates/trie/sparse/src/lib.rs +++ b/crates/trie/sparse/src/lib.rs @@ -11,6 +11,9 @@ pub use state::*; mod trie; pub use trie::*; +mod traits; +pub use traits::*; + pub mod blinded; #[cfg(feature = "metrics")] diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs new file mode 100644 index 00000000000..c707af23d11 --- /dev/null +++ b/crates/trie/sparse/src/traits.rs @@ -0,0 +1,307 @@ +//! Traits for sparse trie implementations. + +use core::fmt::Debug; + +use alloc::vec::Vec; +use alloy_primitives::{ + map::{HashMap, HashSet}, + B256, +}; +use alloy_trie::{BranchNodeCompact, TrieMask}; +use reth_execution_errors::SparseTrieResult; +use reth_trie_common::{Nibbles, TrieNode}; + +use crate::blinded::BlindedProvider; + +/// Trait defining common operations for revealed sparse trie implementations. +/// +/// This trait abstracts over different sparse trie implementations (serial vs parallel) +/// while providing a unified interface for the core trie operations needed by the +/// [`crate::SparseTrie`] enum. +pub trait SparseTrieInterface: Default + Debug { + /// Creates a new revealed sparse trie from the given root node. + /// + /// This function initializes the internal structures and then reveals the root. + /// It is a convenient method to create a trie when you already have the root node available. + /// + /// # Arguments + /// + /// * `root` - The root node of the trie + /// * `masks` - Trie masks for root branch node + /// * `retain_updates` - Whether to track updates + /// + /// # Returns + /// + /// Self if successful, or an error if revealing fails. + fn from_root(root: TrieNode, masks: TrieMasks, retain_updates: bool) -> SparseTrieResult; + + /// Configures the trie to have the given root node revealed. + /// + /// # Arguments + /// + /// * `root` - The root node to reveal + /// * `masks` - Trie masks for root branch node + /// * `retain_updates` - Whether to track updates + /// + /// # Returns + /// + /// Self if successful, or an error if revealing fails. + /// + /// # Panics + /// + /// May panic if the trie is not new/cleared, and has already revealed nodes. 
+    fn with_root(
+        self,
+        root: TrieNode,
+        masks: TrieMasks,
+        retain_updates: bool,
+    ) -> SparseTrieResult<Self>;
+
+    /// Configures the trie to retain information about updates.
+    ///
+    /// If `retain_updates` is true, the trie will record branch node updates
+    /// and deletions. This information can be used to efficiently update
+    /// an external database.
+    ///
+    /// # Arguments
+    ///
+    /// * `retain_updates` - Whether to track updates
+    ///
+    /// # Returns
+    ///
+    /// Self for method chaining.
+    fn with_updates(self, retain_updates: bool) -> Self;
+
+    /// Reserves capacity for additional trie nodes.
+    ///
+    /// # Arguments
+    ///
+    /// * `additional` - The number of additional trie nodes to reserve capacity for.
+    fn reserve_nodes(&mut self, additional: usize);
+
+    /// Reveals a trie node if it has not been revealed before.
+    ///
+    /// This function decodes a trie node and inserts it into the trie structure.
+    /// It handles different node types (leaf, extension, branch) by appropriately
+    /// adding them to the trie and recursively revealing their children.
+    ///
+    /// # Arguments
+    ///
+    /// * `path` - The path where the node should be revealed
+    /// * `node` - The trie node to reveal
+    /// * `masks` - Trie masks for branch nodes
+    ///
+    /// # Returns
+    ///
+    /// `Ok(())` if successful, or an error if the node was not revealed.
+    fn reveal_node(
+        &mut self,
+        path: Nibbles,
+        node: TrieNode,
+        masks: TrieMasks,
+    ) -> SparseTrieResult<()>;
+
+    /// Updates the value of a leaf node at the specified path.
+    ///
+    /// If the leaf doesn't exist, it will be created.
+    /// If it does exist, its value will be updated.
+    ///
+    /// # Arguments
+    ///
+    /// * `full_path` - The full path to the leaf
+    /// * `value` - The new value for the leaf
+    /// * `provider` - The blinded provider for resolving missing nodes
+    ///
+    /// # Returns
+    ///
+    /// `Ok(())` if successful, or an error if the update failed.
+    fn update_leaf<P: BlindedProvider>(
+        &mut self,
+        full_path: Nibbles,
+        value: Vec<u8>,
+        provider: P,
+    ) -> SparseTrieResult<()>;
+
+    /// Removes a leaf node at the specified path.
+    ///
+    /// This will also handle collapsing the trie structure as needed
+    /// (e.g., removing branch nodes that become unnecessary).
+    ///
+    /// # Arguments
+    ///
+    /// * `full_path` - The full path to the leaf to remove
+    /// * `provider` - The blinded provider for resolving missing nodes
+    ///
+    /// # Returns
+    ///
+    /// `Ok(())` if successful, or an error if the removal failed.
+    fn remove_leaf<P: BlindedProvider>(
+        &mut self,
+        full_path: &Nibbles,
+        provider: P,
+    ) -> SparseTrieResult<()>;
+
+    /// Calculates and returns the root hash of the trie.
+    ///
+    /// This processes any dirty nodes by updating their RLP encodings
+    /// and returns the root hash.
+    ///
+    /// # Returns
+    ///
+    /// The root hash of the trie.
+    fn root(&mut self) -> B256;
+
+    /// Recalculates and updates the RLP hashes of subtries deeper than a certain level. The level
+    /// is defined in the implementation.
+    ///
+    /// The root node is considered to be at level 0. This method is useful for optimizing
+    /// hash recalculations after localized changes to the trie structure.
+    fn update_subtrie_hashes(&mut self);
+
+    /// Retrieves a reference to the leaf value at the specified path.
+    ///
+    /// # Arguments
+    ///
+    /// * `full_path` - The full path to the leaf value
+    ///
+    /// # Returns
+    ///
+    /// A reference to the leaf value stored at the given full path, if it is revealed.
+    ///
+    /// Note: a value can exist in the full trie and this function still returns `None`
+    /// because the value has not been revealed.
+    ///
+    /// Hence a `None` indicates two possibilities:
+    /// - The value does not exist in the trie, so it cannot be revealed
+    /// - The value has not yet been revealed. In order to determine which is true, one would need
+    ///   an exclusion proof.
+    fn get_leaf_value(&self, full_path: &Nibbles) -> Option<&Vec<u8>>;
+
+    /// Attempts to find a leaf node at the specified path.
+    ///
+    /// This method traverses the trie from the root down to the given path, checking
+    /// if a leaf exists at that path. It can be used to verify the existence of a leaf
+    /// or to generate an exclusion proof (proof that a leaf does not exist).
+    ///
+    /// # Parameters
+    ///
+    /// - `full_path`: The path to search for.
+    /// - `expected_value`: Optional expected value. If provided, will verify the leaf value
+    ///   matches.
+    ///
+    /// # Returns
+    ///
+    /// - `Ok(LeafLookup::Exists)` if the leaf exists with the expected value.
+    /// - `Ok(LeafLookup::NonExistent)` if the leaf definitely does not exist (exclusion proof).
+    /// - `Err(LeafLookupError)` if the search encountered a blinded node or found a different
+    ///   value.
+    fn find_leaf(
+        &self,
+        full_path: &Nibbles,
+        expected_value: Option<&Vec<u8>>,
+    ) -> Result<LeafLookup, LeafLookupError>;
+
+    /// Consumes and returns the currently accumulated trie updates.
+    ///
+    /// This is useful when you want to apply the updates to an external database
+    /// and then start tracking a new set of updates.
+    ///
+    /// # Returns
+    ///
+    /// The accumulated updates, or an empty set if updates weren't being tracked.
+    fn take_updates(&mut self) -> SparseTrieUpdates;
+
+    /// Removes all nodes and values from the trie, resetting it to a blank state
+    /// with only an empty root node. This is used when a storage root is deleted.
+    ///
+    /// This should not be used when intending to re-use the trie for a fresh account/storage root;
+    /// use `clear` for that.
+    ///
+    /// Note: All previously tracked changes to the trie are also removed.
+    fn wipe(&mut self);
+
+    /// This clears all data structures in the sparse trie, keeping the backing data structures
+    /// allocated. A [`crate::SparseNode::Empty`] is inserted at the root.
+    ///
+    /// This is useful for reusing the trie without needing to reallocate memory.
+    fn clear(&mut self);
+}
+
+/// Struct for passing around branch node mask information.
+///
+/// Branch nodes can have up to 16 children (one for each nibble).
+/// The masks represent which children are stored in different ways:
+/// - `hash_mask`: Indicates which children are stored as hashes in the database
+/// - `tree_mask`: Indicates which children are complete subtrees stored in the database
+///
+/// These masks are essential for efficient trie traversal and serialization, as they
+/// determine how nodes should be encoded and stored on disk.
+#[derive(Debug)]
+pub struct TrieMasks {
+    /// Branch node hash mask, if any.
+    ///
+    /// When a bit is set, the corresponding child node's hash is stored in the trie.
+    ///
+    /// This mask enables selective hashing of child nodes.
+    pub hash_mask: Option<TrieMask>,
+    /// Branch node tree mask, if any.
+    ///
+    /// When a bit is set, the corresponding child subtree is stored in the database.
+    pub tree_mask: Option<TrieMask>,
+}
+
+impl TrieMasks {
+    /// Helper function, returns both fields `hash_mask` and `tree_mask` as [`None`]
+    pub const fn none() -> Self {
+        Self { hash_mask: None, tree_mask: None }
+    }
+}
+
+/// Tracks modifications to the sparse trie structure.
+///
+/// Maintains references to both modified and pruned/removed branches, enabling
+/// one to make batch updates to a persistent database.
+#[derive(Debug, Clone, Default, PartialEq, Eq)]
+pub struct SparseTrieUpdates {
+    /// Collection of updated intermediate nodes indexed by full path.
+    pub updated_nodes: HashMap<Nibbles, BranchNodeCompact>,
+    /// Collection of removed intermediate nodes indexed by full path.
+    pub removed_nodes: HashSet<Nibbles>,
+    /// Flag indicating whether the trie was wiped.
+    pub wiped: bool,
+}
+
+/// Error type for a leaf lookup operation
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum LeafLookupError {
+    /// The path leads to a blinded node, cannot determine if leaf exists.
+    /// This means the witness is not complete.
+    BlindedNode {
+        /// Path to the blinded node.
+        path: Nibbles,
+        /// Hash of the blinded node.
+        hash: B256,
+    },
+    /// The path leads to a leaf with a different value than expected.
+    /// This means the witness is malformed.
+    ValueMismatch {
+        /// Path to the leaf.
+        path: Nibbles,
+        /// Expected value.
+        expected: Option<Vec<u8>>,
+        /// Actual value found.
+        actual: Vec<u8>,
+    },
+}
+
+/// Success value for a leaf lookup operation
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum LeafLookup {
+    /// Leaf exists with expected value.
+    Exists,
+    /// Leaf does not exist (exclusion proof found).
+    NonExistent {
+        /// Path where the search diverged from the target path.
+        diverged_at: Nibbles,
+    },
+}
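[Editor's note: taken together, the new trait plus these helper types let callers drive any sparse trie implementation generically. A minimal sketch against the interface above; `DefaultBlindedProvider` and `TrieNode::EmptyRoot` appear elsewhere in this series, and `Nibbles` is assumed `Copy` as its use in this diff suggests:]

```rust
use alloy_primitives::B256;
use reth_execution_errors::SparseTrieResult;
use reth_trie_common::{Nibbles, TrieNode};
use reth_trie_sparse::{
    blinded::DefaultBlindedProvider, LeafLookup, SparseTrieInterface, TrieMasks,
};

/// Builds a one-leaf trie with any implementation of the trait and returns its
/// root hash; callers no longer need to name a concrete trie type.
fn single_leaf_root<T: SparseTrieInterface>(
    path: Nibbles,
    value: Vec<u8>,
) -> SparseTrieResult<B256> {
    // Reveal an empty root; no tree/hash masks are known for it yet.
    let mut trie = T::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true)?;

    // A fresh trie contains no blinded nodes, so the no-op default provider suffices.
    trie.update_leaf(path, value, DefaultBlindedProvider)?;

    // `find_leaf` now serves inclusion checks against the revealed trie; a
    // `NonExistent` result would be an exclusion proof, and a `BlindedNode`
    // error would mean the witness is incomplete.
    debug_assert!(matches!(trie.find_leaf(&path, None), Ok(LeafLookup::Exists)));

    Ok(trie.root())
}
```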
diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs
index fbb8b08c2d4..544f52554d4 100644
--- a/crates/trie/sparse/src/trie.rs
+++ b/crates/trie/sparse/src/trie.rs
@@ -1,4 +1,7 @@
-use crate::blinded::{BlindedProvider, RevealedNode};
+use crate::{
+    blinded::{BlindedProvider, RevealedNode},
+    LeafLookup, LeafLookupError, SparseTrieUpdates, TrieMasks,
+};
 use alloc::{
     borrow::Cow,
     boxed::Box,
@@ -22,36 +25,6 @@ use reth_trie_common::{
 };
 use smallvec::SmallVec;
 use tracing::trace;

-/// Struct for passing around branch node mask information.
-///
-/// Branch nodes can have up to 16 children (one for each nibble).
-/// The masks represent which children are stored in different ways:
-/// - `hash_mask`: Indicates which children are stored as hashes in the database
-/// - `tree_mask`: Indicates which children are complete subtrees stored in the database
-///
-/// These masks are essential for efficient trie traversal and serialization, as they
-/// determine how nodes should be encoded and stored on disk.
-#[derive(Debug)]
-pub struct TrieMasks {
-    /// Branch node hash mask, if any.
-    ///
-    /// When a bit is set, the corresponding child node's hash is stored in the trie.
-    ///
-    /// This mask enables selective hashing of child nodes.
-    pub hash_mask: Option<TrieMask>,
-    /// Branch node tree mask, if any.
-    ///
-    /// When a bit is set, the corresponding child subtree is stored in the database.
-    pub tree_mask: Option<TrieMask>,
-}
-
-impl TrieMasks {
-    /// Helper function, returns both fields `hash_mask` and `tree_mask` as [`None`]
-    pub const fn none() -> Self {
-        Self { hash_mask: None, tree_mask: None }
-    }
-}
-
 /// A sparse trie that is either in a "blind" state (no nodes are revealed, root node hash is
 /// unknown) or in a "revealed" state (root node has been revealed and the trie can be updated).
 ///
@@ -1265,41 +1238,6 @@ impl RevealedSparseTrie {
     }
 }

-/// Error type for a leaf lookup operation
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum LeafLookupError {
-    /// The path leads to a blinded node, cannot determine if leaf exists.
-    /// This means the witness is not complete.
-    BlindedNode {
-        /// Path to the blinded node.
-        path: Nibbles,
-        /// Hash of the blinded node.
-        hash: B256,
-    },
-    /// The path leads to a leaf with a different value than expected.
-    /// This means the witness is malformed.
-    ValueMismatch {
-        /// Path to the leaf.
-        path: Nibbles,
-        /// Expected value.
-        expected: Option<Vec<u8>>,
-        /// Actual value found.
-        actual: Vec<u8>,
-    },
-}
-
-/// Success value for a leaf lookup operation
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum LeafLookup {
-    /// Leaf exists with expected value.
-    Exists,
-    /// Leaf does not exist (exclusion proof found).
-    NonExistent {
-        /// Path where the search diverged from the target path.
-        diverged_at: Nibbles,
-    },
-}
-
 impl RevealedSparseTrie {
     /// Attempts to find a leaf node at the specified path.
     ///
@@ -2030,20 +1968,6 @@ pub struct RlpNodeStackItem {
     pub node_type: SparseNodeType,
 }

-/// Tracks modifications to the sparse trie structure.
-///
-/// Maintains references to both modified and pruned/removed branches, enabling
-/// one to make batch updates to a persistent database.
-#[derive(Debug, Clone, Default, PartialEq, Eq)]
-pub struct SparseTrieUpdates {
-    /// Collection of updated intermediate nodes indexed by full path.
-    pub updated_nodes: HashMap<Nibbles, BranchNodeCompact>,
-    /// Collection of removed intermediate nodes indexed by full path.
-    pub removed_nodes: HashSet<Nibbles>,
-    /// Flag indicating whether the trie was wiped.
-    pub wiped: bool,
-}
-
 impl SparseTrieUpdates {
     /// Create new wiped sparse trie updates.
pub fn wiped() -> Self { From a550025a8fd061a4b63d3d1cc6f0d46194bf4054 Mon Sep 17 00:00:00 2001 From: leopardracer <136604165+leopardracer@users.noreply.github.com> Date: Thu, 3 Jul 2025 17:24:03 +0300 Subject: [PATCH 033/305] docs: fix typo in trie test comment (#17199) --- crates/trie/sparse-parallel/src/trie.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index e4951b9550b..1cbcc2ca6ca 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -3761,7 +3761,7 @@ mod tests { ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); // First, add multiple leaves that will create a subtrie structure - // All leaves share the prefix [0x1, 0x2] to ensure they create a branch ndoe and subtrie + // All leaves share the prefix [0x1, 0x2] to ensure they create a branch node and subtrie // // This should result in a trie with the following structure // 0x: Extension { key: 0x12 } From 7a8a0da1a5c41f5447318adbf0ec936b3d665e39 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 3 Jul 2025 16:01:18 +0100 Subject: [PATCH 034/305] perf(trie): implement `SparseTrieInterface` for `RevealedSparseTrie` (#17191) Co-authored-by: Claude --- .../src/tree/payload_processor/sparse_trie.rs | 10 +- crates/stateless/src/trie.rs | 2 +- crates/trie/sparse/benches/rlp_node.rs | 2 +- crates/trie/sparse/src/state.rs | 7 +- crates/trie/sparse/src/trie.rs | 1875 ++++++++--------- crates/trie/trie/src/witness.rs | 1 + 6 files changed, 898 insertions(+), 999 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index 92115b40d94..eeb6acde2a0 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -19,10 +19,6 @@ use std::{ }; use tracing::{debug, trace, trace_span}; -/// The level below which the sparse trie hashes are calculated in -/// [`update_sparse_trie`]. -const SPARSE_TRIE_INCREMENTAL_LEVEL: usize = 2; - /// A task responsible for populating the sparse trie. pub(super) struct SparseTrieTask where @@ -261,16 +257,14 @@ where let elapsed_before = started_at.elapsed(); trace!( target: "engine::root::sparse", - level=SPARSE_TRIE_INCREMENTAL_LEVEL, - "Calculating intermediate nodes below trie level" + "Calculating subtries" ); - trie.calculate_below_level(SPARSE_TRIE_INCREMENTAL_LEVEL); + trie.calculate_subtries(); let elapsed = started_at.elapsed(); let below_level_elapsed = elapsed - elapsed_before; trace!( target: "engine::root::sparse", - level=SPARSE_TRIE_INCREMENTAL_LEVEL, ?below_level_elapsed, "Intermediate nodes calculated" ); diff --git a/crates/stateless/src/trie.rs b/crates/stateless/src/trie.rs index c8c6e652209..9cc95ff5848 100644 --- a/crates/stateless/src/trie.rs +++ b/crates/stateless/src/trie.rs @@ -11,7 +11,7 @@ use reth_trie_common::{HashedPostState, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE}; use reth_trie_sparse::{ blinded::{DefaultBlindedProvider, DefaultBlindedProviderFactory}, errors::SparseStateTrieResult, - SparseStateTrie, SparseTrie, + SparseStateTrie, SparseTrie, SparseTrieInterface, }; /// Trait for stateless trie implementations that can be used for stateless validation. 
diff --git a/crates/trie/sparse/benches/rlp_node.rs b/crates/trie/sparse/benches/rlp_node.rs index 2b6fadeda1f..cfffd614203 100644 --- a/crates/trie/sparse/benches/rlp_node.rs +++ b/crates/trie/sparse/benches/rlp_node.rs @@ -7,7 +7,7 @@ use proptest::{prelude::*, test_runner::TestRunner}; use rand::{seq::IteratorRandom, Rng}; use reth_testing_utils::generators; use reth_trie::Nibbles; -use reth_trie_sparse::{blinded::DefaultBlindedProvider, RevealedSparseTrie}; +use reth_trie_sparse::{blinded::DefaultBlindedProvider, RevealedSparseTrie, SparseTrieInterface}; fn update_rlp_node_level(c: &mut Criterion) { let mut rng = generators::rng(); diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 7eaa99e500f..d46c15560ed 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,5 +1,6 @@ use crate::{ blinded::{BlindedProvider, BlindedProviderFactory}, + traits::SparseTrieInterface, LeafLookup, RevealedSparseTrie, SparseTrie, TrieMasks, }; use alloc::{collections::VecDeque, vec::Vec}; @@ -583,12 +584,12 @@ impl SparseStateTrie { Ok(()) } - /// Calculates the hashes of the nodes below the provided level. + /// Calculates the hashes of subtries. /// /// If the trie has not been revealed, this function does nothing. - pub fn calculate_below_level(&mut self, level: usize) { + pub fn calculate_subtries(&mut self) { if let SparseTrie::Revealed(trie) = &mut self.state { - trie.update_rlp_node_level(level); + trie.update_subtrie_hashes(); } } diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 544f52554d4..4d93aacdeb2 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,6 +1,6 @@ use crate::{ blinded::{BlindedProvider, RevealedNode}, - LeafLookup, LeafLookupError, SparseTrieUpdates, TrieMasks, + LeafLookup, LeafLookupError, SparseTrieInterface, SparseTrieUpdates, TrieMasks, }; use alloc::{ borrow::Cow, @@ -25,6 +25,10 @@ use reth_trie_common::{ use smallvec::SmallVec; use tracing::trace; +/// The level below which the sparse trie hashes are calculated in +/// [`RevealedSparseTrie::update_subtrie_hashes`]. +const SPARSE_TRIE_SUBTRIE_HASHES_LEVEL: usize = 2; + /// A sparse trie that is either in a "blind" state (no nodes are revealed, root node hash is /// unknown) or in a "revealed" state (root node has been revealed and the trie can be updated). /// @@ -374,41 +378,12 @@ impl Default for RevealedSparseTrie { } } -impl RevealedSparseTrie { - /// Creates a new revealed sparse trie from the given root node. - /// - /// This function initializes the internal structures and then reveals the root. - /// It is a convenient method to create a [`RevealedSparseTrie`] when you already have - /// the root node available. - /// - /// # Returns - /// - /// A [`RevealedSparseTrie`] if successful, or an error if revealing fails. - pub fn from_root( - root: TrieNode, - masks: TrieMasks, - retain_updates: bool, - ) -> SparseTrieResult { +impl SparseTrieInterface for RevealedSparseTrie { + fn from_root(root: TrieNode, masks: TrieMasks, retain_updates: bool) -> SparseTrieResult { Self::default().with_root(root, masks, retain_updates) } - /// Configures the trie to retain information about updates. - /// - /// If `retain_updates` is true, the trie will record branch node updates and deletions. - /// This information can then be used to efficiently update an external database. 
- pub fn with_updates(mut self, retain_updates: bool) -> Self { - if retain_updates { - self.updates = Some(SparseTrieUpdates::default()); - } - self - } - - /// Configures the trie to have the given root node revealed. - /// - /// ## Panics - /// - /// - If called on a [`RevealedSparseTrie`] which was not newly created or cleared. - pub fn with_root( + fn with_root( mut self, root: TrieNode, masks: TrieMasks, @@ -419,64 +394,25 @@ impl RevealedSparseTrie { // A fresh/cleared `RevealedSparseTrie` has a `SparseNode::Empty` at its root. Delete that // so we can reveal the new root node. let path = Nibbles::default(); - let _removed_root = self.nodes.remove(&path).unwrap(); + let _removed_root = self.nodes.remove(&path).expect("root node should exist"); debug_assert_eq!(_removed_root, SparseNode::Empty); self.reveal_node(path, root, masks)?; Ok(self) } - /// Returns a reference to the current sparse trie updates. - /// - /// If no updates have been made/recorded, returns an empty update set. - pub fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { - self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed) - } - - /// Returns an immutable reference to all nodes in the sparse trie. - pub const fn nodes_ref(&self) -> &HashMap { - &self.nodes - } - - /// Retrieves a reference to the leaf value stored at the given key path, if it is revealed. - /// - /// This method efficiently retrieves values from the trie without traversing - /// the entire node structure, as values are stored in a separate map. - /// - /// Note: a value can exist in the full trie and this function still returns `None` - /// because the value has not been revealed. - /// Hence a `None` indicates two possibilities: - /// - The value does not exists in the trie, so it cannot be revealed - /// - The value has not yet been revealed. In order to determine which is true, one would need - /// an exclusion proof. - pub fn get_leaf_value(&self, path: &Nibbles) -> Option<&Vec> { - self.values.get(path) - } - - /// Consumes and returns the currently accumulated trie updates. - /// - /// This is useful when you want to apply the updates to an external database, - /// and then start tracking a new set of updates. - pub fn take_updates(&mut self) -> SparseTrieUpdates { - self.updates.take().unwrap_or_default() + fn with_updates(mut self, retain_updates: bool) -> Self { + if retain_updates { + self.updates = Some(SparseTrieUpdates::default()); + } + self } - /// Reserves capacity in the nodes map for at least `additional` more nodes. - pub fn reserve_nodes(&mut self, additional: usize) { + fn reserve_nodes(&mut self, additional: usize) { self.nodes.reserve(additional); } - /// Reveals a trie node if it has not been revealed before. - /// - /// This internal function decodes a trie node and inserts it into the nodes map. - /// It handles different node types (leaf, extension, branch) by appropriately - /// adding them to the trie structure and recursively revealing their children. - /// - /// - /// # Returns - /// - /// `Ok(())` if successful, or an error if node was not revealed. - pub fn reveal_node( + fn reveal_node( &mut self, path: Nibbles, node: TrieNode, @@ -619,647 +555,381 @@ impl RevealedSparseTrie { Ok(()) } - /// Reveals either a node or its hash placeholder based on the provided child data. - /// - /// When traversing the trie, we often encounter references to child nodes that - /// are either directly embedded or represented by their hash. This method - /// handles both cases: - /// - /// 1. 
If the child data represents a hash (32+1=33 bytes), store it as a hash node - /// 2. Otherwise, decode the data as a [`TrieNode`] and recursively reveal it using - /// `reveal_node` - /// - /// # Returns - /// - /// Returns `Ok(())` if successful, or an error if the node cannot be revealed. - /// - /// # Error Handling - /// - /// Will error if there's a conflict between a new hash node and an existing one - /// at the same path - fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> { - if child.len() == B256::len_bytes() + 1 { - let hash = B256::from_slice(&child[1..]); - match self.nodes.entry(path) { - Entry::Occupied(entry) => match entry.get() { - // Hash node with a different hash can't be handled. - SparseNode::Hash(previous_hash) if previous_hash != &hash => { - return Err(SparseTrieErrorKind::Reveal { - path: *entry.key(), - node: Box::new(SparseNode::Hash(hash)), - } - .into()) - } - _ => {} - }, - Entry::Vacant(entry) => { - entry.insert(SparseNode::Hash(hash)); - } - } + fn update_leaf( + &mut self, + full_path: Nibbles, + value: Vec, + provider: P, + ) -> SparseTrieResult<()> { + self.prefix_set.insert(full_path); + let existing = self.values.insert(full_path, value); + if existing.is_some() { + // trie structure unchanged, return immediately return Ok(()) } - self.reveal_node(path, TrieNode::decode(&mut &child[..])?, TrieMasks::none()) - } - - /// Traverse the trie from the root down to the leaf at the given path, - /// removing and collecting all nodes along that path. - /// - /// This helper function is used during leaf removal to extract the nodes of the trie - /// that will be affected by the deletion. These nodes are then re-inserted and modified - /// as needed (collapsing extension nodes etc) given that the leaf has now been removed. - /// - /// # Returns - /// - /// Returns a vector of [`RemovedSparseNode`] representing the nodes removed during the - /// traversal. - /// - /// # Errors - /// - /// Returns an error if a blinded node or an empty node is encountered unexpectedly, - /// as these prevent proper removal of the leaf. - fn take_nodes_for_path(&mut self, path: &Nibbles) -> SparseTrieResult> { - let mut current = Nibbles::default(); // Start traversal from the root - let mut nodes = Vec::new(); // Collect traversed nodes - - while let Some(node) = self.nodes.remove(¤t) { - match &node { - SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), - &SparseNode::Hash(hash) => { + let mut current = Nibbles::default(); + while let Some(node) = self.nodes.get_mut(¤t) { + match node { + SparseNode::Empty => { + *node = SparseNode::new_leaf(full_path); + break + } + &mut SparseNode::Hash(hash) => { return Err(SparseTrieErrorKind::BlindedNode { path: current, hash }.into()) } - SparseNode::Leaf { key: _key, .. } => { - // Leaf node is always the one that we're deleting, and no other leaf nodes can - // be found during traversal. + SparseNode::Leaf { key: current_key, .. 
} => { + current.extend(current_key); - #[cfg(debug_assertions)] - { - let mut current = current; - current.extend(_key); - assert_eq!(¤t, path); + // this leaf is being updated + if current == full_path { + unreachable!("we already checked leaf presence in the beginning"); } - nodes.push(RemovedSparseNode { - path: current, - node, - unset_branch_nibble: None, - }); - break + // find the common prefix + let common = current.common_prefix_length(&full_path); + + // update existing node + let new_ext_key = current.slice(current.len() - current_key.len()..common); + *node = SparseNode::new_ext(new_ext_key); + + // create a branch node and corresponding leaves + self.nodes.reserve(3); + self.nodes.insert( + current.slice(..common), + SparseNode::new_split_branch( + current.get_unchecked(common), + full_path.get_unchecked(common), + ), + ); + self.nodes.insert( + full_path.slice(..=common), + SparseNode::new_leaf(full_path.slice(common + 1..)), + ); + self.nodes.insert( + current.slice(..=common), + SparseNode::new_leaf(current.slice(common + 1..)), + ); + + break; } SparseNode::Extension { key, .. } => { - #[cfg(debug_assertions)] - { - let mut current = current; - current.extend(key); - assert!( - path.starts_with(¤t), - "path: {path:?}, current: {current:?}, key: {key:?}", - ); - } - - let path = current; current.extend(key); - nodes.push(RemovedSparseNode { path, node, unset_branch_nibble: None }); - } - SparseNode::Branch { state_mask, .. } => { - let nibble = path.get_unchecked(current.len()); - debug_assert!( - state_mask.is_bit_set(nibble), - "current: {current:?}, path: {path:?}, nibble: {nibble:?}, state_mask: {state_mask:?}", - ); - // If the branch node has a child that is a leaf node that we're removing, - // we need to unset this nibble. - // Any other branch nodes will not require unsetting the nibble, because - // deleting one leaf node can not remove the whole path - // where the branch node is located. - let mut child_path = current; - child_path.push_unchecked(nibble); - let unset_branch_nibble = self - .nodes - .get(&child_path) - .is_some_and(move |node| match node { - SparseNode::Leaf { key, .. } => { - // Get full path of the leaf node - child_path.extend(key); - &child_path == path + if !full_path.starts_with(¤t) { + // find the common prefix + let common = current.common_prefix_length(&full_path); + *key = current.slice(current.len() - key.len()..common); + + // If branch node updates retention is enabled, we need to query the + // extension node child to later set the hash mask for a parent branch node + // correctly. + if self.updates.is_some() { + // Check if the extension node child is a hash that needs to be revealed + if self.nodes.get(¤t).unwrap().is_hash() { + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.blinded_node(¤t)? 
+ { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::sparse", + ?current, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing extension node child", + ); + self.reveal_node( + current, + decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + } } - _ => false, - }) - .then_some(nibble); + } - nodes.push(RemovedSparseNode { path: current, node, unset_branch_nibble }); + // create state mask for new branch node + // NOTE: this might overwrite the current extension node + self.nodes.reserve(3); + let branch = SparseNode::new_split_branch( + current.get_unchecked(common), + full_path.get_unchecked(common), + ); + self.nodes.insert(current.slice(..common), branch); + + // create new leaf + let new_leaf = SparseNode::new_leaf(full_path.slice(common + 1..)); + self.nodes.insert(full_path.slice(..=common), new_leaf); + + // recreate extension to previous child if needed + let key = current.slice(common + 1..); + if !key.is_empty() { + self.nodes.insert(current.slice(..=common), SparseNode::new_ext(key)); + } + break; + } + } + SparseNode::Branch { state_mask, .. } => { + let nibble = full_path.get_unchecked(current.len()); current.push_unchecked(nibble); + if !state_mask.is_bit_set(nibble) { + state_mask.set_bit(nibble); + let new_leaf = SparseNode::new_leaf(full_path.slice(current.len()..)); + self.nodes.insert(current, new_leaf); + break; + } } - } + }; } - Ok(nodes) + Ok(()) } - /// Removes all nodes and values from the trie, resetting it to a blank state - /// with only an empty root node. This is used when a storage root is deleted. - /// - /// This should not be used when intending to re-use the trie for a fresh account/storage root; - /// use [`Self::clear`] for that. - /// - /// Note: All previously tracked changes to the trie are also removed. - pub fn wipe(&mut self) { - self.nodes = HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]); - self.values = HashMap::default(); - self.prefix_set = PrefixSetMut::all(); - self.updates = self.updates.is_some().then(SparseTrieUpdates::wiped); - } - - /// This clears all data structures in the sparse trie, keeping the backing data structures - /// allocated. A [`SparseNode::Empty`] is inserted at the root. - /// - /// This is useful for reusing the trie without needing to reallocate memory. - pub fn clear(&mut self) { - self.nodes.clear(); - self.nodes.insert(Nibbles::default(), SparseNode::Empty); - - self.branch_node_tree_masks.clear(); - self.branch_node_hash_masks.clear(); - self.values.clear(); - self.prefix_set.clear(); - self.updates = None; - self.rlp_buf.clear(); - } + fn remove_leaf( + &mut self, + full_path: &Nibbles, + provider: P, + ) -> SparseTrieResult<()> { + if self.values.remove(full_path).is_none() { + if let Some(&SparseNode::Hash(hash)) = self.nodes.get(full_path) { + // Leaf is present in the trie, but it's blinded. + return Err(SparseTrieErrorKind::BlindedNode { path: *full_path, hash }.into()) + } - /// Calculates and returns the root hash of the trie. - /// - /// Before computing the hash, this function processes any remaining (dirty) nodes by - /// updating their RLP encodings. The root hash is either: - /// 1. The cached hash (if no dirty nodes were found) - /// 2. 
The keccak256 hash of the root node's RLP representation - pub fn root(&mut self) -> B256 { - // Take the current prefix set - let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); - let rlp_node = self.rlp_node_allocate(&mut prefix_set); - if let Some(root_hash) = rlp_node.as_hash() { - root_hash - } else { - keccak256(rlp_node) + trace!(target: "trie::sparse", ?full_path, "Leaf node is not present in the trie"); + // Leaf is not present in the trie. + return Ok(()) } - } + self.prefix_set.insert(*full_path); - /// Recalculates and updates the RLP hashes of nodes deeper than or equal to the specified - /// `depth`. - /// - /// The root node is considered to be at level 0. This method is useful for optimizing - /// hash recalculations after localized changes to the trie structure: - /// - /// This function identifies all nodes that have changed (based on the prefix set) at the given - /// depth and recalculates their RLP representation. - pub fn update_rlp_node_level(&mut self, depth: usize) { - // Take the current prefix set - let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); - let mut buffers = RlpNodeBuffers::default(); + // If the path wasn't present in `values`, we still need to walk the trie and ensure that + // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry + // in `nodes`, but not in the `values`. - // Get the nodes that have changed at the given depth. - let (targets, new_prefix_set) = self.get_changed_nodes_at_depth(&mut prefix_set, depth); - // Update the prefix set to the prefix set of the nodes that still need to be updated. - self.prefix_set = new_prefix_set; + let mut removed_nodes = self.take_nodes_for_path(full_path)?; + // Pop the first node from the stack which is the leaf node we want to remove. + let mut child = removed_nodes.pop().expect("leaf exists"); + #[cfg(debug_assertions)] + { + let mut child_path = child.path; + let SparseNode::Leaf { key, .. } = &child.node else { panic!("expected leaf node") }; + child_path.extend(key); + assert_eq!(&child_path, full_path); + } - trace!(target: "trie::sparse", ?depth, ?targets, "Updating nodes at depth"); + // If we don't have any other removed nodes, insert an empty node at the root. + if removed_nodes.is_empty() { + debug_assert!(self.nodes.is_empty()); + self.nodes.insert(Nibbles::default(), SparseNode::Empty); - let mut temp_rlp_buf = core::mem::take(&mut self.rlp_buf); - for (level, path) in targets { - buffers.path_stack.push(RlpNodePathStackItem { - level, - path, - is_in_prefix_set: Some(true), - }); - self.rlp_node(&mut prefix_set, &mut buffers, &mut temp_rlp_buf); + return Ok(()) } - self.rlp_buf = temp_rlp_buf; - } - - /// Returns a list of (level, path) tuples identifying the nodes that have changed at the - /// specified depth, along with a new prefix set for the paths above the provided depth that - /// remain unchanged. - /// - /// Leaf nodes with a depth less than `depth` are returned too. - /// - /// This method helps optimize hash recalculations by identifying which specific - /// nodes need to be updated at each level of the trie. - /// - /// # Parameters - /// - /// - `prefix_set`: The current prefix set tracking which paths need updates. - /// - `depth`: The minimum depth (relative to the root) to include nodes in the targets. - /// - /// # Returns - /// - /// A tuple containing: - /// - A vector of `(level, Nibbles)` pairs for nodes that require updates at or below the - /// specified depth. 
- /// - A `PrefixSetMut` containing paths shallower than the specified depth that still need to be - /// tracked for future updates. - fn get_changed_nodes_at_depth( - &self, - prefix_set: &mut PrefixSet, - depth: usize, - ) -> (Vec<(usize, Nibbles)>, PrefixSetMut) { - let mut unchanged_prefix_set = PrefixSetMut::default(); - let mut paths = Vec::from([(Nibbles::default(), 0)]); - let mut targets = Vec::new(); - while let Some((mut path, level)) = paths.pop() { - match self.nodes.get(&path).unwrap() { - SparseNode::Empty | SparseNode::Hash(_) => {} - SparseNode::Leaf { key: _, hash } => { - if hash.is_some() && !prefix_set.contains(&path) { - continue - } + // Walk the stack of removed nodes from the back and re-insert them back into the trie, + // adjusting the node type as needed. + while let Some(removed_node) = removed_nodes.pop() { + let removed_path = removed_node.path; - targets.push((level, path)); + let new_node = match &removed_node.node { + SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), + &SparseNode::Hash(hash) => { + return Err(SparseTrieErrorKind::BlindedNode { path: removed_path, hash }.into()) } - SparseNode::Extension { key, hash, store_in_db_trie: _ } => { - if hash.is_some() && !prefix_set.contains(&path) { - continue - } + SparseNode::Leaf { .. } => { + unreachable!("we already popped the leaf node") + } + SparseNode::Extension { key, .. } => { + // If the node is an extension node, we need to look at its child to see if we + // need to merge them. + match &child.node { + SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), + &SparseNode::Hash(hash) => { + return Err( + SparseTrieErrorKind::BlindedNode { path: child.path, hash }.into() + ) + } + // For a leaf node, we collapse the extension node into a leaf node, + // extending the key. While it's impossible to encounter an extension node + // followed by a leaf node in a complete trie, it's possible here because we + // could have downgraded the extension node's child into a leaf node from + // another node type. + SparseNode::Leaf { key: leaf_key, .. } => { + self.nodes.remove(&child.path); - if level >= depth { - targets.push((level, path)); - } else { - unchanged_prefix_set.insert(path); + let mut new_key = *key; + new_key.extend(leaf_key); + SparseNode::new_leaf(new_key) + } + // For an extension node, we collapse them into one extension node, + // extending the key + SparseNode::Extension { key: extension_key, .. } => { + self.nodes.remove(&child.path); - path.extend(key); - paths.push((path, level + 1)); + let mut new_key = *key; + new_key.extend(extension_key); + SparseNode::new_ext(new_key) + } + // For a branch node, we just leave the extension node as-is. + SparseNode::Branch { .. } => removed_node.node, } } - SparseNode::Branch { state_mask, hash, store_in_db_trie: _ } => { - if hash.is_some() && !prefix_set.contains(&path) { - continue + &SparseNode::Branch { mut state_mask, hash: _, store_in_db_trie: _ } => { + // If the node is a branch node, we need to check the number of children left + // after deleting the child at the given nibble. + + if let Some(removed_nibble) = removed_node.unset_branch_nibble { + state_mask.unset_bit(removed_nibble); } - if level >= depth { - targets.push((level, path)); - } else { - unchanged_prefix_set.insert(path); + // If only one child is left set in the branch node, we need to collapse it. 
+ if state_mask.count_bits() == 1 { + let child_nibble = + state_mask.first_set_bit_index().expect("state mask is not empty"); - for bit in CHILD_INDEX_RANGE.rev() { - if state_mask.is_bit_set(bit) { - let mut child_path = path; - child_path.push_unchecked(bit); - paths.push((child_path, level + 1)); + // Get full path of the only child node left. + let mut child_path = removed_path; + child_path.push_unchecked(child_nibble); + + trace!(target: "trie::sparse", ?removed_path, ?child_path, "Branch node has only one child"); + + if self.nodes.get(&child_path).unwrap().is_hash() { + trace!(target: "trie::sparse", ?child_path, "Retrieving remaining blinded branch child"); + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.blinded_node(&child_path)? + { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::sparse", + ?child_path, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing remaining blinded branch child" + ); + self.reveal_node( + child_path, + decoded, + TrieMasks { hash_mask, tree_mask }, + )?; } } - } - } - } - } - - (targets, unchanged_prefix_set) - } - /// Look up or calculate the RLP of the node at the root path. - /// - /// # Panics - /// - /// If the node at provided path does not exist. - pub fn rlp_node_allocate(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { - let mut buffers = RlpNodeBuffers::new_with_root_path(); - let mut temp_rlp_buf = core::mem::take(&mut self.rlp_buf); - let result = self.rlp_node(prefix_set, &mut buffers, &mut temp_rlp_buf); - self.rlp_buf = temp_rlp_buf; + // Get the only child node. + let child = self.nodes.get(&child_path).unwrap(); - result - } + let mut delete_child = false; + let new_node = match child { + SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), + &SparseNode::Hash(hash) => { + return Err(SparseTrieErrorKind::BlindedNode { + path: child_path, + hash, + } + .into()) + } + // If the only child is a leaf node, we downgrade the branch node into a + // leaf node, prepending the nibble to the key, and delete the old + // child. + SparseNode::Leaf { key, .. } => { + delete_child = true; - /// Looks up or computes the RLP encoding of the node specified by the current - /// path in the provided buffers. - /// - /// The function uses a stack (`RlpNodeBuffers::path_stack`) to track the traversal and - /// accumulate RLP encodings. - /// - /// # Parameters - /// - /// - `prefix_set`: The set of trie paths that need their nodes updated. - /// - `buffers`: The reusable buffers for stack management and temporary RLP values. - /// - /// # Panics - /// - /// If the node at provided path does not exist. - pub fn rlp_node( - &mut self, - prefix_set: &mut PrefixSet, - buffers: &mut RlpNodeBuffers, - rlp_buf: &mut Vec, - ) -> RlpNode { - let _starting_path = buffers.path_stack.last().map(|item| item.path); - - 'main: while let Some(RlpNodePathStackItem { level, path, mut is_in_prefix_set }) = - buffers.path_stack.pop() - { - let node = self.nodes.get_mut(&path).unwrap(); - trace!( - target: "trie::sparse", - ?_starting_path, - ?level, - ?path, - ?is_in_prefix_set, - ?node, - "Popped node from path stack" - ); - - // Check if the path is in the prefix set. - // First, check the cached value. If it's `None`, then check the prefix set, and update - // the cached value. 
- let mut prefix_set_contains = - |path: &Nibbles| *is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path)); - - let (rlp_node, node_type) = match node { - SparseNode::Empty => (RlpNode::word_rlp(&EMPTY_ROOT_HASH), SparseNodeType::Empty), - SparseNode::Hash(hash) => (RlpNode::word_rlp(hash), SparseNodeType::Hash), - SparseNode::Leaf { key, hash } => { - let mut path = path; - path.extend(key); - if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - (RlpNode::word_rlp(&hash), SparseNodeType::Leaf) - } else { - let value = self.values.get(&path).unwrap(); - rlp_buf.clear(); - let rlp_node = LeafNodeRef { key, value }.rlp(rlp_buf); - *hash = rlp_node.as_hash(); - (rlp_node, SparseNodeType::Leaf) - } - } - SparseNode::Extension { key, hash, store_in_db_trie } => { - let mut child_path = path; - child_path.extend(key); - if let Some((hash, store_in_db_trie)) = - hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) - { - ( - RlpNode::word_rlp(&hash), - SparseNodeType::Extension { store_in_db_trie: Some(store_in_db_trie) }, - ) - } else if buffers.rlp_node_stack.last().is_some_and(|e| e.path == child_path) { - let RlpNodeStackItem { - path: _, - rlp_node: child, - node_type: child_node_type, - } = buffers.rlp_node_stack.pop().unwrap(); - rlp_buf.clear(); - let rlp_node = ExtensionNodeRef::new(key, &child).rlp(rlp_buf); - *hash = rlp_node.as_hash(); + let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); + new_key.extend(key); + SparseNode::new_leaf(new_key) + } + // If the only child node is an extension node, we downgrade the branch + // node into an even longer extension node, prepending the nibble to the + // key, and delete the old child. + SparseNode::Extension { key, .. } => { + delete_child = true; - let store_in_db_trie_value = child_node_type.store_in_db_trie(); + let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); + new_key.extend(key); + SparseNode::new_ext(new_key) + } + // If the only child is a branch node, we downgrade the current branch + // node into a one-nibble extension node. + SparseNode::Branch { .. 
} => { + SparseNode::new_ext(Nibbles::from_nibbles_unchecked([child_nibble])) + } + }; - trace!( - target: "trie::sparse", - ?path, - ?child_path, - ?child_node_type, - "Extension node" - ); + if delete_child { + self.nodes.remove(&child_path); + } - *store_in_db_trie = store_in_db_trie_value; + if let Some(updates) = self.updates.as_mut() { + updates.updated_nodes.remove(&removed_path); + updates.removed_nodes.insert(removed_path); + } - ( - rlp_node, - SparseNodeType::Extension { - // Inherit the `store_in_db_trie` flag from the child node, which is - // always the branch node - store_in_db_trie: store_in_db_trie_value, - }, - ) - } else { - // need to get rlp node for child first - buffers.path_stack.extend([ - RlpNodePathStackItem { level, path, is_in_prefix_set }, - RlpNodePathStackItem { - level: level + 1, - path: child_path, - is_in_prefix_set: None, - }, - ]); - continue - } - } - SparseNode::Branch { state_mask, hash, store_in_db_trie } => { - if let Some((hash, store_in_db_trie)) = - hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) - { - buffers.rlp_node_stack.push(RlpNodeStackItem { - path, - rlp_node: RlpNode::word_rlp(&hash), - node_type: SparseNodeType::Branch { - store_in_db_trie: Some(store_in_db_trie), - }, - }); - continue + new_node } - let retain_updates = self.updates.is_some() && prefix_set_contains(&path); - - buffers.branch_child_buf.clear(); - // Walk children in a reverse order from `f` to `0`, so we pop the `0` first - // from the stack and keep walking in the sorted order. - for bit in CHILD_INDEX_RANGE.rev() { - if state_mask.is_bit_set(bit) { - let mut child = path; - child.push_unchecked(bit); - buffers.branch_child_buf.push(child); - } + // If more than one child is left set in the branch, we just re-insert it as-is. + else { + SparseNode::new_branch(state_mask) } + } + }; - buffers - .branch_value_stack_buf - .resize(buffers.branch_child_buf.len(), Default::default()); - let mut added_children = false; - - let mut tree_mask = TrieMask::default(); - let mut hash_mask = TrieMask::default(); - let mut hashes = Vec::new(); - for (i, child_path) in buffers.branch_child_buf.iter().enumerate() { - if buffers.rlp_node_stack.last().is_some_and(|e| &e.path == child_path) { - let RlpNodeStackItem { - path: _, - rlp_node: child, - node_type: child_node_type, - } = buffers.rlp_node_stack.pop().unwrap(); - - // Update the masks only if we need to retain trie updates - if retain_updates { - // SAFETY: it's a child, so it's never empty - let last_child_nibble = child_path.last().unwrap(); - - // Determine whether we need to set trie mask bit. - let should_set_tree_mask_bit = if let Some(store_in_db_trie) = - child_node_type.store_in_db_trie() - { - // A branch or an extension node explicitly set the - // `store_in_db_trie` flag - store_in_db_trie - } else { - // A blinded node has the tree mask bit set - child_node_type.is_hash() && - self.branch_node_tree_masks.get(&path).is_some_and( - |mask| mask.is_bit_set(last_child_nibble), - ) - }; - if should_set_tree_mask_bit { - tree_mask.set_bit(last_child_nibble); - } - - // Set the hash mask. If a child node is a revealed branch node OR - // is a blinded node that has its hash mask bit set according to the - // database, set the hash mask bit and save the hash. 
- let hash = child.as_hash().filter(|_| { - child_node_type.is_branch() || - (child_node_type.is_hash() && - self.branch_node_hash_masks - .get(&path) - .is_some_and(|mask| { - mask.is_bit_set(last_child_nibble) - })) - }); - if let Some(hash) = hash { - hash_mask.set_bit(last_child_nibble); - hashes.push(hash); - } - } - - // Insert children in the resulting buffer in a normal order, - // because initially we iterated in reverse. - // SAFETY: i < len and len is never 0 - let original_idx = buffers.branch_child_buf.len() - i - 1; - buffers.branch_value_stack_buf[original_idx] = child; - added_children = true; - } else { - debug_assert!(!added_children); - buffers.path_stack.push(RlpNodePathStackItem { - level, - path, - is_in_prefix_set, - }); - buffers.path_stack.extend(buffers.branch_child_buf.drain(..).map( - |path| RlpNodePathStackItem { - level: level + 1, - path, - is_in_prefix_set: None, - }, - )); - continue 'main - } - } + child = RemovedSparseNode { + path: removed_path, + node: new_node.clone(), + unset_branch_nibble: None, + }; + trace!(target: "trie::sparse", ?removed_path, ?new_node, "Re-inserting the node"); + self.nodes.insert(removed_path, new_node); + } - trace!( - target: "trie::sparse", - ?path, - ?tree_mask, - ?hash_mask, - "Branch node masks" - ); + Ok(()) + } - rlp_buf.clear(); - let branch_node_ref = - BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask); - let rlp_node = branch_node_ref.rlp(rlp_buf); - *hash = rlp_node.as_hash(); + fn root(&mut self) -> B256 { + // Take the current prefix set + let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); + let rlp_node = self.rlp_node_allocate(&mut prefix_set); + if let Some(root_hash) = rlp_node.as_hash() { + root_hash + } else { + keccak256(rlp_node) + } + } - // Save a branch node update only if it's not a root node, and we need to - // persist updates. - let store_in_db_trie_value = if let Some(updates) = - self.updates.as_mut().filter(|_| retain_updates && !path.is_empty()) - { - let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty(); - if store_in_db_trie { - // Store in DB trie if there are either any children that are stored in - // the DB trie, or any children represent hashed values - hashes.reverse(); - let branch_node = BranchNodeCompact::new( - *state_mask, - tree_mask, - hash_mask, - hashes, - hash.filter(|_| path.is_empty()), - ); - updates.updated_nodes.insert(path, branch_node); - } else if self - .branch_node_tree_masks - .get(&path) - .is_some_and(|mask| !mask.is_empty()) || - self.branch_node_hash_masks - .get(&path) - .is_some_and(|mask| !mask.is_empty()) - { - // If new tree and hash masks are empty, but previously they weren't, we - // need to remove the node update and add the node itself to the list of - // removed nodes. - updates.updated_nodes.remove(&path); - updates.removed_nodes.insert(path); - } else if self - .branch_node_hash_masks - .get(&path) - .is_none_or(|mask| mask.is_empty()) && - self.branch_node_hash_masks - .get(&path) - .is_none_or(|mask| mask.is_empty()) - { - // If new tree and hash masks are empty, and they were previously empty - // as well, we need to remove the node update. 
- updates.updated_nodes.remove(&path); - } + fn update_subtrie_hashes(&mut self) { + self.update_rlp_node_level(SPARSE_TRIE_SUBTRIE_HASHES_LEVEL); + } - store_in_db_trie - } else { - false - }; - *store_in_db_trie = Some(store_in_db_trie_value); + fn get_leaf_value(&self, full_path: &Nibbles) -> Option<&Vec> { + self.values.get(full_path) + } - ( - rlp_node, - SparseNodeType::Branch { store_in_db_trie: Some(store_in_db_trie_value) }, - ) - } - }; + fn take_updates(&mut self) -> SparseTrieUpdates { + self.updates.take().unwrap_or_default() + } - trace!( - target: "trie::sparse", - ?_starting_path, - ?level, - ?path, - ?node, - ?node_type, - ?is_in_prefix_set, - "Added node to rlp node stack" - ); + fn wipe(&mut self) { + self.nodes = HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]); + self.values = HashMap::default(); + self.prefix_set = PrefixSetMut::all(); + self.updates = self.updates.is_some().then(SparseTrieUpdates::wiped); + } - buffers.rlp_node_stack.push(RlpNodeStackItem { path, rlp_node, node_type }); - } + fn clear(&mut self) { + self.nodes.clear(); + self.nodes.insert(Nibbles::default(), SparseNode::Empty); - debug_assert_eq!(buffers.rlp_node_stack.len(), 1); - buffers.rlp_node_stack.pop().unwrap().rlp_node + self.branch_node_tree_masks.clear(); + self.branch_node_hash_masks.clear(); + self.values.clear(); + self.prefix_set.clear(); + self.updates = None; + self.rlp_buf.clear(); } -} -impl RevealedSparseTrie { - /// Attempts to find a leaf node at the specified path. - /// - /// This method traverses the trie from the root down to the given path, checking - /// if a leaf exists at that path. It can be used to verify the existence of a leaf - /// or to generate an exclusion proof (proof that a leaf does not exist). - /// - /// # Parameters - /// - /// - `path`: The path to search for. - /// - `expected_value`: Optional expected value. If provided, will verify the leaf value - /// matches. - /// - /// # Returns - /// - /// - `Ok(LeafLookup::Exists)` if the leaf exists with the expected value. - /// - `Ok(LeafLookup::NonExistent)` if the leaf definitely does not exist (exclusion proof). - /// - `Err(LeafLookupError)` if the search encountered a blinded node or found a different - /// value. - pub fn find_leaf( + fn find_leaf( &self, - path: &Nibbles, + full_path: &Nibbles, expected_value: Option<&Vec>, ) -> Result { // Helper function to check if a value matches the expected value @@ -1287,9 +957,9 @@ impl RevealedSparseTrie { // First, do a quick check if the value exists in our values map. // We assume that if there exists a leaf node, then its value will // be in the `values` map. - if let Some(actual_value) = self.values.get(path) { + if let Some(actual_value) = self.values.get(full_path) { // We found the leaf, check if the value matches (if expected value was provided) - check_value_match(actual_value, expected_value, path)?; + check_value_match(actual_value, expected_value, full_path)?; return Ok(LeafLookup::Exists); } @@ -1299,7 +969,7 @@ impl RevealedSparseTrie { // We traverse the trie to find the location where this leaf would have been, showing // that it is not in the trie. Or we find a blinded node, showing that the witness is // not complete. 
- while current.len() < path.len() { + while current.len() < full_path.len() { match self.nodes.get(¤t) { Some(SparseNode::Empty) | None => { // None implies no node is at the current path (even in the full trie) @@ -1317,10 +987,10 @@ impl RevealedSparseTrie { let saved_len = current.len(); current.extend(key); - if ¤t == path { + if ¤t == full_path { // This should have been handled by our initial values map check - if let Some(value) = self.values.get(path) { - check_value_match(value, expected_value, path)?; + if let Some(value) = self.values.get(full_path) { + check_value_match(value, expected_value, full_path)?; return Ok(LeafLookup::Exists); } } @@ -1336,7 +1006,7 @@ impl RevealedSparseTrie { let saved_len = current.len(); current.extend(key); - if path.len() < current.len() || !path.starts_with(¤t) { + if full_path.len() < current.len() || !full_path.starts_with(¤t) { let diverged_at = current.slice(..saved_len); current.truncate(saved_len); // restore return Ok(LeafLookup::NonExistent { diverged_at }); @@ -1345,397 +1015,630 @@ impl RevealedSparseTrie { } Some(SparseNode::Branch { state_mask, .. }) => { // Check if branch has a child at the next nibble in our path - let nibble = path.get_unchecked(current.len()); + let nibble = full_path.get_unchecked(current.len()); if !state_mask.is_bit_set(nibble) { // No child at this nibble - exclusion proof return Ok(LeafLookup::NonExistent { diverged_at: current }); } - - // Continue down the branch - current.push_unchecked(nibble); - } - } - } - - // We've traversed to the end of the path and didn't find a leaf - // Check if there's a node exactly at our target path - match self.nodes.get(path) { - Some(SparseNode::Leaf { key, .. }) if key.is_empty() => { - // We found a leaf with an empty key (exact match) - // This should be handled by the values map check above - if let Some(value) = self.values.get(path) { - check_value_match(value, expected_value, path)?; - return Ok(LeafLookup::Exists); + + // Continue down the branch + current.push_unchecked(nibble); + } + } + } + + // We've traversed to the end of the path and didn't find a leaf + // Check if there's a node exactly at our target path + match self.nodes.get(full_path) { + Some(SparseNode::Leaf { key, .. }) if key.is_empty() => { + // We found a leaf with an empty key (exact match) + // This should be handled by the values map check above + if let Some(value) = self.values.get(full_path) { + check_value_match(value, expected_value, full_path)?; + return Ok(LeafLookup::Exists); + } + } + Some(&SparseNode::Hash(hash)) => { + return Err(LeafLookupError::BlindedNode { path: *full_path, hash }); + } + _ => { + // No leaf at exactly the target path + let parent_path = if full_path.is_empty() { + Nibbles::default() + } else { + full_path.slice(0..full_path.len() - 1) + }; + return Ok(LeafLookup::NonExistent { diverged_at: parent_path }); + } + } + + // If we get here, there's no leaf at the target path + Ok(LeafLookup::NonExistent { diverged_at: current }) + } +} + +impl RevealedSparseTrie { + /// Returns a reference to the current sparse trie updates. + /// + /// If no updates have been made/recorded, returns an empty update set. + pub fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { + self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed) + } + + /// Returns an immutable reference to all nodes in the sparse trie. 
+ pub const fn nodes_ref(&self) -> &HashMap { + &self.nodes + } + + /// Reveals either a node or its hash placeholder based on the provided child data. + /// + /// When traversing the trie, we often encounter references to child nodes that + /// are either directly embedded or represented by their hash. This method + /// handles both cases: + /// + /// 1. If the child data represents a hash (32+1=33 bytes), store it as a hash node + /// 2. Otherwise, decode the data as a [`TrieNode`] and recursively reveal it using + /// `reveal_node` + /// + /// # Returns + /// + /// Returns `Ok(())` if successful, or an error if the node cannot be revealed. + /// + /// # Error Handling + /// + /// Will error if there's a conflict between a new hash node and an existing one + /// at the same path + fn reveal_node_or_hash(&mut self, path: Nibbles, child: &[u8]) -> SparseTrieResult<()> { + if child.len() == B256::len_bytes() + 1 { + let hash = B256::from_slice(&child[1..]); + match self.nodes.entry(path) { + Entry::Occupied(entry) => match entry.get() { + // Hash node with a different hash can't be handled. + SparseNode::Hash(previous_hash) if previous_hash != &hash => { + return Err(SparseTrieErrorKind::Reveal { + path: *entry.key(), + node: Box::new(SparseNode::Hash(hash)), + } + .into()) + } + _ => {} + }, + Entry::Vacant(entry) => { + entry.insert(SparseNode::Hash(hash)); } } - Some(&SparseNode::Hash(hash)) => { - return Err(LeafLookupError::BlindedNode { path: *path, hash }); - } - _ => { - // No leaf at exactly the target path - let parent_path = if path.is_empty() { - Nibbles::default() - } else { - path.slice(0..path.len() - 1) - }; - return Ok(LeafLookup::NonExistent { diverged_at: parent_path }); - } + return Ok(()) } - // If we get here, there's no leaf at the target path - Ok(LeafLookup::NonExistent { diverged_at: current }) + self.reveal_node(path, TrieNode::decode(&mut &child[..])?, TrieMasks::none()) } - /// Updates or inserts a leaf node at the specified key path with the provided RLP-encoded - /// value. + /// Traverse the trie from the root down to the leaf at the given path, + /// removing and collecting all nodes along that path. /// - /// This method updates the internal prefix set and, if the leaf did not previously exist, - /// adjusts the trie structure by inserting new leaf nodes, splitting branch nodes, or - /// collapsing extension nodes as needed. + /// This helper function is used during leaf removal to extract the nodes of the trie + /// that will be affected by the deletion. These nodes are then re-inserted and modified + /// as needed (collapsing extension nodes etc) given that the leaf has now been removed. /// /// # Returns /// - /// Returns `Ok(())` if the update is successful. + /// Returns a vector of [`RemovedSparseNode`] representing the nodes removed during the + /// traversal. /// - /// Note: If an update requires revealing a blinded node, an error is returned if the blinded - /// provider returns an error. - pub fn update_leaf( - &mut self, - path: Nibbles, - value: Vec, - provider: impl BlindedProvider, - ) -> SparseTrieResult<()> { - self.prefix_set.insert(path); - let existing = self.values.insert(path, value); - if existing.is_some() { - // trie structure unchanged, return immediately - return Ok(()) - } + /// # Errors + /// + /// Returns an error if a blinded node or an empty node is encountered unexpectedly, + /// as these prevent proper removal of the leaf. 
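The 33-byte test in `reveal_node_or_hash` above follows from RLP encoding: a referenced child is the one-byte string header `0xa0` followed by a 32-byte hash, while anything shorter is a node embedded directly in its parent (standard Merkle-Patricia-trie behaviour, stated here as background). A standalone sketch of that discrimination:

```rust
/// Length of a 32-byte hash plus its one-byte RLP string header (0xa0),
/// i.e. the `B256::len_bytes() + 1` check above.
const HASH_REF_LEN: usize = 32 + 1;

/// Mirrors the branch taken by `reveal_node_or_hash`: 33-byte payloads are
/// hash references; shorter payloads are nodes inlined in the parent.
fn classify_child(child: &[u8]) -> &'static str {
    if child.len() == HASH_REF_LEN {
        "hash reference (store as a hash node)"
    } else {
        "embedded node (decode and reveal recursively)"
    }
}

fn main() {
    let mut hash_ref = vec![0xa0u8]; // RLP header for a 32-byte string
    hash_ref.extend([0u8; 32]); // the hash itself
    assert_eq!(classify_child(&hash_ref), "hash reference (store as a hash node)");

    let embedded = vec![0xc2, 0x80, 0x80]; // a short RLP list, inlined
    assert_eq!(classify_child(&embedded), "embedded node (decode and reveal recursively)");
}
```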
+ fn take_nodes_for_path(&mut self, path: &Nibbles) -> SparseTrieResult> { + let mut current = Nibbles::default(); // Start traversal from the root + let mut nodes = Vec::new(); // Collect traversed nodes - let mut current = Nibbles::default(); - while let Some(node) = self.nodes.get_mut(¤t) { - match node { - SparseNode::Empty => { - *node = SparseNode::new_leaf(path); - break - } - &mut SparseNode::Hash(hash) => { + while let Some(node) = self.nodes.remove(¤t) { + match &node { + SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), + &SparseNode::Hash(hash) => { return Err(SparseTrieErrorKind::BlindedNode { path: current, hash }.into()) } - SparseNode::Leaf { key: current_key, .. } => { - current.extend(current_key); + SparseNode::Leaf { key: _key, .. } => { + // Leaf node is always the one that we're deleting, and no other leaf nodes can + // be found during traversal. - // this leaf is being updated - if current == path { - unreachable!("we already checked leaf presence in the beginning"); + #[cfg(debug_assertions)] + { + let mut current = current; + current.extend(_key); + assert_eq!(¤t, path); } - // find the common prefix - let common = current.common_prefix_length(&path); - - // update existing node - let new_ext_key = current.slice(current.len() - current_key.len()..common); - *node = SparseNode::new_ext(new_ext_key); + nodes.push(RemovedSparseNode { + path: current, + node, + unset_branch_nibble: None, + }); + break + } + SparseNode::Extension { key, .. } => { + #[cfg(debug_assertions)] + { + let mut current = current; + current.extend(key); + assert!( + path.starts_with(¤t), + "path: {path:?}, current: {current:?}, key: {key:?}", + ); + } - // create a branch node and corresponding leaves - self.nodes.reserve(3); - self.nodes.insert( - current.slice(..common), - SparseNode::new_split_branch( - current.get_unchecked(common), - path.get_unchecked(common), - ), - ); - self.nodes.insert( - path.slice(..=common), - SparseNode::new_leaf(path.slice(common + 1..)), - ); - self.nodes.insert( - current.slice(..=common), - SparseNode::new_leaf(current.slice(common + 1..)), + let path = current; + current.extend(key); + nodes.push(RemovedSparseNode { path, node, unset_branch_nibble: None }); + } + SparseNode::Branch { state_mask, .. } => { + let nibble = path.get_unchecked(current.len()); + debug_assert!( + state_mask.is_bit_set(nibble), + "current: {current:?}, path: {path:?}, nibble: {nibble:?}, state_mask: {state_mask:?}", ); - break; + // If the branch node has a child that is a leaf node that we're removing, + // we need to unset this nibble. + // Any other branch nodes will not require unsetting the nibble, because + // deleting one leaf node can not remove the whole path + // where the branch node is located. + let mut child_path = current; + child_path.push_unchecked(nibble); + let unset_branch_nibble = self + .nodes + .get(&child_path) + .is_some_and(move |node| match node { + SparseNode::Leaf { key, .. } => { + // Get full path of the leaf node + child_path.extend(key); + &child_path == path + } + _ => false, + }) + .then_some(nibble); + + nodes.push(RemovedSparseNode { path: current, node, unset_branch_nibble }); + + current.push_unchecked(nibble); } - SparseNode::Extension { key, .. 
} => { - current.extend(key); + } + } - if !path.starts_with(¤t) { - // find the common prefix - let common = current.common_prefix_length(&path); - *key = current.slice(current.len() - key.len()..common); + Ok(nodes) + } - // If branch node updates retention is enabled, we need to query the - // extension node child to later set the hash mask for a parent branch node - // correctly. - if self.updates.is_some() { - // Check if the extension node child is a hash that needs to be revealed - if self.nodes.get(¤t).unwrap().is_hash() { - if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.blinded_node(¤t)? - { - let decoded = TrieNode::decode(&mut &node[..])?; - trace!( - target: "trie::sparse", - ?current, - ?decoded, - ?tree_mask, - ?hash_mask, - "Revealing extension node child", - ); - self.reveal_node( - current, - decoded, - TrieMasks { hash_mask, tree_mask }, - )?; - } - } - } + /// Recalculates and updates the RLP hashes of nodes deeper than or equal to the specified + /// `depth`. + /// + /// The root node is considered to be at level 0. This method is useful for optimizing + /// hash recalculations after localized changes to the trie structure: + /// + /// This function identifies all nodes that have changed (based on the prefix set) at the given + /// depth and recalculates their RLP representation. + pub fn update_rlp_node_level(&mut self, depth: usize) { + // Take the current prefix set + let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); + let mut buffers = RlpNodeBuffers::default(); + + // Get the nodes that have changed at the given depth. + let (targets, new_prefix_set) = self.get_changed_nodes_at_depth(&mut prefix_set, depth); + // Update the prefix set to the prefix set of the nodes that still need to be updated. + self.prefix_set = new_prefix_set; + + trace!(target: "trie::sparse", ?depth, ?targets, "Updating nodes at depth"); + + let mut temp_rlp_buf = core::mem::take(&mut self.rlp_buf); + for (level, path) in targets { + buffers.path_stack.push(RlpNodePathStackItem { + level, + path, + is_in_prefix_set: Some(true), + }); + self.rlp_node(&mut prefix_set, &mut buffers, &mut temp_rlp_buf); + } + self.rlp_buf = temp_rlp_buf; + } + + /// Returns a list of (level, path) tuples identifying the nodes that have changed at the + /// specified depth, along with a new prefix set for the paths above the provided depth that + /// remain unchanged. + /// + /// Leaf nodes with a depth less than `depth` are returned too. + /// + /// This method helps optimize hash recalculations by identifying which specific + /// nodes need to be updated at each level of the trie. + /// + /// # Parameters + /// + /// - `prefix_set`: The current prefix set tracking which paths need updates. + /// - `depth`: The minimum depth (relative to the root) to include nodes in the targets. + /// + /// # Returns + /// + /// A tuple containing: + /// - A vector of `(level, Nibbles)` pairs for nodes that require updates at or below the + /// specified depth. + /// - A `PrefixSetMut` containing paths shallower than the specified depth that still need to be + /// tracked for future updates. 
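The partition described above can be modelled without any of the trie machinery. A simplified sketch, with `Vec<u8>` nibble paths and a `ToyNode` enum standing in for `Nibbles` and `SparseNode`; the hash short-circuits and prefix-set checks of the real method are deliberately omitted:

```rust
use std::collections::HashMap;

#[derive(Debug)]
enum ToyNode {
    Leaf,
    Branch(Vec<u8>), // child nibbles present on this branch
}

/// Collects `(level, path)` targets at or below `depth` and records the
/// shallower branch paths whose subtrees still contain work, mirroring the
/// split that `get_changed_nodes_at_depth` performs.
fn changed_nodes_at_depth(
    nodes: &HashMap<Vec<u8>, ToyNode>,
    depth: usize,
) -> (Vec<(usize, Vec<u8>)>, Vec<Vec<u8>>) {
    let mut stack = vec![(Vec::new(), 0usize)];
    let mut targets = Vec::new();
    let mut unchanged_prefixes = Vec::new();

    while let Some((path, level)) = stack.pop() {
        match &nodes[&path] {
            // Leaves are returned even when shallower than `depth`.
            ToyNode::Leaf => targets.push((level, path)),
            ToyNode::Branch(children) => {
                if level >= depth {
                    targets.push((level, path));
                } else {
                    unchanged_prefixes.push(path.clone());
                    for &nibble in children.iter().rev() {
                        let mut child = path.clone();
                        child.push(nibble);
                        stack.push((child, level + 1));
                    }
                }
            }
        }
    }
    (targets, unchanged_prefixes)
}

fn main() {
    let nodes = HashMap::from([
        (vec![], ToyNode::Branch(vec![0x1, 0x2])),
        (vec![0x1], ToyNode::Leaf),
        (vec![0x2], ToyNode::Branch(vec![0x5])),
        (vec![0x2, 0x5], ToyNode::Leaf),
    ]);
    let (targets, unchanged) = changed_nodes_at_depth(&nodes, 1);
    assert!(targets.contains(&(1, vec![0x1])));
    assert!(targets.contains(&(1, vec![0x2])));
    assert_eq!(unchanged, vec![vec![]]);
}
```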
+ fn get_changed_nodes_at_depth( + &self, + prefix_set: &mut PrefixSet, + depth: usize, + ) -> (Vec<(usize, Nibbles)>, PrefixSetMut) { + let mut unchanged_prefix_set = PrefixSetMut::default(); + let mut paths = Vec::from([(Nibbles::default(), 0)]); + let mut targets = Vec::new(); - // create state mask for new branch node - // NOTE: this might overwrite the current extension node - self.nodes.reserve(3); - let branch = SparseNode::new_split_branch( - current.get_unchecked(common), - path.get_unchecked(common), - ); - self.nodes.insert(current.slice(..common), branch); + while let Some((mut path, level)) = paths.pop() { + match self.nodes.get(&path).unwrap() { + SparseNode::Empty | SparseNode::Hash(_) => {} + SparseNode::Leaf { key: _, hash } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } - // create new leaf - let new_leaf = SparseNode::new_leaf(path.slice(common + 1..)); - self.nodes.insert(path.slice(..=common), new_leaf); + targets.push((level, path)); + } + SparseNode::Extension { key, hash, store_in_db_trie: _ } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } - // recreate extension to previous child if needed - let key = current.slice(common + 1..); - if !key.is_empty() { - self.nodes.insert(current.slice(..=common), SparseNode::new_ext(key)); - } + if level >= depth { + targets.push((level, path)); + } else { + unchanged_prefix_set.insert(path); - break; + path.extend(key); + paths.push((path, level + 1)); } } - SparseNode::Branch { state_mask, .. } => { - let nibble = path.get_unchecked(current.len()); - current.push_unchecked(nibble); - if !state_mask.is_bit_set(nibble) { - state_mask.set_bit(nibble); - let new_leaf = SparseNode::new_leaf(path.slice(current.len()..)); - self.nodes.insert(current, new_leaf); - break; + SparseNode::Branch { state_mask, hash, store_in_db_trie: _ } => { + if hash.is_some() && !prefix_set.contains(&path) { + continue + } + + if level >= depth { + targets.push((level, path)); + } else { + unchanged_prefix_set.insert(path); + + for bit in CHILD_INDEX_RANGE.rev() { + if state_mask.is_bit_set(bit) { + let mut child_path = path; + child_path.push_unchecked(bit); + paths.push((child_path, level + 1)); + } + } } } - }; + } } - Ok(()) + (targets, unchanged_prefix_set) } - /// Removes a leaf node from the trie at the specified key path. - /// - /// This function removes the leaf value from the internal values map and then traverses - /// the trie to remove or adjust intermediate nodes, merging or collapsing them as necessary. + /// Look up or calculate the RLP of the node at the root path. /// - /// # Returns + /// # Panics /// - /// Returns `Ok(())` if the leaf is successfully removed, otherwise returns an error - /// if the leaf is not present or if a blinded node prevents removal. - pub fn remove_leaf( - &mut self, - path: &Nibbles, - provider: impl BlindedProvider, - ) -> SparseTrieResult<()> { - if self.values.remove(path).is_none() { - if let Some(&SparseNode::Hash(hash)) = self.nodes.get(path) { - // Leaf is present in the trie, but it's blinded. - return Err(SparseTrieErrorKind::BlindedNode { path: *path, hash }.into()) - } + /// If the node at provided path does not exist. 
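The implementation that follows lends `self.rlp_buf` out via `core::mem::take` so that `self` can still be borrowed mutably by `rlp_node` while the buffer is in use, then hands the allocation back. A self-contained sketch of that take-use-restore idiom; the `Encoder` type is invented for illustration:

```rust
struct Encoder {
    scratch: Vec<u8>, // reusable allocation, like `rlp_buf` above
}

impl Encoder {
    fn encode_with(&mut self, data: &[u8]) -> usize {
        // Move the buffer out, leaving a cheap empty Vec behind, so `&mut self`
        // can be used again without a second borrow of the field.
        let mut scratch = core::mem::take(&mut self.scratch);
        let written = self.fill(&mut scratch, data);
        // Return the (possibly grown) allocation for the next call.
        self.scratch = scratch;
        written
    }

    fn fill(&mut self, buf: &mut Vec<u8>, data: &[u8]) -> usize {
        buf.clear();
        buf.extend_from_slice(data);
        buf.len()
    }
}

fn main() {
    let mut enc = Encoder { scratch: Vec::new() };
    assert_eq!(enc.encode_with(b"abc"), 3);
    assert_eq!(enc.encode_with(b"hello"), 5);
    // The allocation made for "hello" is retained inside `enc.scratch`.
    assert!(enc.scratch.capacity() >= 5);
}
```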
+    pub fn rlp_node_allocate(&mut self, prefix_set: &mut PrefixSet) -> RlpNode {
+        let mut buffers = RlpNodeBuffers::new_with_root_path();
+        let mut temp_rlp_buf = core::mem::take(&mut self.rlp_buf);
+        let result = self.rlp_node(prefix_set, &mut buffers, &mut temp_rlp_buf);
+        self.rlp_buf = temp_rlp_buf;
 
-        trace!(target: "trie::sparse", ?path, "Leaf node is not present in the trie");
-        // Leaf is not present in the trie.
-        return Ok(())
-    }
-    self.prefix_set.insert(*path);
+
+        result
+    }
 
-    // If the path wasn't present in `values`, we still need to walk the trie and ensure that
-    // there is no node at the path. When a leaf node is a blinded `Hash`, it will have an entry
-    // in `nodes`, but not in the `values`.
+    /// Looks up or computes the RLP encoding of the node specified by the current
+    /// path in the provided buffers.
+    ///
+    /// The function uses a stack (`RlpNodeBuffers::path_stack`) to track the traversal and
+    /// accumulate RLP encodings.
+    ///
+    /// # Parameters
+    ///
+    /// - `prefix_set`: The set of trie paths that need their nodes updated.
+    /// - `buffers`: The reusable buffers for stack management and temporary RLP values.
+    ///
+    /// # Panics
+    ///
+    /// If the node at the provided path does not exist.
+    pub fn rlp_node(
+        &mut self,
+        prefix_set: &mut PrefixSet,
+        buffers: &mut RlpNodeBuffers,
+        rlp_buf: &mut Vec<u8>,
+    ) -> RlpNode {
+        let _starting_path = buffers.path_stack.last().map(|item| item.path);
 
-    let mut removed_nodes = self.take_nodes_for_path(path)?;
-    // Pop the first node from the stack which is the leaf node we want to remove.
-    let mut child = removed_nodes.pop().expect("leaf exists");
-    #[cfg(debug_assertions)]
+        'main: while let Some(RlpNodePathStackItem { level, path, mut is_in_prefix_set }) =
+            buffers.path_stack.pop()
     {
-        let mut child_path = child.path;
-        let SparseNode::Leaf { key, .. } = &child.node else { panic!("expected leaf node") };
-        child_path.extend(key);
-        assert_eq!(&child_path, path);
-    }
+        let node = self.nodes.get_mut(&path).unwrap();
+        trace!(
+            target: "trie::sparse",
+            ?_starting_path,
+            ?level,
+            ?path,
+            ?is_in_prefix_set,
+            ?node,
+            "Popped node from path stack"
+        );
 
-    // If we don't have any other removed nodes, insert an empty node at the root.
-    if removed_nodes.is_empty() {
-        debug_assert!(self.nodes.is_empty());
-        self.nodes.insert(Nibbles::default(), SparseNode::Empty);
 
+        // Check if the path is in the prefix set.
+        // First, check the cached value. If it's `None`, then check the prefix set, and update
+        // the cached value.
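The cached `is_in_prefix_set` flag carried on each stack item is a one-slot memo over `prefix_set.contains`, which the closure that follows implements via `Option::get_or_insert_with`. A standalone reduction of the idiom; the string array stands in for the real `PrefixSet`:

```rust
fn main() {
    let prefix_set = ["0x12", "0x34"]; // stand-in for the real PrefixSet
    let mut lookups = 0;

    // Starts unknown (`None`), exactly like the flag on the path-stack items.
    let mut is_in_prefix_set: Option<bool> = None;

    for _ in 0..3 {
        // The first call computes and stores the answer; later calls reuse it.
        let contains = *is_in_prefix_set.get_or_insert_with(|| {
            lookups += 1;
            prefix_set.contains(&"0x12")
        });
        assert!(contains);
    }

    // The underlying set was consulted exactly once across three checks.
    assert_eq!(lookups, 1);
}
```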
+ let mut prefix_set_contains = + |path: &Nibbles| *is_in_prefix_set.get_or_insert_with(|| prefix_set.contains(path)); - return Ok(()) - } + let (rlp_node, node_type) = match node { + SparseNode::Empty => (RlpNode::word_rlp(&EMPTY_ROOT_HASH), SparseNodeType::Empty), + SparseNode::Hash(hash) => (RlpNode::word_rlp(hash), SparseNodeType::Hash), + SparseNode::Leaf { key, hash } => { + let mut path = path; + path.extend(key); + if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { + (RlpNode::word_rlp(&hash), SparseNodeType::Leaf) + } else { + let value = self.values.get(&path).unwrap(); + rlp_buf.clear(); + let rlp_node = LeafNodeRef { key, value }.rlp(rlp_buf); + *hash = rlp_node.as_hash(); + (rlp_node, SparseNodeType::Leaf) + } + } + SparseNode::Extension { key, hash, store_in_db_trie } => { + let mut child_path = path; + child_path.extend(key); + if let Some((hash, store_in_db_trie)) = + hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) + { + ( + RlpNode::word_rlp(&hash), + SparseNodeType::Extension { store_in_db_trie: Some(store_in_db_trie) }, + ) + } else if buffers.rlp_node_stack.last().is_some_and(|e| e.path == child_path) { + let RlpNodeStackItem { + path: _, + rlp_node: child, + node_type: child_node_type, + } = buffers.rlp_node_stack.pop().unwrap(); + rlp_buf.clear(); + let rlp_node = ExtensionNodeRef::new(key, &child).rlp(rlp_buf); + *hash = rlp_node.as_hash(); - // Walk the stack of removed nodes from the back and re-insert them back into the trie, - // adjusting the node type as needed. - while let Some(removed_node) = removed_nodes.pop() { - let removed_path = removed_node.path; + let store_in_db_trie_value = child_node_type.store_in_db_trie(); - let new_node = match &removed_node.node { - SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), - &SparseNode::Hash(hash) => { - return Err(SparseTrieErrorKind::BlindedNode { path: removed_path, hash }.into()) - } - SparseNode::Leaf { .. } => { - unreachable!("we already popped the leaf node") - } - SparseNode::Extension { key, .. } => { - // If the node is an extension node, we need to look at its child to see if we - // need to merge them. - match &child.node { - SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), - &SparseNode::Hash(hash) => { - return Err( - SparseTrieErrorKind::BlindedNode { path: child.path, hash }.into() - ) - } - // For a leaf node, we collapse the extension node into a leaf node, - // extending the key. While it's impossible to encounter an extension node - // followed by a leaf node in a complete trie, it's possible here because we - // could have downgraded the extension node's child into a leaf node from - // another node type. - SparseNode::Leaf { key: leaf_key, .. } => { - self.nodes.remove(&child.path); + trace!( + target: "trie::sparse", + ?path, + ?child_path, + ?child_node_type, + "Extension node" + ); - let mut new_key = *key; - new_key.extend(leaf_key); - SparseNode::new_leaf(new_key) - } - // For an extension node, we collapse them into one extension node, - // extending the key - SparseNode::Extension { key: extension_key, .. } => { - self.nodes.remove(&child.path); + *store_in_db_trie = store_in_db_trie_value; - let mut new_key = *key; - new_key.extend(extension_key); - SparseNode::new_ext(new_key) - } - // For a branch node, we just leave the extension node as-is. - SparseNode::Branch { .. 
} => removed_node.node, + ( + rlp_node, + SparseNodeType::Extension { + // Inherit the `store_in_db_trie` flag from the child node, which is + // always the branch node + store_in_db_trie: store_in_db_trie_value, + }, + ) + } else { + // need to get rlp node for child first + buffers.path_stack.extend([ + RlpNodePathStackItem { level, path, is_in_prefix_set }, + RlpNodePathStackItem { + level: level + 1, + path: child_path, + is_in_prefix_set: None, + }, + ]); + continue + } + } + SparseNode::Branch { state_mask, hash, store_in_db_trie } => { + if let Some((hash, store_in_db_trie)) = + hash.zip(*store_in_db_trie).filter(|_| !prefix_set_contains(&path)) + { + buffers.rlp_node_stack.push(RlpNodeStackItem { + path, + rlp_node: RlpNode::word_rlp(&hash), + node_type: SparseNodeType::Branch { + store_in_db_trie: Some(store_in_db_trie), + }, + }); + continue } - } - &SparseNode::Branch { mut state_mask, hash: _, store_in_db_trie: _ } => { - // If the node is a branch node, we need to check the number of children left - // after deleting the child at the given nibble. + let retain_updates = self.updates.is_some() && prefix_set_contains(&path); - if let Some(removed_nibble) = removed_node.unset_branch_nibble { - state_mask.unset_bit(removed_nibble); + buffers.branch_child_buf.clear(); + // Walk children in a reverse order from `f` to `0`, so we pop the `0` first + // from the stack and keep walking in the sorted order. + for bit in CHILD_INDEX_RANGE.rev() { + if state_mask.is_bit_set(bit) { + let mut child = path; + child.push_unchecked(bit); + buffers.branch_child_buf.push(child); + } } - // If only one child is left set in the branch node, we need to collapse it. - if state_mask.count_bits() == 1 { - let child_nibble = - state_mask.first_set_bit_index().expect("state mask is not empty"); - - // Get full path of the only child node left. - let mut child_path = removed_path; - child_path.push_unchecked(child_nibble); + buffers + .branch_value_stack_buf + .resize(buffers.branch_child_buf.len(), Default::default()); + let mut added_children = false; - trace!(target: "trie::sparse", ?removed_path, ?child_path, "Branch node has only one child"); + let mut tree_mask = TrieMask::default(); + let mut hash_mask = TrieMask::default(); + let mut hashes = Vec::new(); + for (i, child_path) in buffers.branch_child_buf.iter().enumerate() { + if buffers.rlp_node_stack.last().is_some_and(|e| &e.path == child_path) { + let RlpNodeStackItem { + path: _, + rlp_node: child, + node_type: child_node_type, + } = buffers.rlp_node_stack.pop().unwrap(); - if self.nodes.get(&child_path).unwrap().is_hash() { - trace!(target: "trie::sparse", ?child_path, "Retrieving remaining blinded branch child"); - if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.blinded_node(&child_path)? - { - let decoded = TrieNode::decode(&mut &node[..])?; - trace!( - target: "trie::sparse", - ?child_path, - ?decoded, - ?tree_mask, - ?hash_mask, - "Revealing remaining blinded branch child" - ); - self.reveal_node( - child_path, - decoded, - TrieMasks { hash_mask, tree_mask }, - )?; - } - } + // Update the masks only if we need to retain trie updates + if retain_updates { + // SAFETY: it's a child, so it's never empty + let last_child_nibble = child_path.last().unwrap(); - // Get the only child node. - let child = self.nodes.get(&child_path).unwrap(); + // Determine whether we need to set trie mask bit. 
+ let should_set_tree_mask_bit = if let Some(store_in_db_trie) = + child_node_type.store_in_db_trie() + { + // A branch or an extension node explicitly set the + // `store_in_db_trie` flag + store_in_db_trie + } else { + // A blinded node has the tree mask bit set + child_node_type.is_hash() && + self.branch_node_tree_masks.get(&path).is_some_and( + |mask| mask.is_bit_set(last_child_nibble), + ) + }; + if should_set_tree_mask_bit { + tree_mask.set_bit(last_child_nibble); + } - let mut delete_child = false; - let new_node = match child { - SparseNode::Empty => return Err(SparseTrieErrorKind::Blind.into()), - &SparseNode::Hash(hash) => { - return Err(SparseTrieErrorKind::BlindedNode { - path: child_path, - hash, + // Set the hash mask. If a child node is a revealed branch node OR + // is a blinded node that has its hash mask bit set according to the + // database, set the hash mask bit and save the hash. + let hash = child.as_hash().filter(|_| { + child_node_type.is_branch() || + (child_node_type.is_hash() && + self.branch_node_hash_masks + .get(&path) + .is_some_and(|mask| { + mask.is_bit_set(last_child_nibble) + })) + }); + if let Some(hash) = hash { + hash_mask.set_bit(last_child_nibble); + hashes.push(hash); } - .into()) } - // If the only child is a leaf node, we downgrade the branch node into a - // leaf node, prepending the nibble to the key, and delete the old - // child. - SparseNode::Leaf { key, .. } => { - delete_child = true; - let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); - new_key.extend(key); - SparseNode::new_leaf(new_key) - } - // If the only child node is an extension node, we downgrade the branch - // node into an even longer extension node, prepending the nibble to the - // key, and delete the old child. - SparseNode::Extension { key, .. } => { - delete_child = true; + // Insert children in the resulting buffer in a normal order, + // because initially we iterated in reverse. + // SAFETY: i < len and len is never 0 + let original_idx = buffers.branch_child_buf.len() - i - 1; + buffers.branch_value_stack_buf[original_idx] = child; + added_children = true; + } else { + debug_assert!(!added_children); + buffers.path_stack.push(RlpNodePathStackItem { + level, + path, + is_in_prefix_set, + }); + buffers.path_stack.extend(buffers.branch_child_buf.drain(..).map( + |path| RlpNodePathStackItem { + level: level + 1, + path, + is_in_prefix_set: None, + }, + )); + continue 'main + } + } - let mut new_key = Nibbles::from_nibbles_unchecked([child_nibble]); - new_key.extend(key); - SparseNode::new_ext(new_key) - } - // If the only child is a branch node, we downgrade the current branch - // node into a one-nibble extension node. - SparseNode::Branch { .. } => { - SparseNode::new_ext(Nibbles::from_nibbles_unchecked([child_nibble])) - } - }; + trace!( + target: "trie::sparse", + ?path, + ?tree_mask, + ?hash_mask, + "Branch node masks" + ); - if delete_child { - self.nodes.remove(&child_path); - } + rlp_buf.clear(); + let branch_node_ref = + BranchNodeRef::new(&buffers.branch_value_stack_buf, *state_mask); + let rlp_node = branch_node_ref.rlp(rlp_buf); + *hash = rlp_node.as_hash(); - if let Some(updates) = self.updates.as_mut() { - updates.updated_nodes.remove(&removed_path); - updates.removed_nodes.insert(removed_path); + // Save a branch node update only if it's not a root node, and we need to + // persist updates. 
+ let store_in_db_trie_value = if let Some(updates) = + self.updates.as_mut().filter(|_| retain_updates && !path.is_empty()) + { + let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty(); + if store_in_db_trie { + // Store in DB trie if there are either any children that are stored in + // the DB trie, or any children represent hashed values + hashes.reverse(); + let branch_node = BranchNodeCompact::new( + *state_mask, + tree_mask, + hash_mask, + hashes, + hash.filter(|_| path.is_empty()), + ); + updates.updated_nodes.insert(path, branch_node); + } else if self + .branch_node_tree_masks + .get(&path) + .is_some_and(|mask| !mask.is_empty()) || + self.branch_node_hash_masks + .get(&path) + .is_some_and(|mask| !mask.is_empty()) + { + // If new tree and hash masks are empty, but previously they weren't, we + // need to remove the node update and add the node itself to the list of + // removed nodes. + updates.updated_nodes.remove(&path); + updates.removed_nodes.insert(path); + } else if self + .branch_node_hash_masks + .get(&path) + .is_none_or(|mask| mask.is_empty()) && + self.branch_node_hash_masks + .get(&path) + .is_none_or(|mask| mask.is_empty()) + { + // If new tree and hash masks are empty, and they were previously empty + // as well, we need to remove the node update. + updates.updated_nodes.remove(&path); } - new_node - } - // If more than one child is left set in the branch, we just re-insert it as-is. - else { - SparseNode::new_branch(state_mask) - } + store_in_db_trie + } else { + false + }; + *store_in_db_trie = Some(store_in_db_trie_value); + + ( + rlp_node, + SparseNodeType::Branch { store_in_db_trie: Some(store_in_db_trie_value) }, + ) } }; - child = RemovedSparseNode { - path: removed_path, - node: new_node.clone(), - unset_branch_nibble: None, - }; - trace!(target: "trie::sparse", ?removed_path, ?new_node, "Re-inserting the node"); - self.nodes.insert(removed_path, new_node); + trace!( + target: "trie::sparse", + ?_starting_path, + ?level, + ?path, + ?node, + ?node_type, + ?is_in_prefix_set, + "Added node to rlp node stack" + ); + + buffers.rlp_node_stack.push(RlpNodeStackItem { path, rlp_node, node_type }); } - Ok(()) + debug_assert_eq!(buffers.rlp_node_stack.len(), 1); + buffers.rlp_node_stack.pop().unwrap().rlp_node } } diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 8417b6875a9..3683c2327e8 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -7,6 +7,7 @@ use crate::{ use alloy_rlp::EMPTY_STRING_CODE; use alloy_trie::EMPTY_ROOT_HASH; use reth_trie_common::HashedPostState; +use reth_trie_sparse::SparseTrieInterface; use alloy_primitives::{ keccak256, From d026630746773c5f6fe90c957a8723601b5009e6 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 3 Jul 2025 16:06:08 +0100 Subject: [PATCH 035/305] perf(trie): implement `SparseTrieInterface` for `ParallelSparseTrie` (#17192) Co-authored-by: Claude --- crates/trie/sparse-parallel/src/trie.rs | 899 ++++++++++++------------ crates/trie/sparse/src/traits.rs | 2 +- 2 files changed, 469 insertions(+), 432 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 1cbcc2ca6ca..34c44cc613f 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -11,7 +11,8 @@ use reth_trie_common::{ }; use reth_trie_sparse::{ blinded::{BlindedProvider, RevealedNode}, - RlpNodeStackItem, SparseNode, SparseNodeType, 
SparseTrieUpdates, TrieMasks, + RlpNodeStackItem, SparseNode, SparseNodeType, SparseTrieInterface, SparseTrieUpdates, + TrieMasks, }; use smallvec::SmallVec; use std::sync::mpsc; @@ -55,77 +56,37 @@ impl Default for ParallelSparseTrie { } } -impl ParallelSparseTrie { - /// Returns a mutable reference to the lower `SparseSubtrie` for the given path, or None if the - /// path belongs to the upper trie. - /// - /// This method will create a new lower subtrie if one doesn't exist for the given path. If one - /// does exist, but its path field is longer than the given path, then the field will be set - /// to the given path. - fn lower_subtrie_for_path(&mut self, path: &Nibbles) -> Option<&mut Box> { - match SparseSubtrieType::from_path(path) { - SparseSubtrieType::Upper => None, - SparseSubtrieType::Lower(idx) => { - if let Some(subtrie) = self.lower_subtries[idx].as_mut() { - if path.len() < subtrie.path.len() { - subtrie.path = *path; - } - } else { - self.lower_subtries[idx] = Some(Box::new(SparseSubtrie::new(*path))); - } - - self.lower_subtries[idx].as_mut() - } - } - } - - /// Returns a mutable reference to either the lower or upper `SparseSubtrie` for the given path, - /// depending on the path's length. - /// - /// This method will create a new lower subtrie if one doesn't exist for the given path. If one - /// does exist, but its path field is longer than the given path, then the field will be set - /// to the given path. - fn subtrie_for_path(&mut self, path: &Nibbles) -> &mut Box { - // We can't just call `lower_subtrie_for_path` and return `upper_subtrie` if it returns - // None, because Rust complains about double mutable borrowing `self`. - if SparseSubtrieType::path_len_is_upper(path.len()) { - &mut self.upper_subtrie - } else { - self.lower_subtrie_for_path(path).unwrap() - } +impl SparseTrieInterface for ParallelSparseTrie { + fn from_root(root: TrieNode, masks: TrieMasks, retain_updates: bool) -> SparseTrieResult { + let mut trie = Self::default().with_updates(retain_updates); + trie.reveal_node(Nibbles::default(), root, masks)?; + Ok(trie) } - /// Creates a new revealed sparse trie from the given root node. - /// - /// # Returns - /// - /// A [`ParallelSparseTrie`] if successful, or an error if revealing fails. - pub fn from_root( - root_node: TrieNode, + fn with_root( + mut self, + root: TrieNode, masks: TrieMasks, retain_updates: bool, ) -> SparseTrieResult { - let mut trie = Self::default().with_updates(retain_updates); - trie.reveal_node(Nibbles::default(), root_node, masks)?; - Ok(trie) + self = self.with_updates(retain_updates); + + self.reveal_node(Nibbles::default(), root, masks)?; + Ok(self) } - /// Reveals a trie node if it has not been revealed before. - /// - /// This internal function decodes a trie node and inserts it into the nodes map. - /// It handles different node types (leaf, extension, branch) by appropriately - /// adding them to the trie structure and recursively revealing their children. - /// - /// # Returns - /// - /// `Ok(())` if successful, or an error if node was not revealed. 
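For orientation, a hedged construction sketch using only the entry points this hunk introduces (`from_root`, `TrieMasks::none`, the `retain_updates` flag, `take_updates`); the surrounding imports and paths are assumptions, not taken from this diff:

```rust
// Sketch only: `root` is a previously decoded TrieNode for the state root.
fn build_trie(root: TrieNode) -> SparseTrieResult<ParallelSparseTrie> {
    // `retain_updates = true` makes the trie record a `SparseTrieUpdates` set,
    // which a caller can later drain with `take_updates` for persistence.
    ParallelSparseTrie::from_root(root, TrieMasks::none(), true)
}
```

Equivalently, `ParallelSparseTrie::default().with_updates(true)` followed by `reveal_node` on the empty path produces the same state, which is exactly how `from_root` is implemented above.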
- pub fn reveal_node( + fn with_updates(mut self, retain_updates: bool) -> Self { + self.updates = retain_updates.then_some(SparseTrieUpdates::default()); + self + } + + fn reveal_node( &mut self, path: Nibbles, node: TrieNode, masks: TrieMasks, ) -> SparseTrieResult<()> { - if let Some(subtrie) = self.lower_subtrie_for_path(&path) { + if let Some(subtrie) = self.lower_subtrie_for_path_mut(&path) { return subtrie.reveal_node(path, &node, masks); } @@ -148,7 +109,7 @@ impl ParallelSparseTrie { if branch.state_mask.is_bit_set(idx) { let mut child_path = path; child_path.push_unchecked(idx); - self.lower_subtrie_for_path(&child_path) + self.lower_subtrie_for_path_mut(&child_path) .expect("child_path must have a lower subtrie") .reveal_node_or_hash(child_path, &branch.stack[stack_ptr])?; stack_ptr += 1; @@ -159,7 +120,7 @@ impl ParallelSparseTrie { TrieNode::Extension(ext) => { let mut child_path = path; child_path.extend(&ext.key); - if let Some(subtrie) = self.lower_subtrie_for_path(&child_path) { + if let Some(subtrie) = self.lower_subtrie_for_path_mut(&child_path) { subtrie.reveal_node_or_hash(child_path, &ext.child)?; } } @@ -169,24 +130,11 @@ impl ParallelSparseTrie { Ok(()) } - /// Updates or inserts a leaf node at the specified key path with the provided RLP-encoded - /// value. - /// - /// This method updates the internal prefix set and, if the leaf did not previously exist, - /// adjusts the trie structure by inserting new leaf nodes, splitting branch nodes, or - /// collapsing extension nodes as needed. - /// - /// # Returns - /// - /// Returns `Ok(())` if the update is successful. - /// - /// Note: If an update requires revealing a blinded node, an error is returned if the blinded - /// provider returns an error. - pub fn update_leaf( + fn update_leaf( &mut self, full_path: Nibbles, value: Vec, - provider: impl BlindedProvider, + provider: P, ) -> SparseTrieResult<()> { self.prefix_set.insert(full_path); let existing = self.upper_subtrie.inner.values.insert(full_path, value.clone()); @@ -256,7 +204,7 @@ impl ParallelSparseTrie { }; // Get or create the subtrie with the exact node path (not truncated to 2 nibbles). - let subtrie = self.subtrie_for_path(node_path); + let subtrie = self.subtrie_for_path_mut(node_path); // Insert the leaf value if we have one if let Some((leaf_full_path, value)) = leaf_value { @@ -273,7 +221,7 @@ impl ParallelSparseTrie { // // The next_path here represents where we need to continue traversal, which may // be longer than 2 nibbles if we're following an extension node. - let subtrie = self.subtrie_for_path(&next_path); + let subtrie = self.subtrie_for_path_mut(&next_path); // Create an empty root at the subtrie path if the subtrie is empty if subtrie.nodes.is_empty() { @@ -288,244 +236,10 @@ impl ParallelSparseTrie { Ok(()) } - /// Returns the next node in the traversal path from the given path towards the leaf for the - /// given full leaf path, or an error if any node along the traversal path is not revealed. - /// - /// - /// ## Panics - /// - /// If `from_path` is not a prefix of `leaf_full_path`. 
- fn find_next_to_leaf( - from_path: &Nibbles, - from_node: &SparseNode, - leaf_full_path: &Nibbles, - ) -> SparseTrieResult { - debug_assert!(leaf_full_path.len() >= from_path.len()); - debug_assert!(leaf_full_path.starts_with(from_path)); - - match from_node { - SparseNode::Empty => Err(SparseTrieErrorKind::Blind.into()), - SparseNode::Hash(hash) => { - Err(SparseTrieErrorKind::BlindedNode { path: *from_path, hash: *hash }.into()) - } - SparseNode::Leaf { key, .. } => { - let mut found_full_path = *from_path; - found_full_path.extend(key); - - if &found_full_path == leaf_full_path { - return Ok(FindNextToLeafOutcome::Found) - } - Ok(FindNextToLeafOutcome::NotFound) - } - SparseNode::Extension { key, .. } => { - if leaf_full_path.len() == from_path.len() { - return Ok(FindNextToLeafOutcome::NotFound) - } - - let mut child_path = *from_path; - child_path.extend(key); - - if !leaf_full_path.starts_with(&child_path) { - return Ok(FindNextToLeafOutcome::NotFound) - } - Ok(FindNextToLeafOutcome::ContinueFrom(child_path)) - } - SparseNode::Branch { state_mask, .. } => { - if leaf_full_path.len() == from_path.len() { - return Ok(FindNextToLeafOutcome::NotFound) - } - - let nibble = leaf_full_path.get_unchecked(from_path.len()); - if !state_mask.is_bit_set(nibble) { - return Ok(FindNextToLeafOutcome::NotFound) - } - - let mut child_path = *from_path; - child_path.push_unchecked(nibble); - - Ok(FindNextToLeafOutcome::ContinueFrom(child_path)) - } - } - } - - /// Called when a child node has collapsed into its parent as part of `remove_leaf`. If the - /// new parent node is a leaf, then the previous child also was, and if the previous child was - /// on a lower subtrie while the parent is on an upper then the leaf value needs to be moved to - /// the upper. - fn move_value_on_leaf_removal( - &mut self, - parent_path: &Nibbles, - new_parent_node: &SparseNode, - prev_child_path: &Nibbles, - ) { - // If the parent path isn't in the upper then it doesn't matter what the new node is, - // there's no situation where a leaf value needs to be moved. - if SparseSubtrieType::from_path(parent_path).lower_index().is_some() { - return; - } - - if let SparseNode::Leaf { key, .. } = new_parent_node { - let Some(prev_child_subtrie) = self.lower_subtrie_for_path(prev_child_path) else { - return; - }; - - let mut leaf_full_path = *parent_path; - leaf_full_path.extend(key); - - let val = prev_child_subtrie.inner.values.remove(&leaf_full_path).expect("ParallelSparseTrie is in an inconsistent state, expected value on subtrie which wasn't found"); - self.upper_subtrie.inner.values.insert(leaf_full_path, val); - } - } - - /// Used by `remove_leaf` to ensure that when a node is removed from a lower subtrie that any - /// externalities are handled. These can include: - /// - Removing the lower subtrie completely, if it is now empty. - /// - Updating the `path` field of the lower subtrie to indicate that its root node has changed. - /// - /// This method assumes that the caller will deal with putting all other nodes in the trie into - /// a consistent state after the removal of this one. - /// - /// ## Panics - /// - /// - If the removed node was not a leaf or extension. - fn remove_node(&mut self, path: &Nibbles) { - let subtrie = self.subtrie_for_path(path); - let node = subtrie.nodes.remove(path); - - let Some(idx) = SparseSubtrieType::from_path(path).lower_index() else { - // When removing a node from the upper trie there's nothing special we need to do to fix - // its path field; the upper trie's path is always empty. 
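The upper/lower split used throughout these helpers can be summarised numerically: paths shorter than two nibbles live in the single upper subtrie, everything else maps to a lower subtrie keyed by its first two nibbles. A toy index function; the 16 * 16 = 256 fan-out and the exact index formula are assumptions inferred from the two-nibble comments above (the real mapping lives in `SparseSubtrieType` and `NUM_LOWER_SUBTRIES`):

```rust
/// Toy version of `SparseSubtrieType::from_path`: `None` means the upper
/// subtrie, `Some(i)` the i-th lower subtrie.
fn subtrie_index(path: &[u8]) -> Option<usize> {
    if path.len() < 2 {
        None // root and one-nibble paths stay in the upper subtrie
    } else {
        Some((path[0] as usize) << 4 | path[1] as usize)
    }
}

fn main() {
    assert_eq!(subtrie_index(&[]), None); // root -> upper
    assert_eq!(subtrie_index(&[0x3]), None); // one nibble -> upper
    assert_eq!(subtrie_index(&[0x3, 0xa]), Some(0x3a)); // two nibbles -> lower
    assert_eq!(subtrie_index(&[0x3, 0xa, 0x7]), Some(0x3a)); // same subtrie
}
```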
- return; - }; - - match node { - Some(SparseNode::Leaf { .. }) => { - // If the leaf was the final node in its lower subtrie then we can remove the lower - // subtrie completely. - if subtrie.nodes.is_empty() { - self.lower_subtries[idx] = None; - } - } - Some(SparseNode::Extension { key, .. }) => { - // If the removed extension was the root node of a lower subtrie then the lower - // subtrie's `path` needs to be updated to be whatever node the extension used to - // point to. - if &subtrie.path == path { - subtrie.path.extend(&key); - } - } - _ => panic!("Expected to remove a leaf or extension, but removed {node:?}"), - } - } - - /// Given the path to a parent branch node and a child node which is the sole remaining child on - /// that branch after removing a leaf, returns a node to replace the parent branch node and a - /// boolean indicating if the child should be deleted. - /// - /// ## Panics - /// - /// - If either parent or child node is not already revealed. - /// - If parent's path is not a prefix of the child's path. - fn branch_changes_on_leaf_removal( - parent_path: &Nibbles, - remaining_child_path: &Nibbles, - remaining_child_node: &SparseNode, - ) -> (SparseNode, bool) { - debug_assert!(remaining_child_path.len() > parent_path.len()); - debug_assert!(remaining_child_path.starts_with(parent_path)); - - let remaining_child_nibble = remaining_child_path.get_unchecked(parent_path.len()); - - // If we swap the branch node out either an extension or leaf, depending on - // what its remaining child is. - match remaining_child_node { - SparseNode::Empty | SparseNode::Hash(_) => { - panic!("remaining child must have been revealed already") - } - // If the only child is a leaf node, we downgrade the branch node into a - // leaf node, prepending the nibble to the key, and delete the old - // child. - SparseNode::Leaf { key, .. } => { - let mut new_key = Nibbles::from_nibbles_unchecked([remaining_child_nibble]); - new_key.extend(key); - (SparseNode::new_leaf(new_key), true) - } - // If the only child node is an extension node, we downgrade the branch - // node into an even longer extension node, prepending the nibble to the - // key, and delete the old child. - SparseNode::Extension { key, .. } => { - let mut new_key = Nibbles::from_nibbles_unchecked([remaining_child_nibble]); - new_key.extend(key); - (SparseNode::new_ext(new_key), true) - } - // If the only child is a branch node, we downgrade the current branch - // node into a one-nibble extension node. - SparseNode::Branch { .. } => ( - SparseNode::new_ext(Nibbles::from_nibbles_unchecked([remaining_child_nibble])), - false, - ), - } - } - - /// Given the path to a parent extension and its key, and a child node (not necessarily on this - /// subtrie), returns an optional replacement parent node. If a replacement is returned then the - /// child node should be deleted. - /// - /// ## Panics - /// - /// - If either parent or child node is not already revealed. - /// - If parent's path is not a prefix of the child's path. - fn extension_changes_on_leaf_removal( - parent_path: &Nibbles, - parent_key: &Nibbles, - child_path: &Nibbles, - child: &SparseNode, - ) -> Option { - debug_assert!(child_path.len() > parent_path.len()); - debug_assert!(child_path.starts_with(parent_path)); - - // If the parent node is an extension node, we need to look at its child to see - // if we need to merge it. 
- match child { - SparseNode::Empty | SparseNode::Hash(_) => { - panic!("child must be revealed") - } - // For a leaf node, we collapse the extension node into a leaf node, - // extending the key. While it's impossible to encounter an extension node - // followed by a leaf node in a complete trie, it's possible here because we - // could have downgraded the extension node's child into a leaf node from a - // branch in a previous call to `branch_changes_on_leaf_removal`. - SparseNode::Leaf { key, .. } => { - let mut new_key = *parent_key; - new_key.extend(key); - Some(SparseNode::new_leaf(new_key)) - } - // Similar to the leaf node, for an extension node, we collapse them into one - // extension node, extending the key. - SparseNode::Extension { key, .. } => { - let mut new_key = *parent_key; - new_key.extend(key); - Some(SparseNode::new_ext(new_key)) - } - // For a branch node, we just leave the extension node as-is. - SparseNode::Branch { .. } => None, - } - } - - /// Removes a leaf node from the trie at the specified full path of a value (that is, the leaf's - /// path + its key). - /// - /// This function removes the leaf value from the internal values map and then traverses - /// the trie to remove or adjust intermediate nodes, merging or collapsing them as necessary. - /// - /// # Returns - /// - /// Returns `Ok(())` if the leaf is successfully removed or was not present in the trie, - /// otherwise returns an error if a blinded node prevents removal. - pub fn remove_leaf( + fn remove_leaf( &mut self, - leaf_full_path: &Nibbles, - provider: impl BlindedProvider, + full_path: &Nibbles, + provider: P, ) -> SparseTrieResult<()> { // When removing a leaf node it's possibly necessary to modify its parent node, and possibly // the parent's parent node. It is not ever necessary to descend further than that; once an @@ -558,7 +272,7 @@ impl ParallelSparseTrie { loop { let curr_node = curr_subtrie.nodes.get_mut(&curr_path).unwrap(); - match Self::find_next_to_leaf(&curr_path, curr_node, leaf_full_path)? { + match Self::find_next_to_leaf(&curr_path, curr_node, full_path)? { FindNextToLeafOutcome::NotFound => return Ok(()), // leaf isn't in the trie FindNextToLeafOutcome::Found => { // this node is the target leaf @@ -617,8 +331,8 @@ impl ParallelSparseTrie { // We've traversed to the leaf and collected its ancestors as necessary. Remove the leaf // from its SparseSubtrie. - self.prefix_set.insert(*leaf_full_path); - leaf_subtrie.inner.values.remove(leaf_full_path); + self.prefix_set.insert(*full_path); + leaf_subtrie.inner.values.remove(full_path); self.remove_node(&leaf_path); // If the leaf was at the root replace its node with the empty value. We can stop execution @@ -655,7 +369,7 @@ impl ParallelSparseTrie { "Branch node has only one child", ); - let remaining_child_subtrie = self.subtrie_for_path(&remaining_child_path); + let remaining_child_subtrie = self.subtrie_for_path_mut(&remaining_child_path); // If the remaining child node is not yet revealed then we have to reveal it here, // otherwise it's not possible to know how to collapse the branch. @@ -715,87 +429,421 @@ impl ParallelSparseTrie { updates.removed_nodes.insert(*branch_path); } - new_branch_node - } else { - // If more than one child is left set in the branch, we just re-insert it with the - // updated state_mask. 
- SparseNode::new_branch(state_mask) - }; - - let branch_subtrie = self.subtrie_for_path(branch_path); - branch_subtrie.nodes.insert(*branch_path, new_branch_node.clone()); - branch_parent_node = Some(new_branch_node); - }; + new_branch_node + } else { + // If more than one child is left set in the branch, we just re-insert it with the + // updated state_mask. + SparseNode::new_branch(state_mask) + }; + + let branch_subtrie = self.subtrie_for_path_mut(branch_path); + branch_subtrie.nodes.insert(*branch_path, new_branch_node.clone()); + branch_parent_node = Some(new_branch_node); + }; + + // If there is a grandparent extension node then there will necessarily be a parent branch + // node. Execute any required changes for the extension node, relative to the (possibly now + // replaced with a leaf or extension) branch node. + if let (Some(ext_path), Some(SparseNode::Extension { key: shortkey, .. })) = + (ext_grandparent_path, &ext_grandparent_node) + { + let ext_subtrie = self.subtrie_for_path_mut(&ext_path); + let branch_path = branch_parent_path.as_ref().unwrap(); + + if let Some(new_ext_node) = Self::extension_changes_on_leaf_removal( + &ext_path, + shortkey, + branch_path, + branch_parent_node.as_ref().unwrap(), + ) { + ext_subtrie.nodes.insert(ext_path, new_ext_node.clone()); + self.move_value_on_leaf_removal(&ext_path, &new_ext_node, branch_path); + self.remove_node(branch_path); + } + } + + Ok(()) + } + + fn root(&mut self) -> B256 { + trace!(target: "trie::parallel_sparse", "Calculating trie root hash"); + + // Update all lower subtrie hashes + self.update_subtrie_hashes(); + + // Update hashes for the upper subtrie using our specialized function + // that can access both upper and lower subtrie nodes + let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); + let root_rlp = self.update_upper_subtrie_hashes(&mut prefix_set); + + // Return the root hash + root_rlp.as_hash().unwrap_or(EMPTY_ROOT_HASH) + } + + fn update_subtrie_hashes(&mut self) { + trace!(target: "trie::parallel_sparse", "Updating subtrie hashes"); + + // Take changed subtries according to the prefix set + let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); + let (subtries, unchanged_prefix_set) = self.take_changed_lower_subtries(&mut prefix_set); + + // Update the prefix set with the keys that didn't have matching subtries + self.prefix_set = unchanged_prefix_set; + + let (tx, rx) = mpsc::channel(); + + #[cfg(not(feature = "std"))] + // Update subtrie hashes serially if nostd + for ChangedSubtrie { index, mut subtrie, mut prefix_set } in subtries { + subtrie.update_hashes(&mut prefix_set); + tx.send((index, subtrie)).unwrap(); + } + + #[cfg(feature = "std")] + // Update subtrie hashes in parallel + { + use rayon::iter::{IntoParallelIterator, ParallelIterator}; + subtries + .into_par_iter() + .map(|ChangedSubtrie { index, mut subtrie, mut prefix_set }| { + subtrie.update_hashes(&mut prefix_set); + (index, subtrie) + }) + .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap()); + } + + drop(tx); + + // Return updated subtries back to the trie + for (index, subtrie) in rx { + self.lower_subtries[index] = Some(subtrie); + } + } + + fn get_leaf_value(&self, full_path: &Nibbles) -> Option<&Vec> { + self.subtrie_for_path(full_path).and_then(|subtrie| subtrie.inner.values.get(full_path)) + } + + fn take_updates(&mut self) -> SparseTrieUpdates { + core::iter::once(&mut self.upper_subtrie) + .chain(self.lower_subtries.iter_mut().flatten()) + .fold(SparseTrieUpdates::default(), |mut acc, 
subtrie| { + acc.extend(subtrie.take_updates()); + acc + }) + } + + fn wipe(&mut self) { + self.upper_subtrie.wipe(); + self.lower_subtries = [const { None }; NUM_LOWER_SUBTRIES]; + self.prefix_set = PrefixSetMut::all(); + self.updates = self.updates.is_some().then(SparseTrieUpdates::wiped); + } + + fn clear(&mut self) { + self.upper_subtrie.clear(); + for subtrie in self.lower_subtries.iter_mut().flatten() { + subtrie.clear(); + } + self.prefix_set.clear(); + self.updates = None; + } + + fn find_leaf( + &self, + _full_path: &Nibbles, + _expected_value: Option<&Vec>, + ) -> Result { + todo!() + } +} + +impl ParallelSparseTrie { + /// Returns a reference to the lower `SparseSubtrie` for the given path, or None if the + /// path belongs to the upper trie or a lower subtrie for the path doesn't exist. + fn lower_subtrie_for_path(&self, path: &Nibbles) -> Option<&SparseSubtrie> { + match SparseSubtrieType::from_path(path) { + SparseSubtrieType::Upper => None, + SparseSubtrieType::Lower(idx) => { + self.lower_subtries[idx].as_ref().map(|subtrie| subtrie.as_ref()) + } + } + } + + /// Returns a mutable reference to the lower `SparseSubtrie` for the given path, or None if the + /// path belongs to the upper trie. + /// + /// This method will create a new lower subtrie if one doesn't exist for the given path. If one + /// does exist, but its path field is longer than the given path, then the field will be set + /// to the given path. + fn lower_subtrie_for_path_mut(&mut self, path: &Nibbles) -> Option<&mut Box> { + match SparseSubtrieType::from_path(path) { + SparseSubtrieType::Upper => None, + SparseSubtrieType::Lower(idx) => { + if let Some(subtrie) = self.lower_subtries[idx].as_mut() { + if path.len() < subtrie.path.len() { + subtrie.path = *path; + } + } else { + self.lower_subtries[idx] = Some(Box::new(SparseSubtrie::new(*path))); + } + + self.lower_subtries[idx].as_mut() + } + } + } + + /// Returns a reference to either the lower or upper `SparseSubtrie` for the given path, + /// depending on the path's length. + /// + /// Returns `None` if a lower subtrie does not exist for the given path. + fn subtrie_for_path(&self, path: &Nibbles) -> Option<&SparseSubtrie> { + // We can't just call `lower_subtrie_for_path` and return `upper_subtrie` if it returns + // None, because Rust complains about double mutable borrowing `self`. + if SparseSubtrieType::path_len_is_upper(path.len()) { + Some(&self.upper_subtrie) + } else { + self.lower_subtrie_for_path(path) + } + } + + /// Returns a mutable reference to either the lower or upper `SparseSubtrie` for the given path, + /// depending on the path's length. + /// + /// This method will create a new lower subtrie if one doesn't exist for the given path. If one + /// does exist, but its path field is longer than the given path, then the field will be set + /// to the given path. + fn subtrie_for_path_mut(&mut self, path: &Nibbles) -> &mut Box { + // We can't just call `lower_subtrie_for_path` and return `upper_subtrie` if it returns + // None, because Rust complains about double mutable borrowing `self`. + if SparseSubtrieType::path_len_is_upper(path.len()) { + &mut self.upper_subtrie + } else { + self.lower_subtrie_for_path_mut(path).unwrap() + } + } + + /// Returns the next node in the traversal path from the given path towards the leaf for the + /// given full leaf path, or an error if any node along the traversal path is not revealed. + /// + /// + /// ## Panics + /// + /// If `from_path` is not a prefix of `leaf_full_path`. 
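Before the implementation below, a reduced model of the single traversal step it performs, restricted to the extension-node case; `Vec<u8>` paths replace `Nibbles`, and `Step` mirrors the `FindNextToLeafOutcome` used by `remove_leaf`:

```rust
#[derive(Debug, PartialEq)]
enum Step {
    Found,
    NotFound,
    ContinueFrom(Vec<u8>),
}

/// One step of the descent: the extension's child lives at
/// `from_path ++ ext_key`; the leaf is reachable only if its full path still
/// starts with that child path.
fn step_towards_leaf(from_path: &[u8], ext_key: &[u8], leaf_full_path: &[u8]) -> Step {
    let mut child_path = from_path.to_vec();
    child_path.extend_from_slice(ext_key);
    if leaf_full_path.len() == from_path.len() {
        Step::NotFound // the leaf would have to end at this interior node
    } else if leaf_full_path.starts_with(&child_path) {
        Step::ContinueFrom(child_path)
    } else {
        Step::NotFound // diverges inside the extension key: exclusion
    }
}

fn main() {
    // Leaf 0x1 0x2 0x3 is reachable through an extension 0x1 0x2 at the root.
    assert_eq!(
        step_towards_leaf(&[], &[0x1, 0x2], &[0x1, 0x2, 0x3]),
        Step::ContinueFrom(vec![0x1, 0x2])
    );
    // Leaf 0x1 0x9 diverges inside the extension key.
    assert_eq!(step_towards_leaf(&[], &[0x1, 0x2], &[0x1, 0x9]), Step::NotFound);
}
```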
+ fn find_next_to_leaf( + from_path: &Nibbles, + from_node: &SparseNode, + leaf_full_path: &Nibbles, + ) -> SparseTrieResult { + debug_assert!(leaf_full_path.len() >= from_path.len()); + debug_assert!(leaf_full_path.starts_with(from_path)); + + match from_node { + SparseNode::Empty => Err(SparseTrieErrorKind::Blind.into()), + SparseNode::Hash(hash) => { + Err(SparseTrieErrorKind::BlindedNode { path: *from_path, hash: *hash }.into()) + } + SparseNode::Leaf { key, .. } => { + let mut found_full_path = *from_path; + found_full_path.extend(key); + + if &found_full_path == leaf_full_path { + return Ok(FindNextToLeafOutcome::Found) + } + Ok(FindNextToLeafOutcome::NotFound) + } + SparseNode::Extension { key, .. } => { + if leaf_full_path.len() == from_path.len() { + return Ok(FindNextToLeafOutcome::NotFound) + } + + let mut child_path = *from_path; + child_path.extend(key); + + if !leaf_full_path.starts_with(&child_path) { + return Ok(FindNextToLeafOutcome::NotFound) + } + Ok(FindNextToLeafOutcome::ContinueFrom(child_path)) + } + SparseNode::Branch { state_mask, .. } => { + if leaf_full_path.len() == from_path.len() { + return Ok(FindNextToLeafOutcome::NotFound) + } + + let nibble = leaf_full_path.get_unchecked(from_path.len()); + if !state_mask.is_bit_set(nibble) { + return Ok(FindNextToLeafOutcome::NotFound) + } - // If there is a grandparent extension node then there will necessarily be a parent branch - // node. Execute any required changes for the extension node, relative to the (possibly now - // replaced with a leaf or extension) branch node. - if let (Some(ext_path), Some(SparseNode::Extension { key: shortkey, .. })) = - (ext_grandparent_path, &ext_grandparent_node) - { - let ext_subtrie = self.subtrie_for_path(&ext_path); - let branch_path = branch_parent_path.as_ref().unwrap(); + let mut child_path = *from_path; + child_path.push_unchecked(nibble); - if let Some(new_ext_node) = Self::extension_changes_on_leaf_removal( - &ext_path, - shortkey, - branch_path, - branch_parent_node.as_ref().unwrap(), - ) { - ext_subtrie.nodes.insert(ext_path, new_ext_node.clone()); - self.move_value_on_leaf_removal(&ext_path, &new_ext_node, branch_path); - self.remove_node(branch_path); + Ok(FindNextToLeafOutcome::ContinueFrom(child_path)) } } + } - Ok(()) + /// Called when a child node has collapsed into its parent as part of `remove_leaf`. If the + /// new parent node is a leaf, then the previous child also was, and if the previous child was + /// on a lower subtrie while the parent is on an upper then the leaf value needs to be moved to + /// the upper. + fn move_value_on_leaf_removal( + &mut self, + parent_path: &Nibbles, + new_parent_node: &SparseNode, + prev_child_path: &Nibbles, + ) { + // If the parent path isn't in the upper then it doesn't matter what the new node is, + // there's no situation where a leaf value needs to be moved. + if SparseSubtrieType::from_path(parent_path).lower_index().is_some() { + return; + } + + if let SparseNode::Leaf { key, .. } = new_parent_node { + let Some(prev_child_subtrie) = self.lower_subtrie_for_path_mut(prev_child_path) else { + return; + }; + + let mut leaf_full_path = *parent_path; + leaf_full_path.extend(key); + + let val = prev_child_subtrie.inner.values.remove(&leaf_full_path).expect("ParallelSparseTrie is in an inconsistent state, expected value on subtrie which wasn't found"); + self.upper_subtrie.inner.values.insert(leaf_full_path, val); + } } - /// Recalculates and updates the RLP hashes of nodes up to level [`UPPER_TRIE_MAX_DEPTH`] of the - /// trie. 
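The `update_subtrie_hashes` body earlier in this hunk fans the per-subtrie work out with rayon and collects `(index, subtrie)` pairs back over an mpsc channel. The same fan-out/fan-in skeleton in isolation, with squaring standing in for `update_hashes`; this requires the `rayon` crate:

```rust
use rayon::iter::{IntoParallelIterator, ParallelIterator};
use std::sync::mpsc;

fn main() {
    let subtries: Vec<(usize, u64)> = (0..8usize).map(|i| (i, i as u64)).collect();
    let (tx, rx) = mpsc::channel();

    // `for_each_init` clones one Sender per worker, exactly as in the diff.
    subtries
        .into_par_iter()
        .map(|(index, value)| (index, value * value)) // the parallel work
        .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap());

    // Drop the original sender so the receive loop terminates once the
    // workers finish.
    drop(tx);

    // Slot results back by index, like `self.lower_subtries[index] = ...`.
    let mut results = [0u64; 8];
    for (index, hash) in rx {
        results[index] = hash;
    }
    assert_eq!(results[3], 9);
}
```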
+ /// Used by `remove_leaf` to ensure that when a node is removed from a lower subtrie that any + /// externalities are handled. These can include: + /// - Removing the lower subtrie completely, if it is now empty. + /// - Updating the `path` field of the lower subtrie to indicate that its root node has changed. /// - /// The root node is considered to be at level 0. This method is useful for optimizing - /// hash recalculations after localized changes to the trie structure. + /// This method assumes that the caller will deal with putting all other nodes in the trie into + /// a consistent state after the removal of this one. /// - /// This function first identifies all nodes that have changed (based on the prefix set) below - /// level [`UPPER_TRIE_MAX_DEPTH`] of the trie, then recalculates their RLP representation. - pub fn update_lower_subtrie_hashes(&mut self) { - trace!(target: "trie::parallel_sparse", "Updating subtrie hashes"); + /// ## Panics + /// + /// - If the removed node was not a leaf or extension. + fn remove_node(&mut self, path: &Nibbles) { + let subtrie = self.subtrie_for_path_mut(path); + let node = subtrie.nodes.remove(path); - // Take changed subtries according to the prefix set - let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); - let (subtries, unchanged_prefix_set) = self.take_changed_lower_subtries(&mut prefix_set); + let Some(idx) = SparseSubtrieType::from_path(path).lower_index() else { + // When removing a node from the upper trie there's nothing special we need to do to fix + // its path field; the upper trie's path is always empty. + return; + }; - // Update the prefix set with the keys that didn't have matching subtries - self.prefix_set = unchanged_prefix_set; + match node { + Some(SparseNode::Leaf { .. }) => { + // If the leaf was the final node in its lower subtrie then we can remove the lower + // subtrie completely. + if subtrie.nodes.is_empty() { + self.lower_subtries[idx] = None; + } + } + Some(SparseNode::Extension { key, .. }) => { + // If the removed extension was the root node of a lower subtrie then the lower + // subtrie's `path` needs to be updated to be whatever node the extension used to + // point to. + if &subtrie.path == path { + subtrie.path.extend(&key); + } + } + _ => panic!("Expected to remove a leaf or extension, but removed {node:?}"), + } + } - let (tx, rx) = mpsc::channel(); + /// Given the path to a parent branch node and a child node which is the sole remaining child on + /// that branch after removing a leaf, returns a node to replace the parent branch node and a + /// boolean indicating if the child should be deleted. + /// + /// ## Panics + /// + /// - If either parent or child node is not already revealed. + /// - If parent's path is not a prefix of the child's path. 
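+ ///
+ /// ## Example
+ ///
+ /// A hypothetical collapse: a branch at path `0x1` is left with a single
+ /// remaining child, a leaf at path `0x12` with key `0x3`. The child nibble
+ /// `0x2` is prepended to the leaf key, so the branch is replaced by a leaf
+ /// with key `0x23` (full leaf path `0x123`), and `true` is returned so that
+ /// the old child gets deleted.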
+ fn branch_changes_on_leaf_removal(
+ parent_path: &Nibbles,
+ remaining_child_path: &Nibbles,
+ remaining_child_node: &SparseNode,
+ ) -> (SparseNode, bool) {
+ debug_assert!(remaining_child_path.len() > parent_path.len());
+ debug_assert!(remaining_child_path.starts_with(parent_path));
- #[cfg(not(feature = "std"))]
- // Update subtrie hashes serially if nostd
- for ChangedSubtrie { index, mut subtrie, mut prefix_set } in subtries {
- subtrie.update_hashes(&mut prefix_set);
- tx.send((index, subtrie)).unwrap();
- }
+ let remaining_child_nibble = remaining_child_path.get_unchecked(parent_path.len());
- #[cfg(feature = "std")]
- // Update subtrie hashes in parallel
- {
- use rayon::iter::{IntoParallelIterator, ParallelIterator};
- subtries
- .into_par_iter()
- .map(|ChangedSubtrie { index, mut subtrie, mut prefix_set }| {
- subtrie.update_hashes(&mut prefix_set);
- (index, subtrie)
- })
- .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap());
+ // We swap the branch node out for either an extension or a leaf, depending on
+ // what its remaining child is.
+ match remaining_child_node {
+ SparseNode::Empty | SparseNode::Hash(_) => {
+ panic!("remaining child must have been revealed already")
+ }
+ // If the only child is a leaf node, we downgrade the branch node into a
+ // leaf node, prepending the nibble to the key, and delete the old
+ // child.
+ SparseNode::Leaf { key, .. } => {
+ let mut new_key = Nibbles::from_nibbles_unchecked([remaining_child_nibble]);
+ new_key.extend(key);
+ (SparseNode::new_leaf(new_key), true)
+ }
+ // If the only child node is an extension node, we downgrade the branch
+ // node into an even longer extension node, prepending the nibble to the
+ // key, and delete the old child.
+ SparseNode::Extension { key, .. } => {
+ let mut new_key = Nibbles::from_nibbles_unchecked([remaining_child_nibble]);
+ new_key.extend(key);
+ (SparseNode::new_ext(new_key), true)
+ }
+ // If the only child is a branch node, we downgrade the current branch
+ // node into a one-nibble extension node.
+ SparseNode::Branch { .. } => (
+ SparseNode::new_ext(Nibbles::from_nibbles_unchecked([remaining_child_nibble])),
+ false,
+ ),
 }
+ }
- drop(tx);
+ /// Given the path to a parent extension and its key, and a child node (not necessarily on this
+ /// subtrie), returns an optional replacement parent node. If a replacement is returned then the
+ /// child node should be deleted.
+ ///
+ /// ## Panics
+ ///
+ /// - If either parent or child node is not already revealed.
+ /// - If parent's path is not a prefix of the child's path.
+ fn extension_changes_on_leaf_removal(
+ parent_path: &Nibbles,
+ parent_key: &Nibbles,
+ child_path: &Nibbles,
+ child: &SparseNode,
+ ) -> Option<SparseNode> {
+ debug_assert!(child_path.len() > parent_path.len());
+ debug_assert!(child_path.starts_with(parent_path));
- // Return updated subtries back to the trie
- for (index, subtrie) in rx {
- self.lower_subtries[index] = Some(subtrie);
+ // If the parent node is an extension node, we need to look at its child to see
+ // if we need to merge it.
+ match child {
+ SparseNode::Empty | SparseNode::Hash(_) => {
+ panic!("child must be revealed")
+ }
+ // For a leaf node, we collapse the extension node into a leaf node,
+ // extending the key.
While it's impossible to encounter an extension node + // followed by a leaf node in a complete trie, it's possible here because we + // could have downgraded the extension node's child into a leaf node from a + // branch in a previous call to `branch_changes_on_leaf_removal`. + SparseNode::Leaf { key, .. } => { + let mut new_key = *parent_key; + new_key.extend(key); + Some(SparseNode::new_leaf(new_key)) + } + // Similar to the leaf node, for an extension node, we collapse them into one + // extension node, extending the key. + SparseNode::Extension { key, .. } => { + let mut new_key = *parent_key; + new_key.extend(key); + Some(SparseNode::new_ext(new_key)) + } + // For a branch node, we just leave the extension node as-is. + SparseNode::Branch { .. } => None, } } @@ -836,49 +884,6 @@ impl ParallelSparseTrie { self.upper_subtrie.inner.buffers.rlp_node_stack.pop().unwrap().rlp_node } - /// Calculates and returns the root hash of the trie. - /// - /// Before computing the hash, this function processes any remaining (dirty) nodes by - /// updating their RLP encodings. The root hash is either: - /// 1. The cached hash (if no dirty nodes were found) - /// 2. The keccak256 hash of the root node's RLP representation - pub fn root(&mut self) -> B256 { - trace!(target: "trie::parallel_sparse", "Calculating trie root hash"); - - // Update all lower subtrie hashes - self.update_lower_subtrie_hashes(); - - // Update hashes for the upper subtrie using our specialized function - // that can access both upper and lower subtrie nodes - let mut prefix_set = core::mem::take(&mut self.prefix_set).freeze(); - let root_rlp = self.update_upper_subtrie_hashes(&mut prefix_set); - - // Return the root hash - root_rlp.as_hash().unwrap_or(EMPTY_ROOT_HASH) - } - - /// Configures the trie to retain information about updates. - /// - /// If `retain_updates` is true, the trie will record branch node updates and deletions. - /// This information can then be used to efficiently update an external database. - pub fn with_updates(mut self, retain_updates: bool) -> Self { - self.updates = retain_updates.then_some(SparseTrieUpdates::default()); - self - } - - /// Consumes and returns the currently accumulated trie updates. - /// - /// This is useful when you want to apply the updates to an external database, - /// and then start tracking a new set of updates. - pub fn take_updates(&mut self) -> SparseTrieUpdates { - core::iter::once(&mut self.upper_subtrie) - .chain(self.lower_subtries.iter_mut().flatten()) - .fold(SparseTrieUpdates::default(), |mut acc, subtrie| { - acc.extend(subtrie.take_updates()); - acc - }) - } - /// Returns: /// 1. List of lower [subtries](SparseSubtrie) that have changed according to the provided /// [prefix set](PrefixSet). See documentation of [`ChangedSubtrie`] for more details. @@ -1440,6 +1445,18 @@ impl SparseSubtrie { fn take_updates(&mut self) -> SparseTrieUpdates { self.inner.updates.take().unwrap_or_default() } + + /// Removes all nodes and values from the subtrie, resetting it to a blank state + /// with only an empty root node. This is used when a storage root is deleted. + fn wipe(&mut self) { + self.nodes = HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]); + self.inner.clear(); + } + + /// Clears the subtrie, keeping the data structures allocated. 
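+ ///
+ /// Unlike [`Self::wipe`], this leaves the subtrie without any nodes at all
+ /// instead of re-inserting an empty root node.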
+ fn clear(&mut self) { + self.nodes.clear(); + } } /// Helper type for [`SparseSubtrie`] to mutably access only a subset of fields from the original @@ -1772,6 +1789,15 @@ impl SparseSubtrieInner { "Added node to RLP node stack" ); } + + /// Clears the subtrie, keeping the data structures allocated. + fn clear(&mut self) { + self.branch_node_tree_masks.clear(); + self.branch_node_hash_masks.clear(); + self.values.clear(); + self.updates = None; + self.buffers.clear(); + } } /// Represents the outcome of processing a node during leaf insertion @@ -1865,6 +1891,17 @@ pub struct SparseSubtrieBuffers { rlp_buf: Vec, } +impl SparseSubtrieBuffers { + /// Clears all buffers. + fn clear(&mut self) { + self.path_stack.clear(); + self.rlp_node_stack.clear(); + self.branch_child_buf.clear(); + self.branch_value_stack_buf.clear(); + self.rlp_buf.clear(); + } +} + /// RLP node path stack item. #[derive(Clone, PartialEq, Eq, Debug)] pub struct RlpNodePathStackItem { @@ -1929,7 +1966,7 @@ mod tests { }; use reth_trie_sparse::{ blinded::{BlindedProvider, DefaultBlindedProvider, RevealedNode}, - SparseNode, TrieMasks, + SparseNode, SparseTrieInterface, TrieMasks, }; /// Mock blinded provider for testing that allows pre-setting nodes at specific paths. @@ -2234,7 +2271,7 @@ mod tests { let mut trie = ParallelSparseTrie::default().with_updates(true); for (path, node) in nodes { - let subtrie = trie.subtrie_for_path(&path); + let subtrie = trie.subtrie_for_path_mut(&path); if let SparseNode::Leaf { key, .. } = &node { let mut full_key = path; full_key.extend(key); @@ -2756,7 +2793,7 @@ mod tests { trie.prefix_set = prefix_set; // Update subtrie hashes - trie.update_lower_subtrie_hashes(); + trie.update_subtrie_hashes(); // Check that the prefix set was updated assert_eq!(trie.prefix_set, unchanged_prefix_set); @@ -3188,7 +3225,7 @@ mod tests { // Verify initial state - the lower subtrie's path should be 0x123 let lower_subtrie_root_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); assert_matches!( - trie.lower_subtrie_for_path(&lower_subtrie_root_path), + trie.lower_subtrie_for_path_mut(&lower_subtrie_root_path), Some(subtrie) if subtrie.path == lower_subtrie_root_path ); diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index c707af23d11..6f1acbfca9c 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -77,7 +77,7 @@ pub trait SparseTrieInterface: Default + Debug { /// # Arguments /// /// * `additional` - The number of additional trie nodes to reserve capacity for. - fn reserve_nodes(&mut self, additional: usize); + fn reserve_nodes(&mut self, _additional: usize) {} /// Reveals a trie node if it has not been revealed before. 
/// From 037be8d7ac27d53ed160489366593a87bfe3bd42 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 3 Jul 2025 20:01:00 +0400 Subject: [PATCH 036/305] chore(test): don't use `EvmInternals::new` (#17188) --- .../engine/tree/src/tree/precompile_cache.rs | 64 ++++++++++++------- 1 file changed, 40 insertions(+), 24 deletions(-) diff --git a/crates/engine/tree/src/tree/precompile_cache.rs b/crates/engine/tree/src/tree/precompile_cache.rs index 9d59ccbce49..9838856317f 100644 --- a/crates/engine/tree/src/tree/precompile_cache.rs +++ b/crates/engine/tree/src/tree/precompile_cache.rs @@ -241,10 +241,10 @@ mod tests { use std::hash::DefaultHasher; use super::*; - use reth_evm::EvmInternals; + use reth_evm::{EthEvmFactory, Evm, EvmEnv, EvmFactory}; use reth_revm::db::EmptyDB; - use revm::{context::JournalTr, precompile::PrecompileOutput, Journal}; - use revm_primitives::{hardfork::SpecId, U256}; + use revm::{context::TxEnv, precompile::PrecompileOutput}; + use revm_primitives::hardfork::SpecId; #[test] fn test_cache_key_ref_hash() { @@ -290,6 +290,7 @@ mod tests { #[test] fn test_precompile_cache_map_separate_addresses() { + let mut evm = EthEvmFactory::default().create_evm(EmptyDB::default(), EvmEnv::default()); let input_data = b"same_input"; let gas_limit = 100_000; @@ -337,41 +338,56 @@ mod tests { None, ); + let precompile1_address = Address::with_last_byte(1); + let precompile2_address = Address::with_last_byte(2); + + evm.precompiles_mut().apply_precompile(&precompile1_address, |_| Some(wrapped_precompile1)); + evm.precompiles_mut().apply_precompile(&precompile2_address, |_| Some(wrapped_precompile2)); + // first invocation of precompile1 (cache miss) - let result1 = wrapped_precompile1 - .call(PrecompileInput { - data: input_data, - gas: gas_limit, + let result1 = evm + .transact_raw(TxEnv { caller: Address::ZERO, - value: U256::ZERO, - internals: EvmInternals::new(&mut Journal::<_>::new(EmptyDB::new())), + gas_limit, + data: input_data.into(), + kind: precompile1_address.into(), + ..Default::default() }) + .unwrap() + .result + .into_output() .unwrap(); - assert_eq!(result1.bytes.as_ref(), b"output_from_precompile_1"); + assert_eq!(result1.as_ref(), b"output_from_precompile_1"); // first invocation of precompile2 with the same input (should be a cache miss) // if cache was incorrectly shared, we'd get precompile1's result - let result2 = wrapped_precompile2 - .call(PrecompileInput { - data: input_data, - gas: gas_limit, + let result2 = evm + .transact_raw(TxEnv { caller: Address::ZERO, - value: U256::ZERO, - internals: EvmInternals::new(&mut Journal::<_>::new(EmptyDB::new())), + gas_limit, + data: input_data.into(), + kind: precompile2_address.into(), + ..Default::default() }) + .unwrap() + .result + .into_output() .unwrap(); - assert_eq!(result2.bytes.as_ref(), b"output_from_precompile_2"); + assert_eq!(result2.as_ref(), b"output_from_precompile_2"); // second invocation of precompile1 (should be a cache hit) - let result3 = wrapped_precompile1 - .call(PrecompileInput { - data: input_data, - gas: gas_limit, + let result3 = evm + .transact_raw(TxEnv { caller: Address::ZERO, - value: U256::ZERO, - internals: EvmInternals::new(&mut Journal::<_>::new(EmptyDB::new())), + gas_limit, + data: input_data.into(), + kind: precompile1_address.into(), + ..Default::default() }) + .unwrap() + .result + .into_output() .unwrap(); - assert_eq!(result3.bytes.as_ref(), b"output_from_precompile_1"); + assert_eq!(result3.as_ref(), b"output_from_precompile_1"); } } From 
3b1b2a0229fcb0faf139904af8682f832ee523ce Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 3 Jul 2025 18:11:36 +0200 Subject: [PATCH 037/305] fix: dont double serialize resp (#17204) --- crates/optimism/rpc/src/historical.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/optimism/rpc/src/historical.rs b/crates/optimism/rpc/src/historical.rs index 6434d6bd519..07cbadf4619 100644 --- a/crates/optimism/rpc/src/historical.rs +++ b/crates/optimism/rpc/src/historical.rs @@ -181,8 +181,7 @@ where .request::<_, serde_json::Value>(req.method_name(), params) .await { - let payload = - jsonrpsee_types::ResponsePayload::success(raw.to_string()).into(); + let payload = jsonrpsee_types::ResponsePayload::success(raw).into(); return MethodResponse::response(req.id, payload, usize::MAX); } } From e49bbe416edf8f235579d582046c1d352d8a1a2e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 3 Jul 2025 22:39:13 +0200 Subject: [PATCH 038/305] chore: bump evm 0.14 (#17206) --- Cargo.lock | 52 ++++++++++++++++++++++++++-------------------------- Cargo.toml | 24 ++++++++++++------------ 2 files changed, 38 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e363630d5d5..c5d96efc8e5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -259,9 +259,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a198edb5172413c2300bdc591b4dec1caa643398bd7facc21d0925487dffcd8f" +checksum = "ef2d6e0448bfd057a4438226b3d2fd547a0530fa4226217dfb1682d09f108bd4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -372,9 +372,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de31eff0ae512dcca4fa0a58d158aa6d68e3b8b4a4e50ca5d6aff09c248a0aa2" +checksum = "98354b9c3d50de701a63693d5b6a37e468a93b970b2224f934dd745c727ef998" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6072,9 +6072,9 @@ dependencies = [ [[package]] name = "op-revm" -version = "8.0.1" +version = "8.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84de364c50baff786d09ab18d3cdd4f5ff23612e96c00a96b65de3c470f553df" +checksum = "bf1273c005f27528400dae0e2489a41378cfc29f0e42ea17f21b7d9679aef679" dependencies = [ "auto_impl", "once_cell", @@ -10630,9 +10630,9 @@ dependencies = [ [[package]] name = "revm" -version = "27.0.1" +version = "27.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eff49cb058b1100aba529a048655594d89f6b86cefd1b50b63facd2465b6a0e" +checksum = "24188978ab59b8fd508d0193f8a08848bdcd19ae0f73f2ad1d6ee3b2cd6c0903" dependencies = [ "revm-bytecode", "revm-context", @@ -10649,9 +10649,9 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "6.0.0" +version = "6.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6a7d034cdf74c5f952ffc26e9667dd4285c86379ce1b1190b5d597c398a7565" +checksum = "7a685758a4f375ae9392b571014b9779cfa63f0d8eb91afb4626ddd958b23615" dependencies = [ "bitvec", "once_cell", @@ -10662,9 +10662,9 @@ dependencies = [ [[package]] name = "revm-context" -version = "8.0.1" +version = "8.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "199000545a2516f3fef7241e33df677275f930f56203ec4a586f7815e7fb5598" +checksum = "2c949e6b9d996ae5c7606cd4f82d997dabad30909f85601b5876b704d95b505b" dependencies = [ "cfg-if", "derive-where", @@ 
-10678,9 +10678,9 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "8.0.0" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47db30cb6579fddb974462ea385d297ea57d0d13750fc1086d65166c4fb281eb" +checksum = "a303a93102fceccec628265efd550ce49f2817b38ac3a492c53f7d524f18a1ca" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -10694,9 +10694,9 @@ dependencies = [ [[package]] name = "revm-database" -version = "7.0.0" +version = "7.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe1906ae0f5f83153a6d46da8791405eb30385b9deb4845c27b4a6802e342e8" +checksum = "7db360729b61cc347f9c2f12adb9b5e14413aea58778cf9a3b7676c6a4afa115" dependencies = [ "alloy-eips", "revm-bytecode", @@ -10708,9 +10708,9 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "7.0.0" +version = "7.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faffdc496bad90183f31a144ed122caefa4e74ffb02f57137dc8a94d20611550" +checksum = "b8500194cad0b9b1f0567d72370795fd1a5e0de9ec719b1607fa1566a23f039a" dependencies = [ "auto_impl", "either", @@ -10721,9 +10721,9 @@ dependencies = [ [[package]] name = "revm-handler" -version = "8.0.1" +version = "8.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "844ecdeb61f8067a7ccb61e32c69d303fe9081b5f1e21e09a337c883f4dda1ad" +checksum = "35b3a613d012189571b28fb13befc8c8af54e54f4f76997a0c02828cea0584a3" dependencies = [ "auto_impl", "derive-where", @@ -10740,9 +10740,9 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "8.0.1" +version = "8.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee95fd546963e456ab9b615adc3564f64a801a49d9ebcdc31ff63ce3a601069c" +checksum = "64aee1f5f5b07cfa73250f530edf4c8c3bb8da693d5d00fe9f94f70499978f00" dependencies = [ "auto_impl", "either", @@ -10778,9 +10778,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "23.0.0" +version = "23.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1776f996bb79805b361badd8b6326ac04a8580764aebf72b145620a6e21cf1c3" +checksum = "8d2a89c40b7c72220f3d4b753ca0ce9ae912cf5dad7d3517182e4e1473b9b55e" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -10828,9 +10828,9 @@ dependencies = [ [[package]] name = "revm-state" -version = "7.0.0" +version = "7.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7bc9492e94ad3280c4540879d28d3fdbfbc432ebff60f17711740ebb4309ff" +checksum = "106fec5c634420118c7d07a6c37110186ae7f23025ceac3a5dbe182eea548363" dependencies = [ "bitflags 2.9.1", "revm-bytecode", diff --git a/Cargo.toml b/Cargo.toml index 0c9204feb8e..bae98a512ef 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -450,24 +450,24 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { version = "27.0.1", default-features = false } -revm-bytecode = { version = "6.0.0", default-features = false } -revm-database = { version = "7.0.0", default-features = false } -revm-state = { version = "7.0.0", default-features = false } +revm = { version = "27.0.2", default-features = false } +revm-bytecode = { version = "6.0.1", default-features = false } +revm-database = { version = "7.0.1", default-features = false } +revm-state = { version = "7.0.1", default-features = false } revm-primitives = { version = "20.0.0", default-features = false } -revm-interpreter = { 
version = "23.0.0", default-features = false } -revm-inspector = { version = "8.0.1", default-features = false } -revm-context = { version = "8.0.1", default-features = false } -revm-context-interface = { version = "8.0.0", default-features = false } -revm-database-interface = { version = "7.0.0", default-features = false } -op-revm = { version = "8.0.1", default-features = false } +revm-interpreter = { version = "23.0.1", default-features = false } +revm-inspector = { version = "8.0.2", default-features = false } +revm-context = { version = "8.0.2", default-features = false } +revm-context-interface = { version = "8.0.1", default-features = false } +revm-database-interface = { version = "7.0.1", default-features = false } +op-revm = { version = "8.0.2", default-features = false } revm-inspectors = "0.26.5" # eth alloy-chains = { version = "0.2.0", default-features = false } alloy-dyn-abi = "1.2.0" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.13", default-features = false } +alloy-evm = { version = "0.14", default-features = false } alloy-primitives = { version = "1.2.0", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.2.0" @@ -505,7 +505,7 @@ alloy-transport-ipc = { version = "1.0.16", default-features = false } alloy-transport-ws = { version = "1.0.16", default-features = false } # op -alloy-op-evm = { version = "0.13", default-features = false } +alloy-op-evm = { version = "0.14", default-features = false } alloy-op-hardforks = "0.2.2" op-alloy-rpc-types = { version = "0.18.7", default-features = false } op-alloy-rpc-types-engine = { version = "0.18.7", default-features = false } From 345735888070050f0f08bff8b708c40d35930524 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Fri, 4 Jul 2025 12:35:23 +0200 Subject: [PATCH 039/305] chore: make clippy happy (#17219) --- crates/rpc/rpc-eth-api/src/helpers/transaction.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index c7849011d25..c0c759d400d 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -489,7 +489,7 @@ pub trait EthTransactions: LoadTransaction { fn find_signer( &self, account: &Address, - ) -> Result> + 'static)>, Self::Error> { + ) -> Result> + 'static>, Self::Error> { self.signers() .read() .iter() From 3b92a235995555f2da52fd0c2ea07103595d37ae Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Fri, 4 Jul 2025 12:53:28 +0200 Subject: [PATCH 040/305] chore(trie): make SparseStateTrie generic with respect to trie implementation (#17205) Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Co-authored-by: Claude Co-authored-by: Federico Gimenez --- .../tree/src/tree/payload_processor/mod.rs | 2 +- crates/trie/sparse/benches/root.rs | 6 +-- crates/trie/sparse/benches/update.rs | 28 +++++----- crates/trie/sparse/src/state.rs | 53 +++++++++++-------- crates/trie/sparse/src/trie.rs | 40 +++++++------- crates/trie/trie/src/witness.rs | 4 +- 6 files changed, 72 insertions(+), 61 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 21c34d952ec..cc322490e58 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ 
b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -46,7 +46,7 @@ pub mod prewarm; pub mod sparse_trie; /// Entrypoint for executing the payload. -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct PayloadProcessor where N: NodePrimitives, diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs index e34718ffc5a..d61760910b0 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -13,7 +13,7 @@ use reth_trie::{ HashedStorage, }; use reth_trie_common::{HashBuilder, Nibbles}; -use reth_trie_sparse::{blinded::DefaultBlindedProvider, SparseTrie}; +use reth_trie_sparse::{blinded::DefaultBlindedProvider, RevealedSparseTrie, SparseTrie}; fn calculate_root_from_leaves(c: &mut Criterion) { let mut group = c.benchmark_group("calculate root from leaves"); @@ -42,7 +42,7 @@ fn calculate_root_from_leaves(c: &mut Criterion) { // sparse trie let provider = DefaultBlindedProvider; group.bench_function(BenchmarkId::new("sparse trie", size), |b| { - b.iter_with_setup(SparseTrie::revealed_empty, |mut sparse| { + b.iter_with_setup(SparseTrie::::revealed_empty, |mut sparse| { for (key, value) in &state { sparse .update_leaf( @@ -189,7 +189,7 @@ fn calculate_root_from_leaves_repeated(c: &mut Criterion) { group.bench_function(benchmark_id, |b| { b.iter_with_setup( || { - let mut sparse = SparseTrie::revealed_empty(); + let mut sparse = SparseTrie::::revealed_empty(); for (key, value) in &init_state { sparse .update_leaf( diff --git a/crates/trie/sparse/benches/update.rs b/crates/trie/sparse/benches/update.rs index d230d51c58b..dd4005291a0 100644 --- a/crates/trie/sparse/benches/update.rs +++ b/crates/trie/sparse/benches/update.rs @@ -5,7 +5,7 @@ use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criteri use proptest::{prelude::*, strategy::ValueTree}; use rand::seq::IteratorRandom; use reth_trie_common::Nibbles; -use reth_trie_sparse::{blinded::DefaultBlindedProvider, SparseTrie}; +use reth_trie_sparse::{blinded::DefaultBlindedProvider, RevealedSparseTrie, SparseTrie}; const LEAF_COUNTS: [usize; 2] = [1_000, 5_000]; @@ -17,14 +17,15 @@ fn update_leaf(c: &mut Criterion) { let leaves = generate_leaves(leaf_count); // Start with an empty trie let provider = DefaultBlindedProvider; - let mut trie = SparseTrie::revealed_empty(); - // Pre-populate with data - for (path, value) in leaves.iter().cloned() { - trie.update_leaf(path, value, &provider).unwrap(); - } b.iter_batched( || { + let mut trie = SparseTrie::::revealed_empty(); + // Pre-populate with data + for (path, value) in leaves.iter().cloned() { + trie.update_leaf(path, value, &provider).unwrap(); + } + let new_leaves = leaves .iter() // Update 10% of existing leaves with new values @@ -38,7 +39,7 @@ fn update_leaf(c: &mut Criterion) { }) .collect::>(); - (trie.clone(), new_leaves) + (trie, new_leaves) }, |(mut trie, new_leaves)| { for (path, new_value) in new_leaves { @@ -60,21 +61,22 @@ fn remove_leaf(c: &mut Criterion) { let leaves = generate_leaves(leaf_count); // Start with an empty trie let provider = DefaultBlindedProvider; - let mut trie = SparseTrie::revealed_empty(); - // Pre-populate with data - for (path, value) in leaves.iter().cloned() { - trie.update_leaf(path, value, &provider).unwrap(); - } b.iter_batched( || { + let mut trie = SparseTrie::::revealed_empty(); + // Pre-populate with data + for (path, value) in leaves.iter().cloned() { + trie.update_leaf(path, value, &provider).unwrap(); + } + let delete_leaves = leaves .iter() .map(|(path, _)| path) 
// Remove 10% leaves .choose_multiple(&mut rand::rng(), leaf_count / 10); - (trie.clone(), delete_leaves) + (trie, delete_leaves) }, |(mut trie, delete_leaves)| { for path in delete_leaves { diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index d46c15560ed..9dacb9800bd 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -23,11 +23,14 @@ use tracing::trace; #[derive(Debug)] /// Sparse state trie representing lazy-loaded Ethereum state trie. -pub struct SparseStateTrie { +pub struct SparseStateTrie< + A = RevealedSparseTrie, // Account trie implementation + S = RevealedSparseTrie, // Storage trie implementation +> { /// Sparse account trie. - state: SparseTrie, + state: SparseTrie, /// Sparse storage tries. - storages: B256Map, + storages: B256Map>, /// Collection of revealed account trie paths. revealed_account_paths: HashSet, /// Collection of revealed storage trie paths, per account. @@ -41,7 +44,11 @@ pub struct SparseStateTrie { metrics: crate::metrics::SparseStateTrieMetrics, } -impl Default for SparseStateTrie { +impl Default for SparseStateTrie +where + A: Default, + S: Default, +{ fn default() -> Self { Self { state: Default::default(), @@ -64,7 +71,11 @@ impl SparseStateTrie { } } -impl SparseStateTrie { +impl SparseStateTrie +where + A: SparseTrieInterface, + S: SparseTrieInterface, +{ /// Create new [`SparseStateTrie`] pub fn new() -> Self { Self::default() @@ -77,13 +88,13 @@ impl SparseStateTrie { } /// Set the accounts trie to the given `SparseTrie`. - pub fn with_accounts_trie(mut self, trie: SparseTrie) -> Self { + pub fn with_accounts_trie(mut self, trie: SparseTrie) -> Self { self.state = trie; self } /// Takes the `SparseTrie` from within the state root and clears it if it is not blinded. - pub fn take_cleared_accounts_trie(&mut self) -> SparseTrie { + pub fn take_cleared_accounts_trie(&mut self) -> SparseTrie { core::mem::take(&mut self.state).clear() } @@ -138,27 +149,27 @@ impl SparseStateTrie { } /// Returns reference to state trie if it was revealed. - pub const fn state_trie_ref(&self) -> Option<&RevealedSparseTrie> { + pub const fn state_trie_ref(&self) -> Option<&A> { self.state.as_revealed_ref() } /// Returns reference to storage trie if it was revealed. - pub fn storage_trie_ref(&self, address: &B256) -> Option<&RevealedSparseTrie> { + pub fn storage_trie_ref(&self, address: &B256) -> Option<&S> { self.storages.get(address).and_then(|e| e.as_revealed_ref()) } /// Returns mutable reference to storage sparse trie if it was revealed. - pub fn storage_trie_mut(&mut self, address: &B256) -> Option<&mut RevealedSparseTrie> { + pub fn storage_trie_mut(&mut self, address: &B256) -> Option<&mut S> { self.storages.get_mut(address).and_then(|e| e.as_revealed_mut()) } /// Takes the storage trie for the provided address. - pub fn take_storage_trie(&mut self, address: &B256) -> Option { + pub fn take_storage_trie(&mut self, address: &B256) -> Option> { self.storages.remove(address) } /// Inserts storage trie for the provided address. - pub fn insert_storage_trie(&mut self, address: B256, storage_trie: SparseTrie) { + pub fn insert_storage_trie(&mut self, address: B256, storage_trie: SparseTrie) { self.storages.insert(address, storage_trie); } @@ -598,13 +609,13 @@ impl SparseStateTrie { self.storages.get_mut(&account).and_then(|trie| trie.root()) } - /// Returns mutable reference to the revealed sparse trie. + /// Returns mutable reference to the revealed account sparse trie. 
/// /// If the trie is not revealed yet, its root will be revealed using the blinded node provider. fn revealed_trie_mut( &mut self, provider_factory: impl BlindedProviderFactory, - ) -> SparseStateTrieResult<&mut RevealedSparseTrie> { + ) -> SparseStateTrieResult<&mut A> { match self.state { SparseTrie::Blind(_) => { let (root_node, hash_mask, tree_mask) = provider_factory @@ -919,7 +930,7 @@ mod tests { #[test] fn validate_root_node_first_node_not_root() { - let sparse = SparseStateTrie::default(); + let sparse = SparseStateTrie::::default(); let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; assert_matches!( sparse.validate_root_node(&mut proof.into_iter().peekable()).map_err(|e| e.into_kind()), @@ -929,7 +940,7 @@ mod tests { #[test] fn validate_root_node_invalid_proof_with_empty_root() { - let sparse = SparseStateTrie::default(); + let sparse = SparseStateTrie::::default(); let proof = [ (Nibbles::default(), Bytes::from([EMPTY_STRING_CODE])), (Nibbles::from_nibbles([0x1]), Bytes::new()), @@ -948,7 +959,7 @@ mod tests { let proofs = hash_builder.take_proof_nodes(); assert_eq!(proofs.len(), 1); - let mut sparse = SparseStateTrie::default(); + let mut sparse = SparseStateTrie::::default(); assert_eq!(sparse.state, SparseTrie::Blind(None)); sparse.reveal_account(Default::default(), proofs.into_inner()).unwrap(); @@ -963,7 +974,7 @@ mod tests { let proofs = hash_builder.take_proof_nodes(); assert_eq!(proofs.len(), 1); - let mut sparse = SparseStateTrie::default(); + let mut sparse = SparseStateTrie::::default(); assert!(sparse.storages.is_empty()); sparse @@ -978,7 +989,7 @@ mod tests { #[test] fn reveal_account_path_twice() { let provider_factory = DefaultBlindedProviderFactory; - let mut sparse = SparseStateTrie::default(); + let mut sparse = SparseStateTrie::::default(); let leaf_value = alloy_rlp::encode(TrieAccount::default()); let leaf_1 = alloy_rlp::encode(TrieNode::Leaf(LeafNode::new( @@ -1050,7 +1061,7 @@ mod tests { #[test] fn reveal_storage_path_twice() { let provider_factory = DefaultBlindedProviderFactory; - let mut sparse = SparseStateTrie::default(); + let mut sparse = SparseStateTrie::::default(); let leaf_value = alloy_rlp::encode(TrieAccount::default()); let leaf_1 = alloy_rlp::encode(TrieNode::Leaf(LeafNode::new( @@ -1182,7 +1193,7 @@ mod tests { let proof_nodes = hash_builder.take_proof_nodes(); let provider_factory = DefaultBlindedProviderFactory; - let mut sparse = SparseStateTrie::default().with_updates(true); + let mut sparse = SparseStateTrie::::default().with_updates(true); sparse .reveal_decoded_multiproof( MultiProof { diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 4d93aacdeb2..2bbe94d4f7a 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -41,8 +41,8 @@ const SPARSE_TRIE_SUBTRIE_HASHES_LEVEL: usize = 2; /// 2. Update tracking - changes to the trie structure can be tracked and selectively persisted /// 3. Incremental operations - nodes can be revealed as needed without loading the entire trie. /// This is what gives rise to the notion of a "sparse" trie. -#[derive(PartialEq, Eq, Clone, Debug)] -pub enum SparseTrie { +#[derive(PartialEq, Eq, Debug)] +pub enum SparseTrie { /// The trie is blind -- no nodes have been revealed /// /// This is the default state. In this state, the trie cannot be directly queried or modified @@ -50,32 +50,32 @@ pub enum SparseTrie { /// /// In this state the `SparseTrie` can optionally carry with it a cleared `RevealedSparseTrie`. 
/// This allows for re-using the trie's allocations between payload executions. - Blind(Option>), + Blind(Option>), /// Some nodes in the Trie have been revealed. /// /// In this state, the trie can be queried and modified for the parts /// that have been revealed. Other parts remain blind and require revealing /// before they can be accessed. - Revealed(Box), + Revealed(Box), } -impl Default for SparseTrie { +impl Default for SparseTrie { fn default() -> Self { Self::Blind(None) } } -impl SparseTrie { +impl SparseTrie { /// Creates a new blind sparse trie. /// /// # Examples /// /// ``` - /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, SparseTrie}; + /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, RevealedSparseTrie, SparseTrie}; /// - /// let trie = SparseTrie::blind(); + /// let trie = SparseTrie::::blind(); /// assert!(trie.is_blind()); - /// let trie = SparseTrie::default(); + /// let trie = SparseTrie::::default(); /// assert!(trie.is_blind()); /// ``` pub const fn blind() -> Self { @@ -87,9 +87,9 @@ impl SparseTrie { /// # Examples /// /// ``` - /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, SparseTrie}; + /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, RevealedSparseTrie, SparseTrie}; /// - /// let trie = SparseTrie::revealed_empty(); + /// let trie = SparseTrie::::revealed_empty(); /// assert!(!trie.is_blind()); /// ``` pub fn revealed_empty() -> Self { @@ -106,13 +106,13 @@ impl SparseTrie { /// /// # Returns /// - /// A mutable reference to the underlying [`RevealedSparseTrie`]. + /// A mutable reference to the underlying [`SparseTrieInterface`]. pub fn reveal_root( &mut self, root: TrieNode, masks: TrieMasks, retain_updates: bool, - ) -> SparseTrieResult<&mut RevealedSparseTrie> { + ) -> SparseTrieResult<&mut T> { // if `Blind`, we initialize the revealed trie with the given root node, using a // pre-allocated trie if available. if self.is_blind() { @@ -137,7 +137,7 @@ impl SparseTrie { /// Returns an immutable reference to the underlying revealed sparse trie. /// /// Returns `None` if the trie is blinded. - pub const fn as_revealed_ref(&self) -> Option<&RevealedSparseTrie> { + pub const fn as_revealed_ref(&self) -> Option<&T> { if let Self::Revealed(revealed) = self { Some(revealed) } else { @@ -148,7 +148,7 @@ impl SparseTrie { /// Returns a mutable reference to the underlying revealed sparse trie. /// /// Returns `None` if the trie is blinded. - pub fn as_revealed_mut(&mut self) -> Option<&mut RevealedSparseTrie> { + pub fn as_revealed_mut(&mut self) -> Option<&mut T> { if let Self::Revealed(revealed) = self { Some(revealed) } else { @@ -198,8 +198,8 @@ impl SparseTrie { } /// Returns a [`SparseTrie::Blind`] based on this one. If this instance was revealed, or was - /// itself a `Blind` with a pre-allocated [`RevealedSparseTrie`], this will return - /// a `Blind` carrying a cleared pre-allocated [`RevealedSparseTrie`]. + /// itself a `Blind` with a pre-allocated [`SparseTrieInterface`], this will return + /// a `Blind` carrying a cleared pre-allocated [`SparseTrieInterface`]. pub fn clear(self) -> Self { match self { Self::Blind(_) => self, @@ -209,9 +209,7 @@ impl SparseTrie { } } } -} -impl SparseTrie { /// Updates (or inserts) a leaf at the given key path with the specified RLP-encoded value. 
/// /// # Errors @@ -2384,8 +2382,8 @@ mod tests { #[test] fn sparse_trie_is_blind() { - assert!(SparseTrie::blind().is_blind()); - assert!(!SparseTrie::revealed_empty().is_blind()); + assert!(SparseTrie::::blind().is_blind()); + assert!(!SparseTrie::::revealed_empty().is_blind()); } #[test] diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 3683c2327e8..50b3834a1f4 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -22,7 +22,7 @@ use reth_execution_errors::{ use reth_trie_common::{MultiProofTargets, Nibbles}; use reth_trie_sparse::{ blinded::{BlindedProvider, BlindedProviderFactory, RevealedNode}, - SparseStateTrie, + RevealedSparseTrie, SparseStateTrie, }; use std::sync::{mpsc, Arc}; @@ -154,7 +154,7 @@ where ), tx, ); - let mut sparse_trie = SparseStateTrie::new(); + let mut sparse_trie = SparseStateTrie::::new(); sparse_trie.reveal_multiproof(multiproof)?; // Attempt to update state trie to gather additional information for the witness. From 5c47be25c4449d50cd3f714d61f7b4fae52084fd Mon Sep 17 00:00:00 2001 From: Francis Li Date: Fri, 4 Jul 2025 04:22:48 -0700 Subject: [PATCH 041/305] feat(txpool): add minimal priority fee configuration for transaction pool (#17183) --- crates/ethereum/node/src/node.rs | 1 + crates/node/core/src/args/txpool.rs | 7 + crates/optimism/node/src/node.rs | 1 + crates/rpc/rpc-eth-types/src/error/mod.rs | 11 ++ crates/transaction-pool/src/config.rs | 3 + crates/transaction-pool/src/error.rs | 7 + crates/transaction-pool/src/validate/eth.rs | 179 +++++++++++++++++++- docs/vocs/docs/pages/cli/reth/node.mdx | 3 + 8 files changed, 209 insertions(+), 3 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 672b427feee..b1c89d7ddc4 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -427,6 +427,7 @@ where .with_local_transactions_config(pool_config.local_transactions_config.clone()) .set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap) .with_max_tx_gas_limit(ctx.config().txpool.max_tx_gas_limit) + .with_minimum_priority_fee(ctx.config().txpool.minimum_priority_fee) .with_additional_tasks(ctx.config().txpool.additional_validation_tasks) .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()); diff --git a/crates/node/core/src/args/txpool.rs b/crates/node/core/src/args/txpool.rs index cae968f2d7e..bcb033301fc 100644 --- a/crates/node/core/src/args/txpool.rs +++ b/crates/node/core/src/args/txpool.rs @@ -65,6 +65,11 @@ pub struct TxPoolArgs { #[arg(long = "txpool.minimal-protocol-fee", default_value_t = MIN_PROTOCOL_BASE_FEE)] pub minimal_protocol_basefee: u64, + /// Minimum priority fee required for transaction acceptance into the pool. + /// Transactions with priority fee below this value will be rejected. 
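+ ///
+ /// The fee is given in wei per gas, so e.g. `--txpool.minimum-priority-fee 1000000000`
+ /// enforces a minimum priority fee of 1 gwei.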
+ #[arg(long = "txpool.minimum-priority-fee")] + pub minimum_priority_fee: Option, + /// The default enforced gas limit for transactions entering the pool #[arg(long = "txpool.gas-limit", default_value_t = ETHEREUM_BLOCK_GAS_LIMIT_30M)] pub enforced_gas_limit: u64, @@ -144,6 +149,7 @@ impl Default for TxPoolArgs { max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, price_bump: DEFAULT_PRICE_BUMP, minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE, + minimum_priority_fee: None, enforced_gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M, max_tx_gas_limit: None, blob_transaction_price_bump: REPLACE_BLOB_PRICE_BUMP, @@ -195,6 +201,7 @@ impl RethTransactionPoolConfig for TxPoolArgs { replace_blob_tx_price_bump: self.blob_transaction_price_bump, }, minimal_protocol_basefee: self.minimal_protocol_basefee, + minimum_priority_fee: self.minimum_priority_fee, gas_limit: self.enforced_gas_limit, pending_tx_listener_buffer_size: self.pending_tx_listener_buffer_size, new_tx_listener_buffer_size: self.new_tx_listener_buffer_size, diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 117adde1d46..62433c1ba58 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -807,6 +807,7 @@ where .kzg_settings(ctx.kzg_settings()?) .set_tx_fee_cap(ctx.config().rpc.rpc_tx_fee_cap) .with_max_tx_gas_limit(ctx.config().txpool.max_tx_gas_limit) + .with_minimum_priority_fee(ctx.config().txpool.minimum_priority_fee) .with_additional_tasks( pool_config_overrides .additional_validation_tasks diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index e598ea3df76..dcec8482f3d 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -570,6 +570,12 @@ pub enum RpcInvalidTransactionError { /// EIP-7702 transaction has invalid fields set. #[error("EIP-7702 authorization list has invalid fields")] AuthorizationListInvalidFields, + /// Transaction priority fee is below the minimum required priority fee. + #[error("transaction priority fee below minimum required priority fee {minimum_priority_fee}")] + PriorityFeeBelowMinimum { + /// Minimum required priority fee. + minimum_priority_fee: u128, + }, /// Any other error #[error("{0}")] Other(Box), @@ -915,6 +921,11 @@ impl From for RpcPoolError { InvalidPoolTransactionError::Overdraft { cost, balance } => { Self::Invalid(RpcInvalidTransactionError::InsufficientFunds { cost, balance }) } + InvalidPoolTransactionError::PriorityFeeBelowMinimum { minimum_priority_fee } => { + Self::Invalid(RpcInvalidTransactionError::PriorityFeeBelowMinimum { + minimum_priority_fee, + }) + } } } } diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index 5263cd18344..a58b02bb327 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -50,6 +50,8 @@ pub struct PoolConfig { pub price_bumps: PriceBumpConfig, /// Minimum base fee required by the protocol. pub minimal_protocol_basefee: u64, + /// Minimum priority fee required for transaction acceptance into the pool. 
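+ /// If `None` (the default), no minimum priority fee is enforced.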
+ pub minimum_priority_fee: Option, /// The max gas limit for transactions in the pool pub gas_limit: u64, /// How to handle locally received transactions: @@ -87,6 +89,7 @@ impl Default for PoolConfig { max_account_slots: TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER, price_bumps: Default::default(), minimal_protocol_basefee: MIN_PROTOCOL_BASE_FEE, + minimum_priority_fee: None, gas_limit: ETHEREUM_BLOCK_GAS_LIMIT_30M, local_transactions_config: Default::default(), pending_tx_listener_buffer_size: PENDING_TX_LISTENER_BUFFER_SIZE, diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 09aec26bd1e..3f2948b94ed 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -267,6 +267,12 @@ pub enum InvalidPoolTransactionError { /// invocation. #[error("intrinsic gas too low")] IntrinsicGasTooLow, + /// The transaction priority fee is below the minimum required priority fee. + #[error("transaction priority fee below minimum required priority fee {minimum_priority_fee}")] + PriorityFeeBelowMinimum { + /// Minimum required priority fee. + minimum_priority_fee: u128, + }, } // === impl InvalidPoolTransactionError === @@ -381,6 +387,7 @@ impl InvalidPoolTransactionError { Eip7702PoolTransactionError::InflightTxLimitReached => false, Eip7702PoolTransactionError::AuthorityReserved => false, }, + Self::PriorityFeeBelowMinimum { .. } => false, } } diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index deea3598013..5f302d1a14a 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -444,7 +444,11 @@ where { return Err(TransactionValidationOutcome::Invalid( transaction, - InvalidPoolTransactionError::Underpriced, + InvalidPoolTransactionError::PriorityFeeBelowMinimum { + minimum_priority_fee: self + .minimum_priority_fee + .expect("minimum priority fee is expected inside if statement"), + }, )) } @@ -927,8 +931,8 @@ impl EthTransactionValidatorBuilder { } /// Sets a minimum priority fee that's enforced for acceptance into the pool. 
- pub const fn with_minimum_priority_fee(mut self, minimum_priority_fee: u128) -> Self { - self.minimum_priority_fee = Some(minimum_priority_fee); + pub const fn with_minimum_priority_fee(mut self, minimum_priority_fee: Option) -> Self { + self.minimum_priority_fee = minimum_priority_fee; self } @@ -1409,4 +1413,173 @@ mod tests { let outcome = validator.validate_one(TransactionOrigin::External, transaction); assert!(outcome.is_valid()); } + + // Helper function to set up common test infrastructure for priority fee tests + fn setup_priority_fee_test() -> (EthPooledTransaction, MockEthProvider) { + let transaction = get_transaction(); + let provider = MockEthProvider::default(); + provider.add_account( + transaction.sender(), + ExtendedAccount::new(transaction.nonce(), U256::MAX), + ); + (transaction, provider) + } + + // Helper function to create a validator with minimum priority fee + fn create_validator_with_minimum_fee( + provider: MockEthProvider, + minimum_priority_fee: Option, + local_config: Option, + ) -> EthTransactionValidator { + let blob_store = InMemoryBlobStore::default(); + let mut builder = EthTransactionValidatorBuilder::new(provider) + .with_minimum_priority_fee(minimum_priority_fee); + + if let Some(config) = local_config { + builder = builder.with_local_transactions_config(config); + } + + builder.build(blob_store) + } + + #[tokio::test] + async fn invalid_on_priority_fee_lower_than_configured_minimum() { + let (transaction, provider) = setup_priority_fee_test(); + + // Verify the test transaction is a dynamic fee transaction + assert!(transaction.is_dynamic_fee()); + + // Set minimum priority fee to be double the transaction's priority fee + let minimum_priority_fee = + transaction.max_priority_fee_per_gas().expect("priority fee is expected") * 2; + + let validator = + create_validator_with_minimum_fee(provider, Some(minimum_priority_fee), None); + + // External transaction should be rejected due to low priority fee + let outcome = validator.validate_one(TransactionOrigin::External, transaction.clone()); + assert!(outcome.is_invalid()); + + if let TransactionValidationOutcome::Invalid(_, err) = outcome { + assert!(matches!( + err, + InvalidPoolTransactionError::PriorityFeeBelowMinimum { minimum_priority_fee: min_fee } + if min_fee == minimum_priority_fee + )); + } + + // Test pool integration + let blob_store = InMemoryBlobStore::default(); + let pool = + Pool::new(validator, CoinbaseTipOrdering::default(), blob_store, Default::default()); + + let res = pool.add_external_transaction(transaction.clone()).await; + assert!(res.is_err()); + assert!(matches!( + res.unwrap_err().kind, + PoolErrorKind::InvalidTransaction( + InvalidPoolTransactionError::PriorityFeeBelowMinimum { .. 
} + ) + )); + let tx = pool.get(transaction.hash()); + assert!(tx.is_none()); + + // Local transactions should still be accepted regardless of minimum priority fee + let (_, local_provider) = setup_priority_fee_test(); + let validator_local = + create_validator_with_minimum_fee(local_provider, Some(minimum_priority_fee), None); + + let local_outcome = validator_local.validate_one(TransactionOrigin::Local, transaction); + assert!(local_outcome.is_valid()); + } + + #[tokio::test] + async fn valid_on_priority_fee_equal_to_minimum() { + let (transaction, provider) = setup_priority_fee_test(); + + // Set minimum priority fee equal to transaction's priority fee + let tx_priority_fee = + transaction.max_priority_fee_per_gas().expect("priority fee is expected"); + let validator = create_validator_with_minimum_fee(provider, Some(tx_priority_fee), None); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction); + assert!(outcome.is_valid()); + } + + #[tokio::test] + async fn valid_on_priority_fee_above_minimum() { + let (transaction, provider) = setup_priority_fee_test(); + + // Set minimum priority fee below transaction's priority fee + let tx_priority_fee = + transaction.max_priority_fee_per_gas().expect("priority fee is expected"); + let minimum_priority_fee = tx_priority_fee / 2; // Half of transaction's priority fee + + let validator = + create_validator_with_minimum_fee(provider, Some(minimum_priority_fee), None); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction); + assert!(outcome.is_valid()); + } + + #[tokio::test] + async fn valid_on_minimum_priority_fee_disabled() { + let (transaction, provider) = setup_priority_fee_test(); + + // No minimum priority fee set (default is None) + let validator = create_validator_with_minimum_fee(provider, None, None); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction); + assert!(outcome.is_valid()); + } + + #[tokio::test] + async fn priority_fee_validation_applies_to_private_transactions() { + let (transaction, provider) = setup_priority_fee_test(); + + // Set minimum priority fee to be double the transaction's priority fee + let minimum_priority_fee = + transaction.max_priority_fee_per_gas().expect("priority fee is expected") * 2; + + let validator = + create_validator_with_minimum_fee(provider, Some(minimum_priority_fee), None); + + // Private transactions are also subject to minimum priority fee validation + // because they are not considered "local" by default unless specifically configured + let outcome = validator.validate_one(TransactionOrigin::Private, transaction); + assert!(outcome.is_invalid()); + + if let TransactionValidationOutcome::Invalid(_, err) = outcome { + assert!(matches!( + err, + InvalidPoolTransactionError::PriorityFeeBelowMinimum { minimum_priority_fee: min_fee } + if min_fee == minimum_priority_fee + )); + } + } + + #[tokio::test] + async fn valid_on_local_config_exempts_private_transactions() { + let (transaction, provider) = setup_priority_fee_test(); + + // Set minimum priority fee to be double the transaction's priority fee + let minimum_priority_fee = + transaction.max_priority_fee_per_gas().expect("priority fee is expected") * 2; + + // Configure local transactions to include all private transactions + let local_config = + LocalTransactionConfig { propagate_local_transactions: true, ..Default::default() }; + + let validator = create_validator_with_minimum_fee( + provider, + Some(minimum_priority_fee), + Some(local_config), + ); + + // 
With appropriate local config, the behavior depends on the local transaction logic + // This test documents the current behavior - private transactions are still validated + // unless the sender is specifically whitelisted in local_transactions_config + let outcome = validator.validate_one(TransactionOrigin::Private, transaction); + assert!(outcome.is_invalid()); // Still invalid because sender not in whitelist + } } diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 8d53c821fc3..fe9d08b8dee 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -489,6 +489,9 @@ TxPool: [default: 7] + --txpool.minimum-priority-fee + Minimum priority fee required for transaction acceptance into the pool. Transactions with priority fee below this value will be rejected + --txpool.gas-limit The default enforced gas limit for transactions entering the pool From 342bab5e82bfb4d3fcf185fded4f8944f93e90eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roman=20Hodul=C3=A1k?= Date: Fri, 4 Jul 2025 13:31:28 +0200 Subject: [PATCH 042/305] deps: Upgrade `alloy` version `1.0.16` => `1.0.17` and all other deps minor versions (#17217) Co-authored-by: Matthias Seitz --- Cargo.lock | 180 ++++++++++++++++------------ Cargo.toml | 54 ++++----- crates/primitives-traits/Cargo.toml | 1 + crates/primitives/Cargo.toml | 1 + crates/trie/common/Cargo.toml | 1 + 5 files changed, 133 insertions(+), 104 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c5d96efc8e5..0422efa0fcc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,9 +97,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19a9cc9d81ace3da457883b0bdf76776e55f1b84219a9e9d55c27ad308548d3f" +checksum = "5674914c2cfdb866c21cb0c09d82374ee39a1395cf512e7515f4c014083b3fff" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b77018eec2154eb158869f9f2914a3ea577adf87b11be2764d4795d5ccccf7" +checksum = "e9c6ad411efe0f49e0e99b9c7d8749a1eb55f6dbf74a1bc6953ab285b02c4f67" dependencies = [ "alloy-eips", "alloy-primitives", @@ -138,9 +138,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bf8b058ff364d6e94bcd2979d7da1862e94d2987065a4eb41fa9eac36e028a" +checksum = "0bf397edad57b696501702d5887e4e14d7d0bbae9fbb6439e148d361f7254f45" dependencies = [ "alloy-consensus", "alloy-eips", @@ -153,9 +153,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "049ed4836d368929d7c5e206bab2e8d92f00524222edc0026c6bf2a3cb8a02d5" +checksum = "977b97d271159578afcb26e39e1ca5ce1a7f937697793d7d571b0166dd8b8225" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -236,9 +236,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d134f3ac4926124eaf521a1031d11ea98816df3d39fc446fcfd6b36884603f" +checksum = "749b8449e4daf7359bdf1dabdba6ce424ff8b1bdc23bdb795661b2e991a08d87" dependencies = [ 
"alloy-eip2124", "alloy-eip2930", @@ -279,15 +279,16 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb1c2792605e648bdd1fddcfed8ce0d39d3db495c71d2240cb53df8aee8aea1f" +checksum = "5fcbae2107f3f2df2b02bb7d9e81e8aa730ae371ca9dd7fd0c81c3d0cb78a452" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-serde", "alloy-trie", "serde", + "serde_with", ] [[package]] @@ -318,9 +319,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31cfdacfeb6b6b40bf6becf92e69e575c68c9f80311c3961d019e29c0b8d6be2" +checksum = "bc30b0e20fcd0843834ecad2a716661c7b9d5aca2486f8e96b93d5246eb83e06" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -333,9 +334,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de68a3f09cd9ab029cf87d08630e1336ca9a530969689fd151d505fa888a2603" +checksum = "eaeb681024cf71f5ca14f3d812c0a8d8b49f13f7124713538e66d74d3bfe6aff" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -359,9 +360,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcc2689c8addfc43461544d07a6f5f3a3e1f5f4efae61206cb5783dc383cfc8f" +checksum = "a03ad273e1c55cc481889b4130e82860e33624e6969e9a08854e0f3ebe659295" dependencies = [ "alloy-consensus", "alloy-eips", @@ -432,9 +433,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ced931220f547d30313530ad315654b7862ef52631c90ab857d792865f84a7d" +checksum = "abc164acf8c41c756e76c7aea3be8f0fb03f8a3ef90a33e3ddcea5d1614d8779" dependencies = [ "alloy-chains", "alloy-consensus", @@ -476,9 +477,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e37d6cf286fd30bacac525ab1491f9d1030d39ecce237821f2a5d5922eb9a37" +checksum = "670d155c3e35bcaa252ca706a2757a456c56aa71b80ad1539d07d49b86304e78" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -519,9 +520,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d1d1eac6e48b772c7290f0f79211a0e822a38b057535b514cc119abd857d5b6" +checksum = "03c44d31bcb9afad460915fe1fba004a2af5a07a3376c307b9bdfeec3678c209" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -547,9 +548,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8589c6ae318fcc9624d42e9166f7f82b630d9ad13e180c52addf20b93a8af266" +checksum = "2ba2cf3d3c6ece87f1c6bb88324a997f28cf0ad7e98d5e0b6fa91c4003c30916" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -560,9 +561,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0182187bcbe47f3a737f5eced007b7788d4ed37aba19d43fd3df123169b3b05e" +checksum = 
"e4ce874dde17cc749f1aa8883e0c1431ddda6ba6dd9c9eb9b31d1fb0a6023830" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -572,9 +573,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "754d5062b594ed300a3bb0df615acb7bacdbd7bd1cd1a6e5b59fb936c5025a13" +checksum = "65e80e2ffa56956a92af375df1422b239fde6552bd222dfeaeb39f07949060fa" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -584,9 +585,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02cfd7ecb21a1bfe68ac6b551172e4d41f828bcc33a2e1563a65d482d4efc1cf" +checksum = "ef5b22062142ce3b2ed3374337d4b343437e5de6959397f55d2c9fe2c2ce0162" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -595,9 +596,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32c1ddf8fb2e41fa49316185d7826ed034f55819e0017e65dc6715f911b8a1ee" +checksum = "438a7a3a5c5d11877787e324dd1ffd9ab82314ca145513ebe8d12257cbfefb5b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -613,9 +614,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c81ae89a04859751bac72e5e73459bceb3e6a4d2541f2f1374e35be358fd171" +checksum = "1f53a2a78b44582e0742ab96d5782842d9b90cebf6ef6ccd8be864ae246fdd0f" dependencies = [ "alloy-primitives", "serde", @@ -623,9 +624,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "662b720c498883427ffb9f5e38c7f02b56ac5c0cdd60b457e88ce6b6a20b9ce9" +checksum = "7041c3fd4dcd7af95e86280944cc33b4492ac2ddbe02f84079f8019742ec2857" dependencies = [ "alloy-consensus", "alloy-eips", @@ -644,9 +645,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb082c325bdfd05a7c71f52cd1060e62491fbf6edf55962720bdc380847b0784" +checksum = "391e59f81bacbffc7bddd2da3a26d6eec0e2058e9237c279e9b1052bdf21b49e" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -665,9 +666,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c1b50012f55de4a6d58ee9512944089fa61a835e6fe3669844075bb6e0312e" +checksum = "de3f327d4cd140eca2c6c27c82c381aba6fa6a32cbb697c146b5607532f82167" dependencies = [ "alloy-consensus", "alloy-eips", @@ -680,9 +681,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaf52c884c7114c5d1f1f2735634ba0f6579911427281fb02cbd5cb8147723ca" +checksum = "29d96238f37e8a72dcf2cf6bead4c4f91dec1c0720b12be10558406e1633a804" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -694,9 +695,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5e4fd0df1af2ed62d02e7acbc408a162a06f30cb91550c2ec34b11c760cdc0ba" +checksum = "e45d00db47a280d0a6e498b6e63344bccd9485d8860d2e2f06b680200c37ebc2" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -706,9 +707,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7f26c17270c2ac1bd555c4304fe067639f0ddafdd3c8d07a200b2bb5a326e03" +checksum = "0ea08bc854235d4dff08fd57df8033285c11b8d7548b20c6da218194e7e6035f" dependencies = [ "alloy-primitives", "arbitrary", @@ -718,9 +719,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d9fd649d6ed5b8d7e5014e01758efb937e8407124b182a7f711bf487a1a2697" +checksum = "bcb3759f85ef5f010a874d9ebd5ee6ce01cac65211510863124e0ebac6552db0" dependencies = [ "alloy-primitives", "async-trait", @@ -733,9 +734,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c288c5b38be486bb84986701608f5d815183de990e884bb747f004622783e125" +checksum = "14d95902d29e1290809e1c967a1e974145b44b78f6e3e12fc07a60c1225e3df0" dependencies = [ "alloy-consensus", "alloy-network", @@ -821,9 +822,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b790b89e31e183ae36ac0a1419942e21e94d745066f5281417c3e4299ea39e" +checksum = "dcdf4b7fc58ebb2605b2fc5a33dae5cf15527ea70476978351cc0db1c596ea93" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -844,9 +845,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f643645a33a681d09ac1ca2112014c2ca09c68aad301da4400484d59c746bc70" +checksum = "4c4b0f3a9c28bcd3761504d9eb3578838d6d115c8959fc1ea05f59a3a8f691af" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -859,9 +860,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c2d843199d0bdb4cbed8f1b6f2da7f68bcb9c5da7f57e789009e4e7e76d1bec" +checksum = "758edb7c266374374e001c50fb1ea89cb5ed48d47ffbf297599f2a557804dd3b" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -879,9 +880,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d27aae8c7a6403d3d3e874ad2eeeadbf46267b614bac2d4d82786b9b8496464" +checksum = "c5596b913d1299ee37a9c1bb5118b2639bf253dc1088957bdf2073ae63a6fdfa" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -917,9 +918,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4ef40a046b9bf141afc440cef596c79292708aade57c450dc74e843270fd8e7" +checksum = "79bf2869e66904b2148c809e7a75e23ca26f5d7b46663a149a1444fb98a69d1d" dependencies = [ "alloy-primitives", "darling", @@ -4071,9 +4072,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" +checksum = "17da50a276f1e01e0ba6c029e47b7100754904ee8a278f886546e98575380785" dependencies = [ "atomic-waker", "bytes", @@ -4831,6 +4832,17 @@ dependencies = [ "memoffset", ] +[[package]] +name = "io-uring" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] + [[package]] name = "ipconfig" version = "0.3.2" @@ -5750,12 +5762,11 @@ dependencies = [ [[package]] name = "notify" -version = "8.0.0" +version = "8.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fee8403b3d66ac7b26aee6e40a897d85dc5ce26f44da36b8b73e987cc52e943" +checksum = "3163f59cd3fa0e9ef8c32f242966a7b9994fd7378366099593e0e73077cd8c97" dependencies = [ "bitflags 2.9.1", - "filetime", "fsevent-sys", "inotify", "kqueue", @@ -5764,7 +5775,7 @@ dependencies = [ "mio", "notify-types", "walkdir", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -7083,9 +7094,9 @@ checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "reqwest" -version = "0.12.20" +version = "0.12.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813" +checksum = "cbc931937e6ca3a06e3b6c0aa7841849b160a90351d6ab467a8b9b9959767531" dependencies = [ "base64 0.22.1", "bytes", @@ -11225,6 +11236,18 @@ dependencies = [ "serde_json", ] +[[package]] +name = "schemars" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1375ba8ef45a6f15d83fa8748f1079428295d403d6ea991d09ab100155fbc06d" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + [[package]] name = "schnellru" version = "0.2.4" @@ -11442,16 +11465,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf65a400f8f66fb7b0552869ad70157166676db75ed8181f8104ea91cf9d0b42" +checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", "indexmap 2.10.0", - "schemars", + "schemars 0.9.0", + "schemars 1.0.3", "serde", "serde_derive", "serde_json", @@ -11461,9 +11485,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81679d9ed988d5e9a5e6531dc3f2c28efbd639cbd1dfb628df08edea6004da77" +checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" dependencies = [ "darling", "proc-macro2", @@ -12196,17 +12220,19 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.1" +version = "1.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779" +checksum = "1140bb80481756a8cbe10541f37433b459c5aa1e727b4c020fbfebdc25bf3ec4" dependencies = [ "backtrace", "bytes", + "io-uring", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", + "slab", "socket2", "tokio-macros", "windows-sys 0.52.0", diff --git a/Cargo.toml b/Cargo.toml index bae98a512ef..919d48f772e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -476,33 +476,33 @@ alloy-trie = { 
version = "0.9.0", default-features = false } alloy-hardforks = "0.2.7" -alloy-consensus = { version = "1.0.16", default-features = false } -alloy-contract = { version = "1.0.16", default-features = false } -alloy-eips = { version = "1.0.16", default-features = false } -alloy-genesis = { version = "1.0.16", default-features = false } -alloy-json-rpc = { version = "1.0.16", default-features = false } -alloy-network = { version = "1.0.16", default-features = false } -alloy-network-primitives = { version = "1.0.16", default-features = false } -alloy-provider = { version = "1.0.16", features = ["reqwest"], default-features = false } -alloy-pubsub = { version = "1.0.16", default-features = false } -alloy-rpc-client = { version = "1.0.16", default-features = false } -alloy-rpc-types = { version = "1.0.16", features = ["eth"], default-features = false } -alloy-rpc-types-admin = { version = "1.0.16", default-features = false } -alloy-rpc-types-anvil = { version = "1.0.16", default-features = false } -alloy-rpc-types-beacon = { version = "1.0.16", default-features = false } -alloy-rpc-types-debug = { version = "1.0.16", default-features = false } -alloy-rpc-types-engine = { version = "1.0.16", default-features = false } -alloy-rpc-types-eth = { version = "1.0.16", default-features = false } -alloy-rpc-types-mev = { version = "1.0.16", default-features = false } -alloy-rpc-types-trace = { version = "1.0.16", default-features = false } -alloy-rpc-types-txpool = { version = "1.0.16", default-features = false } -alloy-serde = { version = "1.0.16", default-features = false } -alloy-signer = { version = "1.0.16", default-features = false } -alloy-signer-local = { version = "1.0.16", default-features = false } -alloy-transport = { version = "1.0.16" } -alloy-transport-http = { version = "1.0.16", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.0.16", default-features = false } -alloy-transport-ws = { version = "1.0.16", default-features = false } +alloy-consensus = { version = "1.0.17", default-features = false } +alloy-contract = { version = "1.0.17", default-features = false } +alloy-eips = { version = "1.0.17", default-features = false } +alloy-genesis = { version = "1.0.17", default-features = false } +alloy-json-rpc = { version = "1.0.17", default-features = false } +alloy-network = { version = "1.0.17", default-features = false } +alloy-network-primitives = { version = "1.0.17", default-features = false } +alloy-provider = { version = "1.0.17", features = ["reqwest"], default-features = false } +alloy-pubsub = { version = "1.0.17", default-features = false } +alloy-rpc-client = { version = "1.0.17", default-features = false } +alloy-rpc-types = { version = "1.0.17", features = ["eth"], default-features = false } +alloy-rpc-types-admin = { version = "1.0.17", default-features = false } +alloy-rpc-types-anvil = { version = "1.0.17", default-features = false } +alloy-rpc-types-beacon = { version = "1.0.17", default-features = false } +alloy-rpc-types-debug = { version = "1.0.17", default-features = false } +alloy-rpc-types-engine = { version = "1.0.17", default-features = false } +alloy-rpc-types-eth = { version = "1.0.17", default-features = false } +alloy-rpc-types-mev = { version = "1.0.17", default-features = false } +alloy-rpc-types-trace = { version = "1.0.17", default-features = false } +alloy-rpc-types-txpool = { version = "1.0.17", default-features = false } +alloy-serde = { version = "1.0.17", default-features = false } +alloy-signer = { 
version = "1.0.17", default-features = false }
+alloy-signer-local = { version = "1.0.17", default-features = false }
+alloy-transport = { version = "1.0.17" }
+alloy-transport-http = { version = "1.0.17", features = ["reqwest-rustls-tls"], default-features = false }
+alloy-transport-ipc = { version = "1.0.17", default-features = false }
+alloy-transport-ws = { version = "1.0.17", default-features = false }
 
 # op
 alloy-op-evm = { version = "0.14", default-features = false }
diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml
index 12bce2a1e5c..a920a6cb5af 100644
--- a/crates/primitives-traits/Cargo.toml
+++ b/crates/primitives-traits/Cargo.toml
@@ -126,6 +126,7 @@ serde-bincode-compat = [
     "alloy-eips/serde-bincode-compat",
     "op-alloy-consensus?/serde",
     "op-alloy-consensus?/serde-bincode-compat",
+    "alloy-genesis/serde-bincode-compat",
 ]
 serde = [
     "dep:serde",
diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml
index 4f1aa64b30c..67fae820d93 100644
--- a/crates/primitives/Cargo.toml
+++ b/crates/primitives/Cargo.toml
@@ -101,6 +101,7 @@ serde-bincode-compat = [
     "alloy-consensus/serde-bincode-compat",
     "reth-primitives-traits/serde-bincode-compat",
     "reth-ethereum-primitives/serde-bincode-compat",
+    "alloy-genesis/serde-bincode-compat",
 ]
 
 [[bench]]
diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml
index ff6c5a58539..29b75342070 100644
--- a/crates/trie/common/Cargo.toml
+++ b/crates/trie/common/Cargo.toml
@@ -104,6 +104,7 @@ serde-bincode-compat = [
     "reth-primitives-traits/serde-bincode-compat",
     "alloy-consensus/serde-bincode-compat",
     "dep:serde_with",
+    "alloy-genesis/serde-bincode-compat",
 ]
 test-utils = [
     "dep:plain_hasher",

From 2962f2ea3582eadec7c56df093da8ea20ecf8ca4 Mon Sep 17 00:00:00 2001
From: MozirDmitriy
Date: Fri, 4 Jul 2025 15:00:17 +0300
Subject: [PATCH 043/305] chore: fix typo in documentation comment in environment.rs (#17218)

---
 crates/storage/libmdbx-rs/src/environment.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs
index ba3ecb95c42..b3730bf6d17 100644
--- a/crates/storage/libmdbx-rs/src/environment.rs
+++ b/crates/storage/libmdbx-rs/src/environment.rs
@@ -511,7 +511,7 @@ impl Default for Geometry {
 /// Read transactions prevent reuse of pages freed by newer write transactions, thus the database
 /// can grow quickly. This callback will be called when there is not enough space in the database
 /// (i.e. before increasing the database size or before `MDBX_MAP_FULL` error) and thus can be
-/// used to resolve issues with a "long-lived" read transacttions.
+/// used to resolve issues with "long-lived" read transactions.
 ///
 /// Depending on the arguments and needs, your implementation may wait,
 /// terminate a process or thread that is performing a long read, or perform

From 62b1d574e1cd56aa5c8217ff484353d8861890d2 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Fri, 4 Jul 2025 14:40:18 +0200
Subject: [PATCH 044/305] docs: improve NodeAddOns trait documentation (#17178)

Co-authored-by: Claude
---
 crates/node/api/src/node.rs | 73 +++++++++++++++++++++++++++++++++++++
 1 file changed, 73 insertions(+)

diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs
index 88b0ae5cf5c..5622554cd46 100644
--- a/crates/node/api/src/node.rs
+++ b/crates/node/api/src/node.rs
@@ -121,11 +121,84 @@ pub struct AddOnsContext<'a, N: FullNodeComponents> {
 }
 
 /// Customizable node add-on types.
+/// +/// This trait defines the interface for extending a node with additional functionality beyond +/// the core [`FullNodeComponents`]. It provides a way to launch supplementary services such as +/// RPC servers, monitoring, external integrations, or any custom functionality that builds on +/// top of the core node components. +/// +/// ## Purpose +/// +/// The `NodeAddOns` trait serves as an extension point in the node builder architecture, +/// allowing developers to: +/// - Define custom services that run alongside the main node +/// - Access all node components and configuration during initialization +/// - Return a handle for managing the launched services (e.g. handle to rpc server) +/// +/// ## How it fits into `NodeBuilder` +/// +/// In the node builder pattern, add-ons are the final layer that gets applied after all core +/// components are configured and started. The builder flow typically follows: +/// +/// 1. Configure [`NodeTypes`] (chain spec, database types, etc.) +/// 2. Build [`FullNodeComponents`] (consensus, networking, transaction pool, etc.) +/// 3. Launch [`NodeAddOns`] with access to all components via [`AddOnsContext`] +/// +/// ## Primary Use Case +/// +/// The primary use of this trait is to launch RPC servers that provide external API access to +/// the node. For Ethereum nodes, this typically includes two main servers: the regular RPC +/// server (HTTP/WS/IPC) that handles user requests and the authenticated Engine API server +/// that communicates with the consensus layer. The returned handle contains the necessary +/// endpoints and control mechanisms for these servers, allowing the node to serve JSON-RPC +/// requests and participate in consensus. While RPC is the main use case, the trait is +/// intentionally flexible to support other kinds of add-ons such as monitoring, indexing, or +/// custom protocol extensions. +/// +/// ## Context Access +/// +/// The [`AddOnsContext`] provides access to: +/// - All node components via the `node` field +/// - Node configuration +/// - Engine API handles for consensus layer communication +/// - JWT secrets for authenticated endpoints +/// +/// This ensures add-ons can integrate deeply with the node while maintaining clean separation +/// of concerns. pub trait NodeAddOns: Send { /// Handle to add-ons. + /// + /// This type is returned by [`launch_add_ons`](Self::launch_add_ons) and represents a + /// handle to the launched services. It must be `Clone` to allow multiple components to + /// hold references and should provide methods to interact with the running services. + /// + /// For RPC add-ons, this typically includes: + /// - Server handles to access local addresses and shutdown methods + /// - RPC module registry for runtime inspection of available methods + /// - Configured middleware and transport-specific settings + /// - For Engine API implementations, this also includes handles for consensus layer + /// communication type Handle: Send + Sync + Clone; /// Configures and launches the add-ons. + /// + /// This method is called once during node startup after all core components are initialized. + /// It receives an [`AddOnsContext`] that provides access to: + /// + /// - The fully configured node with all its components + /// - Node configuration for reading settings + /// - Engine API handles for consensus layer communication + /// - JWT secrets for setting up authenticated endpoints (if any). + /// + /// The implementation should: + /// 1. Use the context to configure the add-on services + /// 2. 
Launch any background tasks using the node's task executor + /// 3. Return a handle that allows interaction with the launched services + /// + /// # Errors + /// + /// This method may fail if the add-ons cannot be properly configured or launched, + /// for example due to port binding issues or invalid configuration. fn launch_add_ons( self, ctx: AddOnsContext<'_, N>, From cc46a27ebfb262b4a11506873dcf122106f103d0 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 4 Jul 2025 16:35:49 +0400 Subject: [PATCH 045/305] chore: make receipt root mismatch log more useful (#17225) --- crates/ethereum/consensus/src/validation.rs | 8 ++++++-- crates/optimism/consensus/src/validation/mod.rs | 7 ++++++- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs index 5b243f92680..f58b77cc575 100644 --- a/crates/ethereum/consensus/src/validation.rs +++ b/crates/ethereum/consensus/src/validation.rs @@ -1,7 +1,7 @@ use alloc::vec::Vec; use alloy_consensus::{proofs::calculate_receipt_root, BlockHeader, TxReceipt}; -use alloy_eips::eip7685::Requests; -use alloy_primitives::{Bloom, B256}; +use alloy_eips::{eip7685::Requests, Encodable2718}; +use alloy_primitives::{Bloom, Bytes, B256}; use reth_chainspec::EthereumHardforks; use reth_consensus::ConsensusError; use reth_primitives_traits::{ @@ -41,6 +41,10 @@ where if let Err(error) = verify_receipts(block.header().receipts_root(), block.header().logs_bloom(), receipts) { + let receipts = receipts + .iter() + .map(|r| Bytes::from(r.with_bloom_ref().encoded_2718())) + .collect::>(); tracing::debug!(%error, ?receipts, "receipts verification failed"); return Err(error) } diff --git a/crates/optimism/consensus/src/validation/mod.rs b/crates/optimism/consensus/src/validation/mod.rs index 4977647d89c..a025ae8931c 100644 --- a/crates/optimism/consensus/src/validation/mod.rs +++ b/crates/optimism/consensus/src/validation/mod.rs @@ -6,7 +6,8 @@ pub mod isthmus; use crate::proof::calculate_receipt_root_optimism; use alloc::vec::Vec; use alloy_consensus::{BlockHeader, TxReceipt, EMPTY_OMMER_ROOT_HASH}; -use alloy_primitives::{Bloom, B256}; +use alloy_eips::Encodable2718; +use alloy_primitives::{Bloom, Bytes, B256}; use alloy_trie::EMPTY_ROOT_HASH; use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError}; use reth_chainspec::{BaseFeeParams, EthChainSpec}; @@ -99,6 +100,10 @@ pub fn validate_block_post_execution( chain_spec, header.timestamp(), ) { + let receipts = receipts + .iter() + .map(|r| Bytes::from(r.with_bloom_ref().encoded_2718())) + .collect::>(); tracing::debug!(%error, ?receipts, "receipts verification failed"); return Err(error) } From 9a58ef18a73a039bdf93d5a75b61e11fd5fbd26c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 4 Jul 2025 14:41:45 +0200 Subject: [PATCH 046/305] chore: load kzg settings in background (#17224) --- crates/ethereum/node/src/node.rs | 11 +++++++++++ crates/transaction-pool/src/validate/eth.rs | 3 --- crates/transaction-pool/src/validate/task.rs | 5 +++++ 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index b1c89d7ddc4..41ea86eb2f8 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -431,6 +431,17 @@ where .with_additional_tasks(ctx.config().txpool.additional_validation_tasks) .build_with_tasks(ctx.task_executor().clone(), blob_store.clone()); + if validator.validator().eip4844() { + // initializing 
the KZG settings can be expensive; this should be done upfront so that
+            // it doesn't impact the first block or the first gossiped blob transaction. We therefore
+            // initialize them in the background
+            let kzg_settings = validator.validator().kzg_settings().clone();
+            ctx.task_executor().spawn_blocking(async move {
+                let _ = kzg_settings.get();
+                debug!(target: "reth::cli", "Initialized KZG settings");
+            });
+        }
+
         let transaction_pool = TxPoolBuilder::new(ctx)
             .with_validator(validator)
             .build_and_spawn_maintenance_task(blob_store, pool_config)?;
diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs
index 5f302d1a14a..76d9da17969 100644
--- a/crates/transaction-pool/src/validate/eth.rs
+++ b/crates/transaction-pool/src/validate/eth.rs
@@ -1029,9 +1029,6 @@ impl EthTransactionValidatorBuilder {
             max_blob_count: AtomicU64::new(max_blob_count),
         };
 
-        // Ensure the kzg setup is loaded right away.
-        let _kzg_settings = kzg_settings.get();
-
         let inner = EthTransactionValidatorInner {
             client,
             eip2718,
diff --git a/crates/transaction-pool/src/validate/task.rs b/crates/transaction-pool/src/validate/task.rs
index 7e417681fe8..93f16a585b0 100644
--- a/crates/transaction-pool/src/validate/task.rs
+++ b/crates/transaction-pool/src/validate/task.rs
@@ -106,6 +106,11 @@ impl TransactionValidationTaskExecutor {
             to_validation_task: self.to_validation_task,
         }
     }
+
+    /// Returns the validator.
+    pub const fn validator(&self) -> &V {
+        &self.validator
+    }
 }
 
 impl TransactionValidationTaskExecutor> {

From 250f2104ca082cc48fa55ef9f5ac484311d0134b Mon Sep 17 00:00:00 2001
From: Udoagwa Franklin <54338168+frankudoags@users.noreply.github.com>
Date: Fri, 4 Jul 2025 13:43:17 +0100
Subject: [PATCH 047/305] fix: Returns Arc in BlockAndReceiptsResult (#17213)

Co-authored-by: frankudoags
---
 crates/alloy-provider/src/lib.rs | 8 +++-----
 crates/chain-state/src/in_memory.rs | 6 +++---
 crates/rpc/rpc-eth-api/src/helpers/block.rs | 18 ++++++++++--------
 .../src/providers/blockchain_provider.rs | 7 +++++--
 .../provider/src/providers/consistent.rs | 6 ++----
 .../provider/src/providers/database/mod.rs | 4 ++--
 .../src/providers/database/provider.rs | 4 ++--
 .../src/providers/static_file/manager.rs | 4 ++--
 crates/storage/provider/src/test_utils/mock.rs | 5 ++---
 crates/storage/storage-api/src/block.rs | 8 ++++----
 crates/storage/storage-api/src/noop.rs | 6 ++----
 11 files changed, 37 insertions(+), 39 deletions(-)

diff --git a/crates/alloy-provider/src/lib.rs b/crates/alloy-provider/src/lib.rs
index ba4767006a4..c3f5e40a4da 100644
--- a/crates/alloy-provider/src/lib.rs
+++ b/crates/alloy-provider/src/lib.rs
@@ -34,9 +34,7 @@ use reth_db_api::{
 };
 use reth_errors::{ProviderError, ProviderResult};
 use reth_node_types::{BlockTy, HeaderTy, NodeTypes, PrimitivesTy, ReceiptTy, TxTy};
-use reth_primitives::{
-    Account, Bytecode, RecoveredBlock, SealedBlock, SealedHeader, TransactionMeta,
-};
+use reth_primitives::{Account, Bytecode, RecoveredBlock, SealedHeader, TransactionMeta};
 use reth_provider::{
     AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BytecodeReader,
     CanonChainTracker, CanonStateNotification, CanonStateNotifications, CanonStateSubscriptions,
@@ -381,7 +379,7 @@ where
 
     fn pending_block_and_receipts(
         &self,
-    ) -> ProviderResult, Vec)>> {
+    ) -> ProviderResult, Vec)>> {
         Err(ProviderError::UnsupportedProvider)
     }
 
@@ -1252,7 +1250,7 @@ where
 
     fn pending_block_and_receipts(
        &self,
-    ) -> Result, Vec)>, ProviderError> {
+    ) -> Result, Vec)>, ProviderError> {
Err(ProviderError::UnsupportedProvider) } diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index 20f2a2a4c21..22fae8951d3 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -159,7 +159,7 @@ impl CanonicalInMemoryStateInner { } type PendingBlockAndReceipts = - (SealedBlock<::Block>, Vec>); + (RecoveredBlock<::Block>, Vec>); /// This type is responsible for providing the blocks, receipts, and state for /// all canonical blocks not on disk yet and keeps track of the block range that @@ -480,7 +480,7 @@ impl CanonicalInMemoryState { pub fn pending_block_and_receipts(&self) -> Option> { self.pending_state().map(|block_state| { ( - block_state.block_ref().recovered_block().sealed_block().clone(), + block_state.block_ref().recovered_block().clone(), block_state.executed_block_receipts(), ) }) @@ -1347,7 +1347,7 @@ mod tests { // Check the pending block and receipts assert_eq!( state.pending_block_and_receipts().unwrap(), - (block2.recovered_block().sealed_block().clone(), vec![]) + (block2.recovered_block().clone(), vec![]) ); } diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 91a6739b8b3..0b88dc3bdc2 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -12,7 +12,7 @@ use alloy_rpc_types_eth::{Block, BlockTransactions, Header, Index}; use futures::Future; use reth_evm::ConfigureEvm; use reth_node_api::BlockBody; -use reth_primitives_traits::{NodePrimitives, RecoveredBlock, SealedBlock}; +use reth_primitives_traits::{NodePrimitives, RecoveredBlock}; use reth_rpc_convert::RpcConvert; use reth_storage_api::{BlockIdReader, BlockReader, ProviderHeader, ProviderReceipt, ProviderTx}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; @@ -23,7 +23,7 @@ pub type BlockReceiptsResult = Result>>, E>; /// Result type of the fetched block and its receipts. pub type BlockAndReceiptsResult = Result< Option<( - SealedBlock<<::Provider as BlockReader>::Block>, + Arc::Provider as BlockReader>::Block>>, Arc::Provider>>>, )>, ::Error, @@ -80,7 +80,7 @@ pub trait EthBlocks: LoadBlock { .provider() .pending_block() .map_err(Self::Error::from_eth_err)? - .map(|block| block.body().transactions().len())) + .map(|block| block.body().transaction_count())); } let block_hash = match self @@ -130,24 +130,26 @@ pub trait EthBlocks: LoadBlock { .pending_block_and_receipts() .map_err(Self::Error::from_eth_err)? { - return Ok(Some((block, Arc::new(receipts)))); + return Ok(Some((Arc::new(block), Arc::new(receipts)))); } // If no pending block from provider, build the pending block locally. if let Some((block, receipts)) = self.local_pending_block().await? { - return Ok(Some((block.into_sealed_block(), Arc::new(receipts)))); + return Ok(Some((Arc::new(block), Arc::new(receipts)))); } } if let Some(block_hash) = self.provider().block_hash_for_id(block_id).map_err(Self::Error::from_eth_err)? { - return self + if let Some((block, receipts)) = self .cache() .get_block_and_receipts(block_hash) .await - .map_err(Self::Error::from_eth_err) - .map(|b| b.map(|(b, r)| (b.clone_sealed_block(), r))) + .map_err(Self::Error::from_eth_err)? 
+ { + return Ok(Some((block, receipts))); + } } Ok(None) diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 5bc5e707153..06118aa4141 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -305,7 +305,7 @@ impl BlockReader for BlockchainProvider { fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } @@ -1205,7 +1205,10 @@ mod tests { Some(RecoveredBlock::new_sealed(block.clone(), block.senders().unwrap())) ); - assert_eq!(provider.pending_block_and_receipts()?, Some((block, vec![]))); + assert_eq!( + provider.pending_block_and_receipts()?, + Some((RecoveredBlock::new_sealed(block.clone(), block.senders().unwrap()), vec![])) + ); Ok(()) } diff --git a/crates/storage/provider/src/providers/consistent.rs b/crates/storage/provider/src/providers/consistent.rs index 3922e286c29..f617c3f6fa4 100644 --- a/crates/storage/provider/src/providers/consistent.rs +++ b/crates/storage/provider/src/providers/consistent.rs @@ -20,9 +20,7 @@ use reth_chainspec::ChainInfo; use reth_db_api::models::{AccountBeforeTx, BlockNumberAddress, StoredBlockBodyIndices}; use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; use reth_node_types::{BlockTy, HeaderTy, ReceiptTy, TxTy}; -use reth_primitives_traits::{ - Account, BlockBody, RecoveredBlock, SealedBlock, SealedHeader, StorageEntry, -}; +use reth_primitives_traits::{Account, BlockBody, RecoveredBlock, SealedHeader, StorageEntry}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ @@ -853,7 +851,7 @@ impl BlockReader for ConsistentProvider { fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(self.canonical_in_memory_state.pending_block_and_receipts()) } diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index 6fdff7bfa88..eeedf55b7ac 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -18,7 +18,7 @@ use reth_errors::{RethError, RethResult}; use reth_node_types::{ BlockTy, HeaderTy, NodeTypes, NodeTypesWithDB, NodeTypesWithDBAdapter, ReceiptTy, TxTy, }; -use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; +use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; @@ -377,7 +377,7 @@ impl BlockReader for ProviderFactory { fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { self.provider()?.pending_block_and_receipts() } diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 82dcae7a8a1..848ad45f087 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -47,7 +47,7 @@ use reth_execution_types::{Chain, ExecutionOutcome}; use reth_node_types::{BlockTy, BodyTy, HeaderTy, NodeTypes, ReceiptTy, TxTy}; use reth_primitives_traits::{ Account, Block as _, BlockBody as _, Bytecode, 
GotExpected, NodePrimitives, RecoveredBlock, - SealedBlock, SealedHeader, SignedTransaction, StorageEntry, + SealedHeader, SignedTransaction, StorageEntry, }; use reth_prune_types::{ PruneCheckpoint, PruneMode, PruneModes, PruneSegment, MINIMUM_PRUNING_DISTANCE, @@ -1213,7 +1213,7 @@ impl BlockReader for DatabaseProvid fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(None) } diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index aabcb248c02..721c11c6564 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -36,7 +36,7 @@ use reth_db_api::{ use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_nippy_jar::{NippyJar, NippyJarChecker, CONFIG_FILE_EXTENSION}; use reth_node_types::{FullNodePrimitives, NodePrimitives}; -use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader, SignedTransaction}; +use reth_primitives_traits::{RecoveredBlock, SealedHeader, SignedTransaction}; use reth_stages_types::{PipelineTarget, StageId}; use reth_static_file_types::{ find_fixed_range, HighestStaticFiles, SegmentHeader, SegmentRangeInclusive, StaticFileSegment, @@ -1760,7 +1760,7 @@ impl> fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { // Required data not present in static_files Err(ProviderError::UnsupportedProvider) } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 2d0cfb665df..889712259c7 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -23,8 +23,7 @@ use reth_ethereum_primitives::{EthPrimitives, Receipt}; use reth_execution_types::ExecutionOutcome; use reth_node_types::NodeTypes; use reth_primitives_traits::{ - Account, Bytecode, GotExpected, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, - SignerRecoverable, + Account, Bytecode, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, SignerRecoverable, }; use reth_prune_types::PruneModes; use reth_stages_types::{StageCheckpoint, StageId}; @@ -675,7 +674,7 @@ impl BlockReader fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(None) } diff --git a/crates/storage/storage-api/src/block.rs b/crates/storage/storage-api/src/block.rs index 4316e5af673..40a009935ca 100644 --- a/crates/storage/storage-api/src/block.rs +++ b/crates/storage/storage-api/src/block.rs @@ -6,7 +6,7 @@ use alloc::{sync::Arc, vec::Vec}; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; use alloy_primitives::{BlockNumber, B256}; use core::ops::RangeInclusive; -use reth_primitives_traits::{RecoveredBlock, SealedBlock, SealedHeader}; +use reth_primitives_traits::{RecoveredBlock, SealedHeader}; use reth_storage_errors::provider::ProviderResult; /// A helper enum that represents the origin of the requested block. @@ -88,7 +88,7 @@ pub trait BlockReader: #[expect(clippy::type_complexity)] fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>>; + ) -> ProviderResult, Vec)>>; /// Returns the block with matching hash from the database. 
/// @@ -164,7 +164,7 @@ impl BlockReader for Arc { } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { T::pending_block_and_receipts(self) } fn block_by_hash(&self, hash: B256) -> ProviderResult> { @@ -222,7 +222,7 @@ impl BlockReader for &T { } fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { T::pending_block_and_receipts(self) } fn block_by_hash(&self, hash: B256) -> ProviderResult> { diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 2afa4b616f5..5eff34025d0 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -22,9 +22,7 @@ use core::{ use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, MAINNET}; use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_ethereum_primitives::EthPrimitives; -use reth_primitives_traits::{ - Account, Bytecode, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, -}; +use reth_primitives_traits::{Account, Bytecode, NodePrimitives, RecoveredBlock, SealedHeader}; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -169,7 +167,7 @@ impl BlockReader for NoopProvider { fn pending_block_and_receipts( &self, - ) -> ProviderResult, Vec)>> { + ) -> ProviderResult, Vec)>> { Ok(None) } From d101fb7b900ca998cb117a1bd52c92c1e134c211 Mon Sep 17 00:00:00 2001 From: GarmashAlex Date: Fri, 4 Jul 2025 16:01:55 +0300 Subject: [PATCH 048/305] Update metrics documentation link to new official Reth docs (#17220) --- docs/vocs/docs/pages/run/monitoring.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/vocs/docs/pages/run/monitoring.mdx b/docs/vocs/docs/pages/run/monitoring.mdx index 6a8a35fcec7..30ce967bb10 100644 --- a/docs/vocs/docs/pages/run/monitoring.mdx +++ b/docs/vocs/docs/pages/run/monitoring.mdx @@ -141,4 +141,4 @@ This will all be very useful to you, whether you're simply running a home node a [installation]: ../installation/installation [release-profile]: https://doc.rust-lang.org/cargo/reference/profiles.html#release [docs]: https://github.com/paradigmxyz/reth/tree/main/docs -[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics#current-metrics +[metrics]: https://reth.rs/run/observability.html From 6bf87384cae0ef2cdd73d68d1e9fd134e5ea6b7e Mon Sep 17 00:00:00 2001 From: Galoretka Date: Fri, 4 Jul 2025 15:53:42 +0300 Subject: [PATCH 049/305] Fix typo in EVM component documentation (#17227) --- docs/vocs/docs/pages/sdk/node-components/evm.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/vocs/docs/pages/sdk/node-components/evm.mdx b/docs/vocs/docs/pages/sdk/node-components/evm.mdx index 6047f69bd73..1460f8938f4 100644 --- a/docs/vocs/docs/pages/sdk/node-components/evm.mdx +++ b/docs/vocs/docs/pages/sdk/node-components/evm.mdx @@ -1,6 +1,6 @@ # EVM Component -The EVM (Ethereum Virtual Machine) component handles transaction execution and state transitionss. It's responsible for processing transactions and updating the blockchain state. +The EVM (Ethereum Virtual Machine) component handles transaction execution and state transitions. It's responsible for processing transactions and updating the blockchain state. 
 ## Overview
 
@@ -42,4 +42,4 @@ Block builders construct new blocks for proposal:
 
 - Learn about [RPC](/sdk/node-components/rpc) server integration
 - Explore [Transaction Pool](/sdk/node-components/pool) interaction
-- Review [Consensus](/sdk/node-components/consensus) validation
\ No newline at end of file
+- Review [Consensus](/sdk/node-components/consensus) validation

From 19d4d4f4f3a23b4ae05b2404741a97f49f2f2d5a Mon Sep 17 00:00:00 2001
From: Fallengirl <155266340+Fallengirl@users.noreply.github.com>
Date: Fri, 4 Jul 2025 16:34:25 +0200
Subject: [PATCH 050/305] docs: fix typos across documentation (#17212)

---
 crates/exex/exex/src/notifications.rs | 2 +-
 crates/net/discv5/src/filter.rs       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs
index 651bd7d5b29..c624fd4ff4e 100644
--- a/crates/exex/exex/src/notifications.rs
+++ b/crates/exex/exex/src/notifications.rs
@@ -350,7 +350,7 @@ where
     /// Compares the node head against the ExEx head, and backfills if needed.
     ///
-    /// CAUTON: This method assumes that the ExEx head is <= the node head, and that it's on the
+    /// CAUTION: This method assumes that the ExEx head is <= the node head, and that it's on the
     /// canonical chain.
     ///
     /// Possible situations are:
diff --git a/crates/net/discv5/src/filter.rs b/crates/net/discv5/src/filter.rs
index a83345a9a5e..def00f54dc3 100644
--- a/crates/net/discv5/src/filter.rs
+++ b/crates/net/discv5/src/filter.rs
@@ -1,4 +1,4 @@
-//! Predicates to constraint peer lookups.
+//! Predicates to constrain peer lookups.
 
 use std::collections::HashSet;

From a46d0c02736c9c8aac638ad5045c3261b78d5ddc Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Fri, 4 Jul 2025 16:52:59 +0200
Subject: [PATCH 051/305] chore: use alloy traits for build receipt (#17211)

---
 crates/rpc/rpc-eth-types/src/receipt.rs | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs
index d10bb1d4a33..a99d4eff493 100644
--- a/crates/rpc/rpc-eth-types/src/receipt.rs
+++ b/crates/rpc/rpc-eth-types/src/receipt.rs
@@ -1,12 +1,14 @@
 //! RPC receipt response builder, extends a layer one receipt with layer two data.
 
 use super::EthResult;
-use alloy_consensus::{transaction::TransactionMeta, ReceiptEnvelope, TxReceipt};
+use alloy_consensus::{
+    transaction::{SignerRecoverable, TransactionMeta},
+    ReceiptEnvelope, Transaction, TxReceipt,
+};
 use alloy_eips::eip7840::BlobParams;
 use alloy_primitives::{Address, TxKind};
 use alloy_rpc_types_eth::{Log, ReceiptWithBloom, TransactionReceipt};
 use reth_ethereum_primitives::{Receipt, TransactionSigned, TxType};
-use reth_primitives_traits::SignedTransaction;
 
 /// Builds a [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure.
pub fn build_receipt( @@ -19,7 +21,7 @@ pub fn build_receipt( ) -> EthResult> where R: TxReceipt, - T: SignedTransaction, + T: Transaction + SignerRecoverable, { // Note: we assume this transaction is valid, because it's mined (or part of pending block) // and we don't need to check for pre EIP-2 From 47d2ed55d14bc32fc021e40868997df0cc9eb099 Mon Sep 17 00:00:00 2001 From: leopardracer <136604165+leopardracer@users.noreply.github.com> Date: Fri, 4 Jul 2025 17:53:29 +0300 Subject: [PATCH 052/305] docs: fix typo in documentation comments (#17207) --- crates/engine/tree/src/tree/payload_processor/mod.rs | 2 +- crates/trie/sparse/src/traits.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index cc322490e58..2f70ff5cbd0 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -68,7 +68,7 @@ where precompile_cache_disabled: bool, /// Precompile cache map. precompile_cache_map: PrecompileCacheMap>, - /// A cleared sparse trie, kept around to be re-used for the state root computation so that + /// A cleared sparse trie, kept around to be reused for the state root computation so that /// allocations can be minimized. sparse_trie: Option, _marker: std::marker::PhantomData, diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 6f1acbfca9c..62ca424cd32 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -214,7 +214,7 @@ pub trait SparseTrieInterface: Default + Debug { /// Removes all nodes and values from the trie, resetting it to a blank state /// with only an empty root node. This is used when a storage root is deleted. /// - /// This should not be used when intending to re-use the trie for a fresh account/storage root; + /// This should not be used when intending to reuse the trie for a fresh account/storage root; /// use `clear` for that. /// /// Note: All previously tracked changes to the trie are also removed. From 89d0e6a919a4038fca43efea670a5a8039f00742 Mon Sep 17 00:00:00 2001 From: Ritesh Das <98543992+Dyslex7c@users.noreply.github.com> Date: Fri, 4 Jul 2025 20:58:12 +0530 Subject: [PATCH 053/305] feat(p2p): separate args for (header, body) (#17184) Co-authored-by: Matthias Seitz --- crates/cli/commands/src/p2p/mod.rs | 228 +++++++++++------- docs/vocs/docs/pages/cli/reth/p2p.mdx | 236 +------------------ docs/vocs/docs/pages/cli/reth/p2p/body.mdx | 213 ++++++++++++++++- docs/vocs/docs/pages/cli/reth/p2p/header.mdx | 213 ++++++++++++++++- docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx | 2 +- 5 files changed, 558 insertions(+), 334 deletions(-) diff --git a/crates/cli/commands/src/p2p/mod.rs b/crates/cli/commands/src/p2p/mod.rs index 3aa7569e9b6..c3a20231638 100644 --- a/crates/cli/commands/src/p2p/mod.rs +++ b/crates/cli/commands/src/p2p/mod.rs @@ -13,7 +13,7 @@ use reth_config::Config; use reth_network::{BlockDownloaderProvider, NetworkConfigBuilder}; use reth_network_p2p::bodies::client::BodiesClient; use reth_node_core::{ - args::{DatabaseArgs, DatadirArgs, NetworkArgs}, + args::{DatadirArgs, NetworkArgs}, utils::get_single_header, }; @@ -23,63 +23,149 @@ pub mod rlpx; /// `reth p2p` command #[derive(Debug, Parser)] pub struct Command { - /// The path to the configuration file to use. 
- #[arg(long, value_name = "FILE", verbatim_doc_comment)] - config: Option, + #[command(subcommand)] + command: Subcommands, +} - /// The chain this node is running. - /// - /// Possible values are either a built-in chain or the path to a chain specification file. - #[arg( - long, - value_name = "CHAIN_OR_PATH", - long_help = C::help_message(), - default_value = C::SUPPORTED_CHAINS[0], - value_parser = C::parser() - )] - chain: Arc, +impl> Command { + /// Execute `p2p` command + pub async fn execute>(self) -> eyre::Result<()> { + match self.command { + Subcommands::Header { args, id } => { + let handle = args.launch_network::().await?; + let fetch_client = handle.fetch_client().await?; + let backoff = args.backoff(); - /// The number of retries per request - #[arg(long, default_value = "5")] - retries: usize, + let header = (move || get_single_header(fetch_client.clone(), id)) + .retry(backoff) + .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) + .await?; + println!("Successfully downloaded header: {header:?}"); + } - #[command(flatten)] - network: NetworkArgs, + Subcommands::Body { args, id } => { + let handle = args.launch_network::().await?; + let fetch_client = handle.fetch_client().await?; + let backoff = args.backoff(); - #[command(flatten)] - datadir: DatadirArgs, + let hash = match id { + BlockHashOrNumber::Hash(hash) => hash, + BlockHashOrNumber::Number(number) => { + println!("Block number provided. Downloading header first..."); + let client = fetch_client.clone(); + let header = (move || { + get_single_header(client.clone(), BlockHashOrNumber::Number(number)) + }) + .retry(backoff) + .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) + .await?; + header.hash() + } + }; + let (_, result) = (move || { + let client = fetch_client.clone(); + client.get_block_bodies(vec![hash]) + }) + .retry(backoff) + .notify(|err, _| println!("Error requesting block: {err}. Retrying...")) + .await? + .split(); + if result.len() != 1 { + eyre::bail!( + "Invalid number of headers received. Expected: 1. Received: {}", + result.len() + ) + } + let body = result.into_iter().next().unwrap(); + println!("Successfully downloaded body: {body:?}") + } + Subcommands::Rlpx(command) => { + command.execute().await?; + } + Subcommands::Bootnode(command) => { + command.execute().await?; + } + } - #[command(flatten)] - db: DatabaseArgs, + Ok(()) + } +} - #[command(subcommand)] - command: Subcommands, +impl Command { + /// Returns the underlying chain being used to run this command + pub fn chain_spec(&self) -> Option<&Arc> { + match &self.command { + Subcommands::Header { args, .. } => Some(&args.chain), + Subcommands::Body { args, .. 
} => Some(&args.chain), + Subcommands::Rlpx(_) => None, + Subcommands::Bootnode(_) => None, + } + } } /// `reth p2p` subcommands #[derive(Subcommand, Debug)] -pub enum Subcommands { +pub enum Subcommands { /// Download block header Header { + #[command(flatten)] + args: DownloadArgs, /// The header number or hash #[arg(value_parser = hash_or_num_value_parser)] id: BlockHashOrNumber, }, /// Download block body Body { + #[command(flatten)] + args: DownloadArgs, /// The block number or hash #[arg(value_parser = hash_or_num_value_parser)] id: BlockHashOrNumber, }, - /// RLPx utilities + // RLPx utilities Rlpx(rlpx::Command), /// Bootnode command Bootnode(bootnode::Command), } -impl> Command { - /// Execute `p2p` command - pub async fn execute>(self) -> eyre::Result<()> { +#[derive(Debug, Clone, Parser)] +pub struct DownloadArgs { + /// The number of retries per request + #[arg(long, default_value = "5")] + retries: usize, + + #[command(flatten)] + network: NetworkArgs, + + #[command(flatten)] + datadir: DatadirArgs, + + /// The path to the configuration file to use. + #[arg(long, value_name = "FILE", verbatim_doc_comment)] + config: Option, + + /// The chain this node is running. + /// + /// Possible values are either a built-in chain or the path to a chain specification file. + #[arg( + long, + value_name = "CHAIN_OR_PATH", + long_help = C::help_message(), + default_value = C::SUPPORTED_CHAINS[0], + value_parser = C::parser() + )] + chain: Arc, +} + +impl DownloadArgs { + /// Creates and spawns the network and returns the handle. + pub async fn launch_network( + &self, + ) -> eyre::Result> + where + C::ChainSpec: EthChainSpec + Hardforks + EthereumHardforks + Send + Sync + 'static, + N: CliNodeTypes, + { let data_dir = self.datadir.clone().resolve_datadir(self.chain.chain()); let config_path = self.config.clone().unwrap_or_else(|| data_dir.config()); @@ -106,76 +192,38 @@ impl let net = NetworkConfigBuilder::::new(p2p_secret_key) .peer_config(config.peers_config_with_basic_nodes_from_file(None)) .external_ip_resolver(self.network.nat) - .disable_discv4_discovery_if(self.chain.chain().is_optimism()) .boot_nodes(boot_nodes.clone()) .apply(|builder| { self.network.discovery.apply_to_builder(builder, rlpx_socket, boot_nodes) }) - .build_with_noop_provider(self.chain) + .build_with_noop_provider(self.chain.clone()) .manager() .await?; - let network = net.handle().clone(); + let handle = net.handle().clone(); tokio::task::spawn(net); - let fetch_client = network.fetch_client().await?; - let retries = self.retries.max(1); - let backoff = ConstantBuilder::default().with_max_times(retries); - - match self.command { - Subcommands::Header { id } => { - let header = (move || get_single_header(fetch_client.clone(), id)) - .retry(backoff) - .notify(|err, _| println!("Error requesting header: {err}. Retrying...")) - .await?; - println!("Successfully downloaded header: {header:?}"); - } - Subcommands::Body { id } => { - let hash = match id { - BlockHashOrNumber::Hash(hash) => hash, - BlockHashOrNumber::Number(number) => { - println!("Block number provided. Downloading header first..."); - let client = fetch_client.clone(); - let header = (move || { - get_single_header(client.clone(), BlockHashOrNumber::Number(number)) - }) - .retry(backoff) - .notify(|err, _| println!("Error requesting header: {err}. 
Retrying...")) - .await?; - header.hash() - } - }; - let (_, result) = (move || { - let client = fetch_client.clone(); - client.get_block_bodies(vec![hash]) - }) - .retry(backoff) - .notify(|err, _| println!("Error requesting block: {err}. Retrying...")) - .await? - .split(); - if result.len() != 1 { - eyre::bail!( - "Invalid number of headers received. Expected: 1. Received: {}", - result.len() - ) - } - let body = result.into_iter().next().unwrap(); - println!("Successfully downloaded body: {body:?}") - } - Subcommands::Rlpx(command) => { - command.execute().await?; - } - Subcommands::Bootnode(command) => { - command.execute().await?; - } - } + Ok(handle) + } - Ok(()) + pub fn backoff(&self) -> ConstantBuilder { + ConstantBuilder::default().with_max_times(self.retries.max(1)) } } -impl Command { - /// Returns the underlying chain being used to run this command - pub fn chain_spec(&self) -> Option<&Arc> { - Some(&self.chain) +#[cfg(test)] +mod tests { + use super::*; + use reth_ethereum_cli::chainspec::EthereumChainSpecParser; + + #[test] + fn parse_header_cmd() { + let _args: Command = + Command::parse_from(["reth", "header", "--chain", "mainnet", "1000"]); + } + + #[test] + fn parse_body_cmd() { + let _args: Command = + Command::parse_from(["reth", "body", "--chain", "mainnet", "1000"]); } } diff --git a/docs/vocs/docs/pages/cli/reth/p2p.mdx b/docs/vocs/docs/pages/cli/reth/p2p.mdx index 53a6f214532..151c386ef48 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p.mdx @@ -11,248 +11,14 @@ Usage: reth p2p [OPTIONS] Commands: header Download block header body Download block body - rlpx RLPx utilities + rlpx RLPx commands bootnode Bootnode command help Print this message or the help of the given subcommand(s) Options: - --config - The path to the configuration file to use. - - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, holesky, hoodi, dev - - [default: mainnet] - - --retries - The number of retries per request - - [default: 5] - -h, --help Print help (see a summary with '-h') -Networking: - -d, --disable-discovery - Disable the discovery service - - --disable-dns-discovery - Disable the DNS discovery - - --disable-discv4-discovery - Disable Discv4 discovery - - --enable-discv5-discovery - Enable Discv5 discovery - - --disable-nat - Disable Nat discovery - - --discovery.addr - The UDP address to use for devp2p peer discovery version 4 - - [default: 0.0.0.0] - - --discovery.port - The UDP port to use for devp2p peer discovery version 4 - - [default: 30303] - - --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 - - --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 - - --discovery.v5.port - The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set - - [default: 9200] - - --discovery.v5.port.ipv6 - The UDP IPv6 port to use for devp2p peer discovery version 5. 
Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set - - [default: 9200] - - --discovery.v5.lookup-interval - The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - - [default: 20] - - --discovery.v5.bootstrap.lookup-interval - The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - - [default: 5] - - --discovery.v5.bootstrap.lookup-countdown - The number of times to carry out boost lookup queries at bootstrap - - [default: 200] - - --trusted-peers - Comma separated enode URLs of trusted peers for P2P connections. - - --trusted-peers enode://abcd@192.168.0.1:30303 - - --trusted-only - Connect to or accept from trusted peers only - - --bootnodes - Comma separated enode URLs for P2P discovery bootstrap. - - Will fall back to a network-specific default if not specified. - - --dns-retries - Amount of DNS resolution requests retries to perform when peering - - [default: 0] - - --peers-file - The path to the known peers file. Connected peers are dumped to this file on nodes - shutdown, and read on startup. Cannot be used with `--no-persist-peers`. - - --identity - Custom node identity - - [default: reth/-/] - - --p2p-secret-key - Secret key to use for this node. - - This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. - - --no-persist-peers - Do not persist peers. - - --nat - NAT resolution method (any|none|upnp|publicip|extip:\) - - [default: any] - - --addr - Network listening address - - [default: 0.0.0.0] - - --port - Network listening port - - [default: 30303] - - --max-outbound-peers - Maximum number of outbound requests. default: 100 - - --max-inbound-peers - Maximum number of inbound requests. default: 30 - - --max-tx-reqs - Max concurrent `GetPooledTransactions` requests. - - [default: 130] - - --max-tx-reqs-peer - Max concurrent `GetPooledTransactions` requests per peer. - - [default: 1] - - --max-seen-tx-history - Max number of seen transactions to remember per peer. - - Default is 320 transaction hashes. - - [default: 320] - - --max-pending-imports - Max number of transactions to import concurrently. - - [default: 4096] - - --pooled-tx-response-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions - to pack in one response. - Spec'd at 2MiB. - - [default: 2097152] - - --pooled-tx-pack-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions to - request in one request. - - Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see `RLPx` specs). This allows a node to request a specific size - response. - - By default, nodes request only 128 KiB worth of transactions, but should a peer request - more, up to 2 MiB, a node will answer with more than 128 KiB. - - Default is 128 KiB. - - [default: 131072] - - --max-tx-pending-fetch - Max capacity of cache of hashes for transactions pending fetch. - - [default: 25600] - - --net-if.experimental - Name of network interface used to communicate with peers. - - If flag is set, but no value is passed, the default interface for docker `eth0` is tried. - - --tx-propagation-policy - Transaction Propagation Policy - - The policy determines which peers transactions are gossiped to. - - [default: All] - -Datadir: - --datadir - The path to the data dir for all reth files and subdirectories. 
- - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --datadir.static-files - The absolute path to store static files in. - -Database: - --db.log-level - Database logging level. Levels higher than "notice" require a debug build - - Possible values: - - fatal: Enables logging for critical conditions, i.e. assertion failures - - error: Enables logging for error conditions - - warn: Enables logging for warning conditions - - notice: Enables logging for normal but significant condition - - verbose: Enables logging for verbose informational - - debug: Enables logging for debug-level messages - - trace: Enables logging for trace debug-level messages - - extra: Enables logging for extra debug-level messages - - --db.exclusive - Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - - [possible values: true, false] - - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - - --db.read-transaction-timeout - Read transaction timeout in seconds, 0 means no timeout - Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx index e5092f274ea..223dec04d25 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/body.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/body.mdx @@ -8,14 +8,219 @@ $ reth p2p body --help ```txt Usage: reth p2p body [OPTIONS] -Arguments: - - The block number or hash - Options: + --retries + The number of retries per request + + [default: 5] + -h, --help Print help (see a summary with '-h') +Networking: + -d, --disable-discovery + Disable the discovery service + + --disable-dns-discovery + Disable the DNS discovery + + --disable-discv4-discovery + Disable Discv4 discovery + + --enable-discv5-discovery + Enable Discv5 discovery + + --disable-nat + Disable Nat discovery + + --discovery.addr + The UDP address to use for devp2p peer discovery version 4 + + [default: 0.0.0.0] + + --discovery.port + The UDP port to use for devp2p peer discovery version 4 + + [default: 30303] + + --discovery.v5.addr + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 + + --discovery.v5.addr.ipv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 + + --discovery.v5.port + The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set + + [default: 9200] + + --discovery.v5.port.ipv6 + The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set + + [default: 9200] + + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 20] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 200] + + --trusted-peers + Comma separated enode URLs of trusted peers for P2P connections. 
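Since `--trusted-peers` takes comma-separated enode URLs (the help's own example enode continues just below), here is a small sketch of how such a value parses into reth's `NodeRecord` type. The `reth_network_peers` path matches the workspace crate, while the all-`aa` node ID is purely illustrative:

```rust
use reth_network_peers::NodeRecord;

fn main() {
    // An enode URL embeds a 64-byte node ID (128 hex chars) plus a socket.
    let enode = format!("enode://{}@192.168.0.1:30303", "aa".repeat(64));
    let record: NodeRecord = enode.parse().expect("well-formed enode URL");
    assert_eq!(record.tcp_port, 30303);
}
```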
+ + --trusted-peers enode://abcd@192.168.0.1:30303 + + --trusted-only + Connect to or accept from trusted peers only + + --bootnodes + Comma separated enode URLs for P2P discovery bootstrap. + + Will fall back to a network-specific default if not specified. + + --dns-retries + Amount of DNS resolution requests retries to perform when peering + + [default: 0] + + --peers-file + The path to the known peers file. Connected peers are dumped to this file on nodes + shutdown, and read on startup. Cannot be used with `--no-persist-peers`. + + --identity + Custom node identity + + [default: reth/-/] + + --p2p-secret-key + Secret key to use for this node. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + + --no-persist-peers + Do not persist peers. + + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --addr + Network listening address + + [default: 0.0.0.0] + + --port + Network listening port + + [default: 30303] + + --max-outbound-peers + Maximum number of outbound requests. default: 100 + + --max-inbound-peers + Maximum number of inbound requests. default: 30 + + --max-tx-reqs + Max concurrent `GetPooledTransactions` requests. + + [default: 130] + + --max-tx-reqs-peer + Max concurrent `GetPooledTransactions` requests per peer. + + [default: 1] + + --max-seen-tx-history + Max number of seen transactions to remember per peer. + + Default is 320 transaction hashes. + + [default: 320] + + --max-pending-imports + Max number of transactions to import concurrently. + + [default: 4096] + + --pooled-tx-response-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions + to pack in one response. + Spec'd at 2MiB. + + [default: 2097152] + + --pooled-tx-pack-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions to + request in one request. + + Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see `RLPx` specs). This allows a node to request a specific size + response. + + By default, nodes request only 128 KiB worth of transactions, but should a peer request + more, up to 2 MiB, a node will answer with more than 128 KiB. + + Default is 128 KiB. + + [default: 131072] + + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + + --tx-propagation-policy + Transaction Propagation Policy + + The policy determines which peers transactions are gossiped to. + + [default: All] + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --config + The path to the configuration file to use. + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
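The `--chain` flag just described accepts either a built-in name (the list follows below) or a filesystem path. A simplified sketch of that dual-mode value parser; `ChainSpec` is a bare placeholder here, and the real behavior comes from the CLI's `ChainSpecParser` implementation:

```rust
use std::{path::Path, sync::Arc};

/// Placeholder for the real chain spec type.
#[derive(Debug, Default)]
struct ChainSpec;

fn chain_value_parser(s: &str) -> Result<Arc<ChainSpec>, String> {
    match s {
        // Built-in chains resolve to bundled specs.
        "mainnet" | "sepolia" | "holesky" | "hoodi" | "dev" => Ok(Arc::new(ChainSpec)),
        // Anything else is treated as a path to a chain specification file.
        path => {
            let raw = std::fs::read_to_string(Path::new(path))
                .map_err(|e| format!("failed to read chain spec: {e}"))?;
            // The real parser deserializes a genesis JSON here.
            let _ = raw;
            Ok(Arc::new(ChainSpec))
        }
    }
}

fn main() {
    assert!(chain_value_parser("mainnet").is_ok());
}
```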
+ + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + + + The block number or hash + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx index 8b1f6b96cd8..1fbaa1b1989 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/header.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/header.mdx @@ -8,14 +8,219 @@ $ reth p2p header --help ```txt Usage: reth p2p header [OPTIONS] -Arguments: - - The header number or hash - Options: + --retries + The number of retries per request + + [default: 5] + -h, --help Print help (see a summary with '-h') +Networking: + -d, --disable-discovery + Disable the discovery service + + --disable-dns-discovery + Disable the DNS discovery + + --disable-discv4-discovery + Disable Discv4 discovery + + --enable-discv5-discovery + Enable Discv5 discovery + + --disable-nat + Disable Nat discovery + + --discovery.addr + The UDP address to use for devp2p peer discovery version 4 + + [default: 0.0.0.0] + + --discovery.port + The UDP port to use for devp2p peer discovery version 4 + + [default: 30303] + + --discovery.v5.addr + The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 + + --discovery.v5.addr.ipv6 + The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 + + --discovery.v5.port + The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set + + [default: 9200] + + --discovery.v5.port.ipv6 + The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set + + [default: 9200] + + --discovery.v5.lookup-interval + The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program + + [default: 20] + + --discovery.v5.bootstrap.lookup-interval + The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap + + [default: 5] + + --discovery.v5.bootstrap.lookup-countdown + The number of times to carry out boost lookup queries at bootstrap + + [default: 200] + + --trusted-peers + Comma separated enode URLs of trusted peers for P2P connections. + + --trusted-peers enode://abcd@192.168.0.1:30303 + + --trusted-only + Connect to or accept from trusted peers only + + --bootnodes + Comma separated enode URLs for P2P discovery bootstrap. + + Will fall back to a network-specific default if not specified. + + --dns-retries + Amount of DNS resolution requests retries to perform when peering + + [default: 0] + + --peers-file + The path to the known peers file. Connected peers are dumped to this file on nodes + shutdown, and read on startup. Cannot be used with `--no-persist-peers`. + + --identity + Custom node identity + + [default: reth/-/] + + --p2p-secret-key + Secret key to use for this node. + + This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. + + --no-persist-peers + Do not persist peers. + + --nat + NAT resolution method (any|none|upnp|publicip|extip:\) + + [default: any] + + --addr + Network listening address + + [default: 0.0.0.0] + + --port + Network listening port + + [default: 30303] + + --max-outbound-peers + Maximum number of outbound requests. default: 100 + + --max-inbound-peers + Maximum number of inbound requests. 
default: 30 + + --max-tx-reqs + Max concurrent `GetPooledTransactions` requests. + + [default: 130] + + --max-tx-reqs-peer + Max concurrent `GetPooledTransactions` requests per peer. + + [default: 1] + + --max-seen-tx-history + Max number of seen transactions to remember per peer. + + Default is 320 transaction hashes. + + [default: 320] + + --max-pending-imports + Max number of transactions to import concurrently. + + [default: 4096] + + --pooled-tx-response-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions + to pack in one response. + Spec'd at 2MiB. + + [default: 2097152] + + --pooled-tx-pack-soft-limit + Experimental, for usage in research. Sets the max accumulated byte size of transactions to + request in one request. + + Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a + transaction announcement (see `RLPx` specs). This allows a node to request a specific size + response. + + By default, nodes request only 128 KiB worth of transactions, but should a peer request + more, up to 2 MiB, a node will answer with more than 128 KiB. + + Default is 128 KiB. + + [default: 131072] + + --max-tx-pending-fetch + Max capacity of cache of hashes for transactions pending fetch. + + [default: 25600] + + --net-if.experimental + Name of network interface used to communicate with peers. + + If flag is set, but no value is passed, the default interface for docker `eth0` is tried. + + --tx-propagation-policy + Transaction Propagation Policy + + The policy determines which peers transactions are gossiped to. + + [default: All] + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --config + The path to the configuration file to use. + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. 
+ + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + + + The header number or hash + Logging: --log.stdout.format The format to use for logs written to stdout diff --git a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx index 145409e767e..484a8005cbd 100644 --- a/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx +++ b/docs/vocs/docs/pages/cli/reth/p2p/rlpx.mdx @@ -1,6 +1,6 @@ # reth p2p rlpx -RLPx utilities +RLPx commands ```bash $ reth p2p rlpx --help From dcf3469d56a39f3e2cee2477e2331e2dbd035260 Mon Sep 17 00:00:00 2001 From: fantasyup <59591096+fantasyup@users.noreply.github.com> Date: Fri, 4 Jul 2025 12:44:50 -0400 Subject: [PATCH 054/305] chore(doc): update exclude list for doc/cli (#17234) Co-authored-by: Matthias Seitz --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 919d48f772e..67ba12f33bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -169,7 +169,7 @@ members = [ "crates/tracing-otlp", ] default-members = ["bin/reth"] -exclude = ["book/sources", "book/cli"] +exclude = ["docs/cli"] # Explicitly set the resolver to version 2, which is the default for packages with edition >= 2021 # https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html From ca36316f3b30e410ef89fb38a4267ed87ca73c45 Mon Sep 17 00:00:00 2001 From: fantasyup <59591096+fantasyup@users.noreply.github.com> Date: Fri, 4 Jul 2025 14:13:52 -0400 Subject: [PATCH 055/305] chore: add capabilities to NetworkStatus (#17236) --- crates/net/network-api/src/lib.rs | 5 ++++- crates/net/network-api/src/noop.rs | 1 + crates/net/network/src/manager.rs | 5 +++++ crates/rpc/rpc/src/eth/helpers/sync_listener.rs | 1 + 4 files changed, 11 insertions(+), 1 deletion(-) diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index ff469f16a47..58fe2c124e8 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -35,7 +35,8 @@ pub use events::{ }; use reth_eth_wire_types::{ - capability::Capabilities, DisconnectReason, EthVersion, NetworkPrimitives, UnifiedStatus, + capability::Capabilities, Capability, DisconnectReason, EthVersion, NetworkPrimitives, + UnifiedStatus, }; use reth_network_p2p::sync::NetworkSyncUpdater; use reth_network_peers::NodeRecord; @@ -285,4 +286,6 @@ pub struct NetworkStatus { pub protocol_version: u64, /// Information about the Ethereum Wire Protocol. pub eth_protocol_info: EthProtocolInfo, + /// The list of supported capabilities and their versions. 
+    pub capabilities: Vec<Capability>,
 }
diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs
index 4b5a49c91c4..2183f276bab 100644
--- a/crates/net/network-api/src/noop.rs
+++ b/crates/net/network-api/src/noop.rs
@@ -73,6 +73,7 @@ where
                 config: Default::default(),
                 head: Default::default(),
             },
+            capabilities: vec![],
         })
     }

diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs
index dcb77e30937..465039ec193 100644
--- a/crates/net/network/src/manager.rs
+++ b/crates/net/network/src/manager.rs
@@ -457,6 +457,11 @@ impl NetworkManager {
                 genesis: status.genesis,
                 config: Default::default(),
             },
+            capabilities: hello_message
+                .protocols
+                .into_iter()
+                .map(|protocol| protocol.cap)
+                .collect(),
         }
     }

diff --git a/crates/rpc/rpc/src/eth/helpers/sync_listener.rs b/crates/rpc/rpc/src/eth/helpers/sync_listener.rs
index 13c8de19b0d..e444f76d3af 100644
--- a/crates/rpc/rpc/src/eth/helpers/sync_listener.rs
+++ b/crates/rpc/rpc/src/eth/helpers/sync_listener.rs
@@ -91,6 +91,7 @@ mod tests {
                 config: Default::default(),
                 head: Default::default(),
             },
+            capabilities: vec![],
         })
     }

From e948ab12fc8b64d1c04a3e88cf6134f29f8f4a3c Mon Sep 17 00:00:00 2001
From: Galoretka
Date: Fri, 4 Jul 2025 21:40:15 +0300
Subject: [PATCH 056/305] fix: logical error in pruning test for
 storage_history PruneMode::Full (#17235)

---
 crates/stages/stages/benches/README.md | 4 ++--
 crates/stages/stages/src/stages/mod.rs | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/crates/stages/stages/benches/README.md b/crates/stages/stages/benches/README.md
index 7c482c59c60..c3d3268e318 100644
--- a/crates/stages/stages/benches/README.md
+++ b/crates/stages/stages/benches/README.md
@@ -13,10 +13,10 @@ It will generate a flamegraph report without running any criterion analysis.
 ```
 cargo bench --package reth-stages --bench criterion --features test-utils -- --profile-time=2
 ```
-Flamegraph reports can be find at `target/criterion/Stages/$STAGE_LABEL/profile/flamegraph.svg`
+Flamegraph reports can be found at `target/criterion/Stages/$STAGE_LABEL/profile/flamegraph.svg`

 ## External DB support
 To choose an external DB, just pass an environment variable to the `cargo bench` command.
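The external-DB hook this README describes is just an environment variable read at bench setup; a minimal sketch of how a bench might branch on it (`ACCOUNT_HASHING_DB` is the variable named in the README line that follows):

```rust
fn main() {
    // Illustrative only; the real benches wire this into their DB setup.
    match std::env::var("ACCOUNT_HASHING_DB") {
        Ok(path) => println!("opening external account-hashing DB at {path}"),
        Err(_) => println!("no external DB set, using an ephemeral test DB"),
    }
}
```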
-* Account Hashing Stage: `ACCOUNT_HASHING_DB=` \ No newline at end of file +* Account Hashing Stage: `ACCOUNT_HASHING_DB=` diff --git a/crates/stages/stages/src/stages/mod.rs b/crates/stages/stages/src/stages/mod.rs index 726609b2350..b73136d0922 100644 --- a/crates/stages/stages/src/stages/mod.rs +++ b/crates/stages/stages/src/stages/mod.rs @@ -213,7 +213,7 @@ mod tests { if prune_modes.storage_history == Some(PruneMode::Full) { // Full is not supported - assert!(acc_indexing_stage.execute(&provider, input).is_err()); + assert!(storage_indexing_stage.execute(&provider, input).is_err()); } else { storage_indexing_stage.execute(&provider, input).unwrap(); From 29c1a35e8d2952ad3f1645aa0bdcbac2caaf6957 Mon Sep 17 00:00:00 2001 From: Micke <155267459+reallesee@users.noreply.github.com> Date: Fri, 4 Jul 2025 21:12:05 +0200 Subject: [PATCH 057/305] docs: fix typo mod.rs (#17233) --- crates/optimism/primitives/src/transaction/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/optimism/primitives/src/transaction/mod.rs b/crates/optimism/primitives/src/transaction/mod.rs index 3284b67fcbf..306f5459046 100644 --- a/crates/optimism/primitives/src/transaction/mod.rs +++ b/crates/optimism/primitives/src/transaction/mod.rs @@ -2,7 +2,7 @@ mod tx_type; -/// Kept for concistency tests +/// Kept for consistency tests #[cfg(test)] mod signed; From 593477c6739aeda80686426ea56826789b7c943f Mon Sep 17 00:00:00 2001 From: Udoagwa Franklin <54338168+frankudoags@users.noreply.github.com> Date: Sat, 5 Jul 2025 05:24:56 +0100 Subject: [PATCH 058/305] refactor(txpool): Remove txhash from PoolUpdate (#17239) Co-authored-by: frankudoags --- crates/transaction-pool/src/pool/txpool.rs | 33 ++++++++++++++++++---- crates/transaction-pool/src/pool/update.rs | 3 -- 2 files changed, 27 insertions(+), 9 deletions(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 3c84bea80be..76607221fba 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -832,11 +832,11 @@ impl TxPool { /// This will move/discard the given transaction according to the `PoolUpdate` fn process_updates(&mut self, updates: Vec) -> UpdateOutcome { let mut outcome = UpdateOutcome::default(); - for PoolUpdate { id, hash, current, destination } in updates { + for PoolUpdate { id, current, destination } in updates { match destination { Destination::Discard => { // remove the transaction from the pool and subpool - if let Some(tx) = self.prune_transaction_by_hash(&hash) { + if let Some(tx) = self.prune_transaction_by_id(&id) { outcome.discarded.push(tx); } self.metrics.removed_transactions.increment(1); @@ -958,6 +958,17 @@ impl TxPool { let (tx, pool) = self.all_transactions.remove_transaction_by_hash(tx_hash)?; self.remove_from_subpool(pool, tx.id()) } + /// This removes the transaction from the pool and advances any descendant state inside the + /// subpool. + /// + /// This is intended to be used when we call [`Self::process_updates`]. + fn prune_transaction_by_id( + &mut self, + tx_id: &TransactionId, + ) -> Option>> { + let (tx, pool) = self.all_transactions.remove_transaction_by_id(tx_id)?; + self.remove_from_subpool(pool, tx.id()) + } /// Removes the transaction from the given pool. 
/// @@ -1363,7 +1374,6 @@ impl AllTransactions { if id.nonce < info.state_nonce { updates.push(PoolUpdate { id: *tx.transaction.id(), - hash: *tx.transaction.hash(), current: tx.subpool, destination: Destination::Discard, }); @@ -1473,7 +1483,6 @@ impl AllTransactions { if current_pool != tx.subpool { updates.push(PoolUpdate { id: *tx.transaction.id(), - hash: *tx.transaction.hash(), current: current_pool, destination: tx.subpool.into(), }) @@ -1563,6 +1572,20 @@ impl AllTransactions { Some((tx, internal.subpool)) } + /// Removes a transaction from the set using its id. + pub(crate) fn remove_transaction_by_id( + &mut self, + tx_id: &TransactionId, + ) -> Option<(Arc>, SubPool)> { + let internal = self.txs.remove(tx_id)?; + let tx = self.by_hash.remove(internal.transaction.hash())?; + self.remove_auths(&internal); + // decrement the counter for the sender. + self.tx_decr(tx.sender_id()); + self.update_size_metrics(); + Some((tx, internal.subpool)) + } + /// If a tx is removed (_not_ mined), all descendants are set to parked due to the nonce gap pub(crate) fn park_descendant_transactions( &mut self, @@ -1582,7 +1605,6 @@ impl AllTransactions { if current_pool != tx.subpool { updates.push(PoolUpdate { id: *id, - hash: *tx.transaction.hash(), current: current_pool, destination: tx.subpool.into(), }) @@ -1942,7 +1964,6 @@ impl AllTransactions { if current_pool != tx.subpool { updates.push(PoolUpdate { id: *id, - hash: *tx.transaction.hash(), current: current_pool, destination: tx.subpool.into(), }) diff --git a/crates/transaction-pool/src/pool/update.rs b/crates/transaction-pool/src/pool/update.rs index ca2b3358201..2322ccf6e65 100644 --- a/crates/transaction-pool/src/pool/update.rs +++ b/crates/transaction-pool/src/pool/update.rs @@ -3,7 +3,6 @@ use crate::{ identifier::TransactionId, pool::state::SubPool, PoolTransaction, ValidPoolTransaction, }; -use alloy_primitives::TxHash; use std::sync::Arc; /// A change of the transaction's location @@ -13,8 +12,6 @@ use std::sync::Arc; pub(crate) struct PoolUpdate { /// Internal tx id. pub(crate) id: TransactionId, - /// Hash of the transaction. - pub(crate) hash: TxHash, /// Where the transaction is currently held. pub(crate) current: SubPool, /// Where to move the transaction to. From beb8fac91ba49c8c71654faae3a8ff0f5c7c1db3 Mon Sep 17 00:00:00 2001 From: Varun Doshi Date: Sat, 5 Jul 2025 09:58:10 +0530 Subject: [PATCH 059/305] feat: add v5 flashbots relay block validation api for Fusaka (#17179) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-api/src/validation.rs | 8 ++++ crates/rpc/rpc/Cargo.toml | 2 +- crates/rpc/rpc/src/validation.rs | 70 +++++++++++++++++++++++++++- 3 files changed, 77 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc-api/src/validation.rs b/crates/rpc/rpc-api/src/validation.rs index 5e4f2e26143..9ff47b5eaf2 100644 --- a/crates/rpc/rpc-api/src/validation.rs +++ b/crates/rpc/rpc-api/src/validation.rs @@ -3,6 +3,7 @@ use alloy_rpc_types_beacon::relay::{ BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, + BuilderBlockValidationRequestV5, }; use jsonrpsee::proc_macros::rpc; @@ -37,4 +38,11 @@ pub trait BlockSubmissionValidationApi { &self, request: BuilderBlockValidationRequestV4, ) -> jsonrpsee::core::RpcResult<()>; + + /// A Request to validate a block submission. 
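The server implementation later in this patch (the trait method itself is declared directly below) maps every KZG commitment in the submitted blobs bundle to an EIP-4844 versioned hash before validating the sidecar. That mapping is small enough to show standalone; `kzg_to_versioned_hash` is the same `alloy_eips` helper the diff imports:

```rust
use alloy_eips::eip4844::kzg_to_versioned_hash;

fn main() {
    // A KZG commitment is 48 bytes; an all-zero value keeps the sketch simple.
    let commitment = [0u8; 48];
    let versioned = kzg_to_versioned_hash(&commitment);
    // Versioned hashes are SHA-256 of the commitment with byte 0 set to 0x01.
    assert_eq!(versioned[0], 0x01);
}
```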
+ #[method(name = "validateBuilderSubmissionV5")] + async fn validate_builder_submission_v5( + &self, + request: BuilderBlockValidationRequestV5, + ) -> jsonrpsee::core::RpcResult<()>; } diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 2f41caa5480..389502a2c73 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -60,7 +60,7 @@ alloy-rpc-types-trace.workspace = true alloy-rpc-types-mev.workspace = true alloy-rpc-types-txpool.workspace = true alloy-rpc-types-admin.workspace = true -alloy-rpc-types-engine.workspace = true +alloy-rpc-types-engine = { workspace = true, features = ["kzg"] } alloy-serde.workspace = true revm = { workspace = true, features = ["optional_block_gas_limit", "optional_eip3607", "optional_no_base_fee"] } revm-primitives = { workspace = true, features = ["serde"] } diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index e2d5a553d54..6ec2a1b7207 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -5,10 +5,11 @@ use alloy_eips::{eip4844::kzg_to_versioned_hash, eip7685::RequestsOrHash}; use alloy_rpc_types_beacon::relay::{ BidTrace, BuilderBlockValidationRequest, BuilderBlockValidationRequestV2, BuilderBlockValidationRequestV3, BuilderBlockValidationRequestV4, + BuilderBlockValidationRequestV5, }; use alloy_rpc_types_engine::{ - BlobsBundleV1, CancunPayloadFields, ExecutionData, ExecutionPayload, ExecutionPayloadSidecar, - PraguePayloadFields, + BlobsBundleV1, BlobsBundleV2, CancunPayloadFields, ExecutionData, ExecutionPayload, + ExecutionPayloadSidecar, PraguePayloadFields, }; use async_trait::async_trait; use core::fmt; @@ -365,6 +366,24 @@ where Ok(versioned_hashes) } + /// Validates the given [`BlobsBundleV1`] and returns versioned hashes for blobs. + pub fn validate_blobs_bundle_v2( + &self, + blobs_bundle: BlobsBundleV2, + ) -> Result, ValidationApiError> { + let versioned_hashes = blobs_bundle + .commitments + .iter() + .map(|c| kzg_to_versioned_hash(c.as_slice())) + .collect::>(); + + blobs_bundle + .try_into_sidecar() + .map_err(|_| ValidationApiError::InvalidBlobsBundle)? + .validate(&versioned_hashes, EnvKzgSettings::default().get())?; + + Ok(versioned_hashes) + } /// Core logic for validating the builder submission v3 async fn validate_builder_submission_v3( @@ -414,6 +433,35 @@ where ) .await } + + /// Core logic for validating the builder submission v5 + async fn validate_builder_submission_v5( + &self, + request: BuilderBlockValidationRequestV5, + ) -> Result<(), ValidationApiError> { + let block = self.payload_validator.ensure_well_formed_payload(ExecutionData { + payload: ExecutionPayload::V3(request.request.execution_payload), + sidecar: ExecutionPayloadSidecar::v4( + CancunPayloadFields { + parent_beacon_block_root: request.parent_beacon_block_root, + versioned_hashes: self + .validate_blobs_bundle_v2(request.request.blobs_bundle)?, + }, + PraguePayloadFields { + requests: RequestsOrHash::Requests( + request.request.execution_requests.to_requests(), + ), + }, + ), + })?; + + self.validate_message_against_block( + block, + request.request.message, + request.registered_gas_limit, + ) + .await + } } #[async_trait] @@ -477,6 +525,24 @@ where rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))? 
} + + /// Validates a block submitted to the relay + async fn validate_builder_submission_v5( + &self, + request: BuilderBlockValidationRequestV5, + ) -> RpcResult<()> { + let this = self.clone(); + let (tx, rx) = oneshot::channel(); + + self.task_spawner.spawn_blocking(Box::pin(async move { + let result = Self::validate_builder_submission_v5(&this, request) + .await + .map_err(ErrorObject::from); + let _ = tx.send(result); + })); + + rx.await.map_err(|_| internal_rpc_err("Internal blocking task error"))? + } } pub struct ValidationApiInner { From 30a9690a4dd803c4266487dd0c0d43b92104ed04 Mon Sep 17 00:00:00 2001 From: bigbear <155267841+aso20455@users.noreply.github.com> Date: Sat, 5 Jul 2025 06:50:14 +0200 Subject: [PATCH 060/305] fix: correct typo in ValidationApi comment (#17241) --- crates/rpc/rpc/src/validation.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 6ec2a1b7207..9cdc20e3ca8 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -185,7 +185,7 @@ where let output = executor.execute_with_state_closure(&block, |state| { if !self.disallow.is_empty() { // Check whether the submission interacted with any blacklisted account by scanning - // the `State`'s cache that records everything read form database during execution. + // the `State`'s cache that records everything read from database during execution. for account in state.cache.accounts.keys() { if self.disallow.contains(account) { accessed_blacklisted = Some(*account); From 0592bd06a87a9f011dcf30e7231cd3ae3bb17c48 Mon Sep 17 00:00:00 2001 From: leopardracer <136604165+leopardracer@users.noreply.github.com> Date: Sat, 5 Jul 2025 10:38:25 +0300 Subject: [PATCH 061/305] docs: Consistent Spelling for "Reuse" in Documentation (#17232) --- crates/engine/tree/src/tree/payload_processor/sparse_trie.rs | 2 +- crates/trie/sparse/src/trie.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index eeb6acde2a0..bd8702826d4 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -149,7 +149,7 @@ where self.metrics.sparse_trie_final_update_duration_histogram.record(start.elapsed()); self.metrics.sparse_trie_total_duration_histogram.record(now.elapsed()); - // take the account trie so that we can re-use its already allocated data structures. + // take the account trie so that we can reuse its already allocated data structures. let trie = self.trie.take_cleared_accounts_trie(); Ok(StateRootComputeOutcome { state_root, trie_updates, trie }) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 2bbe94d4f7a..5bb8c7aef84 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -49,7 +49,7 @@ pub enum SparseTrie { /// until nodes are revealed. /// /// In this state the `SparseTrie` can optionally carry with it a cleared `RevealedSparseTrie`. - /// This allows for re-using the trie's allocations between payload executions. + /// This allows for reusing the trie's allocations between payload executions. Blind(Option>), /// Some nodes in the Trie have been revealed. 
/// From 1e9866c8588f1215d5d2e04ea9f197067f7296e7 Mon Sep 17 00:00:00 2001 From: Udoagwa Franklin <54338168+frankudoags@users.noreply.github.com> Date: Sat, 5 Jul 2025 09:26:29 +0100 Subject: [PATCH 062/305] refactor(rpc): Arc PendingBlock internals (#17240) Co-authored-by: frankudoags --- crates/optimism/rpc/src/eth/pending_block.rs | 8 +++++--- crates/rpc/rpc-eth-api/src/helpers/block.rs | 4 ++-- .../rpc/rpc-eth-api/src/helpers/pending_block.rs | 14 ++++++++++---- crates/rpc/rpc-eth-types/src/pending_block.rs | 10 +++++----- 4 files changed, 22 insertions(+), 14 deletions(-) diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index de011aa2797..8d6eae8a2f6 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -1,5 +1,7 @@ //! Loads OP pending block for a RPC response. +use std::sync::Arc; + use crate::OpEthApi; use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; @@ -78,8 +80,8 @@ where &self, ) -> Result< Option<( - RecoveredBlock>, - Vec>, + Arc>>, + Arc>>, )>, Self::Error, > { @@ -102,6 +104,6 @@ where .map_err(Self::Error::from_eth_err)? .ok_or(EthApiError::ReceiptsNotFound(block_id.into()))?; - Ok(Some((block, receipts))) + Ok(Some((Arc::new(block), Arc::new(receipts)))) } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 0b88dc3bdc2..a0503f4946e 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -135,7 +135,7 @@ pub trait EthBlocks: LoadBlock { // If no pending block from provider, build the pending block locally. if let Some((block, receipts)) = self.local_pending_block().await? { - return Ok(Some((Arc::new(block), Arc::new(receipts)))); + return Ok(Some((block, receipts))); } } @@ -245,7 +245,7 @@ pub trait LoadBlock: // If no pending block from provider, try to get local pending block return match self.local_pending_block().await? { - Some((block, _)) => Ok(Some(Arc::new(block))), + Some((block, _)) => Ok(Some(block)), None => Ok(None), }; } diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 272f3c18f1f..691a5f42bff 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -30,7 +30,10 @@ use reth_transaction_pool::{ TransactionPool, }; use revm::context_interface::Block; -use std::time::{Duration, Instant}; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; use tokio::sync::Mutex; use tracing::debug; @@ -92,7 +95,7 @@ pub trait LoadPendingBlock: return Ok(PendingBlockEnv::new( evm_env, - PendingBlockEnvOrigin::ActualPending(block, receipts), + PendingBlockEnvOrigin::ActualPending(Arc::new(block), Arc::new(receipts)), )); } } @@ -127,8 +130,8 @@ pub trait LoadPendingBlock: ) -> impl Future< Output = Result< Option<( - RecoveredBlock<::Block>, - Vec>, + Arc::Block>>, + Arc>>, )>, Self::Error, >, @@ -178,6 +181,9 @@ pub trait LoadPendingBlock: } }; + let sealed_block = Arc::new(sealed_block); + let receipts = Arc::new(receipts); + let now = Instant::now(); *lock = Some(PendingBlock::new( now + Duration::from_secs(1), diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index 7990e2334b1..fa9b554558b 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -2,7 +2,7 @@ //! //! 
Types used in block building. -use std::time::Instant; +use std::{sync::Arc, time::Instant}; use alloy_consensus::BlockHeader; use alloy_eips::{BlockId, BlockNumberOrTag}; @@ -25,7 +25,7 @@ pub struct PendingBlockEnv { #[derive(Clone, Debug)] pub enum PendingBlockEnvOrigin { /// The pending block as received from the CL. - ActualPending(RecoveredBlock, Vec), + ActualPending(Arc>, Arc>), /// The _modified_ header of the latest block. /// /// This derives the pending state based on the latest header by modifying: @@ -42,7 +42,7 @@ impl PendingBlockEnvOrigin { } /// Consumes the type and returns the actual pending block. - pub fn into_actual_pending(self) -> Option> { + pub fn into_actual_pending(self) -> Option>> { match self { Self::ActualPending(block, _) => Some(block), _ => None, @@ -79,7 +79,7 @@ pub struct PendingBlock { /// Timestamp when the pending block is considered outdated. pub expires_at: Instant, /// The locally built pending block. - pub block: RecoveredBlock, + pub block: Arc>, /// The receipts for the pending block - pub receipts: Vec, + pub receipts: Arc>, } From 3277333df6ba9bd798f059e7a2d43d712e028d5c Mon Sep 17 00:00:00 2001 From: emmmm <155267286+eeemmmmmm@users.noreply.github.com> Date: Sat, 5 Jul 2025 06:50:18 -0400 Subject: [PATCH 063/305] docs: correction comments (#17244) --- crates/rpc/ipc/src/server/connection.rs | 2 +- crates/rpc/rpc/src/eth/bundle.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/rpc/ipc/src/server/connection.rs b/crates/rpc/ipc/src/server/connection.rs index 0734296b98e..e8b827078ba 100644 --- a/crates/rpc/ipc/src/server/connection.rs +++ b/crates/rpc/ipc/src/server/connection.rs @@ -1,4 +1,4 @@ -//! A IPC connection. +//! An IPC connection. use crate::stream_codec::StreamCodec; use futures::{stream::FuturesUnordered, FutureExt, Sink, Stream}; diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index 08fe3c84cd4..0ff7fb1dde4 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -273,7 +273,7 @@ where } } -/// Container type for `EthBundle` internals +/// Container type for `EthBundle` internals #[derive(Debug)] struct EthBundleInner { /// Access to commonly used code of the `eth` namespace From 8e800d6f7368c7ded0e3e5557e858c10edf83ff2 Mon Sep 17 00:00:00 2001 From: Fallengirl <155266340+Fallengirl@users.noreply.github.com> Date: Sun, 6 Jul 2025 11:19:27 +0200 Subject: [PATCH 064/305] docs: deleted extra duplicate environment.rs (#17249) --- crates/storage/libmdbx-rs/src/environment.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index b3730bf6d17..648526a7fc5 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -778,7 +778,7 @@ impl EnvironmentBuilder { /// Sets the maximum number of threads or reader slots for the environment. /// /// This defines the number of slots in the lock table that is used to track readers in the - /// the environment. The default is 126. Starting a read-only transaction normally ties a lock + /// environment. The default is 126. Starting a read-only transaction normally ties a lock /// table slot to the [Transaction] object until it or the [Environment] object is destroyed. 
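For the reader-slot limit documented above (default 126), a short sketch of raising it when opening a `reth-libmdbx` environment; the setter itself follows directly below, and the `tempfile` dependency exists only for the sketch:

```rust
use reth_libmdbx::Environment;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let dir = tempfile::tempdir()?;

    // Raise the lock-table reader limit above the default before opening.
    let env = Environment::builder().set_max_readers(256).open(dir.path())?;

    // Starting a read-only transaction ties one reader slot to it.
    let _ro = env.begin_ro_txn()?;
    Ok(())
}
```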
pub const fn set_max_readers(&mut self, max_readers: u64) -> &mut Self { self.max_readers = Some(max_readers); From 651f1b97e52846f67d75dfbdd6fbd0b5fd9e59ff Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 6 Jul 2025 09:42:47 +0000 Subject: [PATCH 065/305] chore(deps): weekly `cargo update` (#17247) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0422efa0fcc..09af074463e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -12220,9 +12220,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.46.0" +version = "1.46.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1140bb80481756a8cbe10541f37433b459c5aa1e727b4c020fbfebdc25bf3ec4" +checksum = "0cc3a2344dafbe23a245241fe8b09735b521110d30fcefbbd5feb1797ca35d17" dependencies = [ "backtrace", "bytes", From 44b361a4e290267083d85ab00e9334fc9a44379f Mon Sep 17 00:00:00 2001 From: Max Bytefield Date: Mon, 7 Jul 2025 12:26:45 +0300 Subject: [PATCH 066/305] fix: correct comment in static file writer (#17254) --- crates/storage/provider/src/providers/static_file/writer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 3fd2828faad..356d46c85bb 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -702,7 +702,7 @@ impl StaticFileProviderRW { Ok(Some(tx_number)) } - /// Adds an instruction to prune `to_delete`transactions during commit. + /// Adds an instruction to prune `to_delete` transactions during commit. /// /// Note: `last_block` refers to the block the unwinds ends at. pub fn prune_transactions( @@ -732,7 +732,7 @@ impl StaticFileProviderRW { self.queue_prune(to_delete, None) } - /// Adds an instruction to prune `to_delete` bloc_ meta rows during commit. + /// Adds an instruction to prune `to_delete` block meta rows during commit. 
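The prune methods above (and the block-meta variant that follows directly below) share one shape: they only queue a deletion count, which the writer applies when it commits. A toy, self-contained illustration of that deferred-deletion pattern, independent of the real segment bookkeeping:

```rust
/// Minimal stand-in for a writer that defers pruning until commit.
#[derive(Default)]
struct Writer {
    queued_prune: u64,
}

impl Writer {
    // Record how many rows to delete; nothing touches disk yet.
    fn queue_prune(&mut self, to_delete: u64) {
        self.queued_prune += to_delete;
    }

    // Apply any queued deletions as part of the commit.
    fn commit(&mut self) {
        if self.queued_prune > 0 {
            println!("pruning {} rows as part of commit", self.queued_prune);
            self.queued_prune = 0;
        }
    }
}

fn main() {
    let mut w = Writer::default();
    w.queue_prune(3);
    w.commit();
}
```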
pub fn prune_block_meta(&mut self, to_delete: u64) -> ProviderResult<()> { debug_assert_eq!(self.writer.user_header().segment(), StaticFileSegment::BlockMeta); self.queue_prune(to_delete, None) From a64dafdb5491125a9db89b62e1a02fa6d7a456e9 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Mon, 7 Jul 2025 12:46:23 +0100 Subject: [PATCH 067/305] fix(trie): `ParallelSparseTrie::default` should have an empty root node (#17256) --- crates/trie/sparse-parallel/src/trie.rs | 31 ++++++++++++------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 34c44cc613f..14484dd51bc 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -48,7 +48,10 @@ pub struct ParallelSparseTrie { impl Default for ParallelSparseTrie { fn default() -> Self { Self { - upper_subtrie: Box::default(), + upper_subtrie: Box::new(SparseSubtrie { + nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]), + ..Default::default() + }), lower_subtries: [const { None }; NUM_LOWER_SUBTRIES], prefix_set: PrefixSetMut::default(), updates: None, @@ -58,9 +61,7 @@ impl Default for ParallelSparseTrie { impl SparseTrieInterface for ParallelSparseTrie { fn from_root(root: TrieNode, masks: TrieMasks, retain_updates: bool) -> SparseTrieResult { - let mut trie = Self::default().with_updates(retain_updates); - trie.reveal_node(Nibbles::default(), root, masks)?; - Ok(trie) + Self::default().with_root(root, masks, retain_updates) } fn with_root( @@ -69,6 +70,12 @@ impl SparseTrieInterface for ParallelSparseTrie { masks: TrieMasks, retain_updates: bool, ) -> SparseTrieResult { + // A fresh/cleared `ParallelSparseTrie` has a `SparseNode::Empty` at its root in the upper + // subtrie. Delete that so we can reveal the new root node. 
+ let path = Nibbles::default(); + let _removed_root = self.upper_subtrie.nodes.remove(&path).expect("root node should exist"); + debug_assert_eq!(_removed_root, SparseNode::Empty); + self = self.with_updates(retain_updates); self.reveal_node(Nibbles::default(), root, masks)?; @@ -2600,13 +2607,11 @@ mod tests { #[test] fn test_reveal_node_extension_all_upper() { - let mut trie = ParallelSparseTrie::default(); let path = Nibbles::new(); let child_hash = B256::repeat_byte(0xab); let node = create_extension_node([0x1], child_hash); let masks = TrieMasks::none(); - - trie.reveal_node(path, node, masks).unwrap(); + let trie = ParallelSparseTrie::from_root(node, masks, true).unwrap(); assert_matches!( trie.upper_subtrie.nodes.get(&path), @@ -2621,13 +2626,11 @@ mod tests { #[test] fn test_reveal_node_extension_cross_level() { - let mut trie = ParallelSparseTrie::default(); let path = Nibbles::new(); let child_hash = B256::repeat_byte(0xcd); let node = create_extension_node([0x1, 0x2, 0x3], child_hash); let masks = TrieMasks::none(); - - trie.reveal_node(path, node, masks).unwrap(); + let trie = ParallelSparseTrie::from_root(node, masks, true).unwrap(); // Extension node should be in upper trie assert_matches!( @@ -2675,7 +2678,6 @@ mod tests { #[test] fn test_reveal_node_branch_all_upper() { - let mut trie = ParallelSparseTrie::default(); let path = Nibbles::new(); let child_hashes = [ RlpNode::word_rlp(&B256::repeat_byte(0x11)), @@ -2683,8 +2685,7 @@ mod tests { ]; let node = create_branch_node_with_children(&[0x0, 0x5], child_hashes.clone()); let masks = TrieMasks::none(); - - trie.reveal_node(path, node, masks).unwrap(); + let trie = ParallelSparseTrie::from_root(node, masks, true).unwrap(); // Branch node should be in upper trie assert_matches!( @@ -3448,8 +3449,6 @@ mod tests { #[test] fn test_parallel_sparse_trie_root() { - let mut trie = ParallelSparseTrie::default().with_updates(true); - // Step 1: Create the trie structure // Extension node at 0x with key 0x2 (goes to upper subtrie) let extension_path = Nibbles::new(); @@ -3491,7 +3490,7 @@ mod tests { ); // Step 2: Reveal nodes in the trie - trie.reveal_node(extension_path, extension, TrieMasks::none()).unwrap(); + let mut trie = ParallelSparseTrie::from_root(extension, TrieMasks::none(), true).unwrap(); trie.reveal_node(branch_path, branch, TrieMasks::none()).unwrap(); trie.reveal_node(leaf_1_path, leaf_1, TrieMasks::none()).unwrap(); trie.reveal_node(leaf_2_path, leaf_2, TrieMasks::none()).unwrap(); From e70f6871b8447e2f0931ec0d919259eaebf4d7c8 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 7 Jul 2025 15:26:20 +0200 Subject: [PATCH 068/305] refactor: extract import functionality to separate module (#17253) --- crates/cli/commands/src/import.rs | 212 +++------------------- crates/cli/commands/src/import_op.rs | 254 +++++++++++++++++++++++++++ crates/cli/commands/src/lib.rs | 1 + 3 files changed, 277 insertions(+), 190 deletions(-) create mode 100644 crates/cli/commands/src/import_op.rs diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index eef67117063..05434de4c21 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -1,36 +1,16 @@ //! Command that initializes the node by importing a chain from a file. 
-use crate::common::{AccessRights, CliNodeComponents, CliNodeTypes, Environment, EnvironmentArgs}; -use alloy_primitives::B256; +use crate::{ + common::{AccessRights, CliNodeComponents, CliNodeTypes, Environment, EnvironmentArgs}, + import_op::{import_blocks_from_file, ImportConfig}, +}; use clap::Parser; -use futures::{Stream, StreamExt}; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; -use reth_config::Config; -use reth_consensus::{ConsensusError, FullConsensus}; -use reth_db_api::{tables, transaction::DbTx}; -use reth_downloaders::{ - bodies::bodies::BodiesDownloaderBuilder, - file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, - headers::reverse_headers::ReverseHeadersDownloaderBuilder, -}; -use reth_evm::ConfigureEvm; -use reth_network_p2p::{ - bodies::downloader::BodyDownloader, - headers::downloader::{HeaderDownloader, SyncTarget}, -}; -use reth_node_api::BlockTy; use reth_node_core::version::SHORT_VERSION; -use reth_node_events::node::NodeEvent; -use reth_provider::{ - providers::ProviderNodeTypes, BlockNumReader, ChainSpecProvider, HeaderProvider, ProviderError, - ProviderFactory, StageCheckpointReader, -}; -use reth_prune::PruneModes; -use reth_stages::{prelude::*, Pipeline, StageId, StageSet}; -use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; -use tokio::sync::watch; -use tracing::{debug, error, info}; +use tracing::info; + +pub use crate::import_op::build_import_pipeline_impl as build_import_pipeline; /// Syncs RLP encoded blocks from a file. #[derive(Debug, Parser)] @@ -66,101 +46,29 @@ impl> ImportComm { info!(target: "reth::cli", "reth {} starting", SHORT_VERSION); - if self.no_state { - info!(target: "reth::cli", "Disabled stages requiring state"); - } - - debug!(target: "reth::cli", - chunk_byte_len=self.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE), - "Chunking chain import" - ); - let Environment { provider_factory, config, .. } = self.env.init::(AccessRights::RW)?; let components = components(provider_factory.chain_spec()); - let executor = components.evm_config().clone(); - let consensus = Arc::new(components.consensus().clone()); - info!(target: "reth::cli", "Consensus engine initialized"); - - // open file - let mut reader = ChunkedFileReader::new(&self.path, self.chunk_len).await?; - - let mut total_decoded_blocks = 0; - let mut total_decoded_txns = 0; - - let mut sealed_header = provider_factory - .sealed_header(provider_factory.last_block_number()?)? - .expect("should have genesis"); - - while let Some(file_client) = - reader.next_chunk::>(consensus.clone(), Some(sealed_header)).await? 
- { - // create a new FileClient from chunk read from file - info!(target: "reth::cli", - "Importing chain file chunk" - ); - - let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?; - info!(target: "reth::cli", "Chain file chunk read"); - - total_decoded_blocks += file_client.headers_len(); - total_decoded_txns += file_client.total_transactions(); - - let (mut pipeline, events) = build_import_pipeline( - &config, - provider_factory.clone(), - &consensus, - Arc::new(file_client), - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()), - self.no_state, - executor.clone(), - )?; - - // override the tip - pipeline.set_tip(tip); - debug!(target: "reth::cli", ?tip, "Tip manually set"); - - let provider = provider_factory.provider()?; - let latest_block_number = - provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); - tokio::spawn(reth_node_events::node::handle_events(None, latest_block_number, events)); + let import_config = ImportConfig { no_state: self.no_state, chunk_len: self.chunk_len }; - // Run pipeline - info!(target: "reth::cli", "Starting sync pipeline"); - tokio::select! { - res = pipeline.run() => res?, - _ = tokio::signal::ctrl_c() => {}, - } - - sealed_header = provider_factory - .sealed_header(provider_factory.last_block_number()?)? - .expect("should have genesis"); - } - - let provider = provider_factory.provider()?; + let executor = components.evm_config().clone(); + let consensus = Arc::new(components.consensus().clone()); - let total_imported_blocks = provider.tx_ref().entries::()?; - let total_imported_txns = provider.tx_ref().entries::()?; + let result = import_blocks_from_file( + &self.path, + import_config, + provider_factory, + &config, + executor, + consensus, + ) + .await?; - if total_decoded_blocks != total_imported_blocks || - total_decoded_txns != total_imported_txns - { - error!(target: "reth::cli", - total_decoded_blocks, - total_imported_blocks, - total_decoded_txns, - total_imported_txns, - "Chain was partially imported" - ); + if !result.is_complete() { + return Err(eyre::eyre!("Chain was partially imported")); } - info!(target: "reth::cli", - total_imported_blocks, - total_imported_txns, - "Chain file imported" - ); - Ok(()) } } @@ -172,82 +80,6 @@ impl ImportCommand { } } -/// Builds import pipeline. -/// -/// If configured to execute, all stages will run. Otherwise, only stages that don't require state -/// will run. -pub fn build_import_pipeline( - config: &Config, - provider_factory: ProviderFactory, - consensus: &Arc, - file_client: Arc>>, - static_file_producer: StaticFileProducer>, - disable_exec: bool, - evm_config: E, -) -> eyre::Result<(Pipeline, impl Stream>)> -where - N: ProviderNodeTypes, - C: FullConsensus + 'static, - E: ConfigureEvm + 'static, -{ - if !file_client.has_canonical_blocks() { - eyre::bail!("unable to import non canonical blocks"); - } - - // Retrieve latest header found in the database. - let last_block_number = provider_factory.last_block_number()?; - let local_head = provider_factory - .sealed_header(last_block_number)? - .ok_or_else(|| ProviderError::HeaderNotFound(last_block_number.into()))?; - - let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(file_client.clone(), consensus.clone()) - .into_task(); - // TODO: The pipeline should correctly configure the downloader on its own. - // Find the possibility to remove unnecessary pre-configuration. 
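The pipeline's tip travels through a `tokio::sync::watch` channel: the importer holds the sender (`with_tip_sender`) and overrides the tip once each chunk's head is known (`pipeline.set_tip`), as the wiring just below shows in its original, pre-refactor form. A standalone sketch of that channel pattern:

```rust
use alloy_primitives::B256;
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (tip_tx, mut tip_rx) = watch::channel(B256::ZERO);

    // The importer publishes the chunk's tip; consumers wake on the change.
    tip_tx.send(B256::repeat_byte(0x42)).unwrap();
    tip_rx.changed().await.unwrap();
    assert_eq!(*tip_rx.borrow(), B256::repeat_byte(0x42));
}
```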
-    header_downloader.update_local_head(local_head);
-    header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap()));
-
-    let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies)
-        .build(file_client.clone(), consensus.clone(), provider_factory.clone())
-        .into_task();
-    // TODO: The pipeline should correctly configure the downloader on its own.
-    // Find the possibility to remove unnecessary pre-configuration.
-    body_downloader
-        .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap())
-        .expect("failed to set download range");
-
-    let (tip_tx, tip_rx) = watch::channel(B256::ZERO);
-
-    let max_block = file_client.max_block().unwrap_or(0);
-
-    let pipeline = Pipeline::builder()
-        .with_tip_sender(tip_tx)
-        // we want to sync all blocks the file client provides or 0 if empty
-        .with_max_block(max_block)
-        .with_fail_on_unwind(true)
-        .add_stages(
-            DefaultStages::new(
-                provider_factory.clone(),
-                tip_rx,
-                consensus.clone(),
-                header_downloader,
-                body_downloader,
-                evm_config,
-                config.stages.clone(),
-                PruneModes::default(),
-                None,
-            )
-            .builder()
-            .disable_all_if(&StageId::STATE_REQUIRED, || disable_exec),
-        )
-        .build(provider_factory, static_file_producer);
-
-    let events = pipeline.events().map(Into::into);
-
-    Ok((pipeline, events))
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/crates/cli/commands/src/import_op.rs b/crates/cli/commands/src/import_op.rs
new file mode 100644
index 00000000000..c3adec10200
--- /dev/null
+++ b/crates/cli/commands/src/import_op.rs
@@ -0,0 +1,254 @@
+//! Core import functionality without CLI dependencies.
+
+use alloy_primitives::B256;
+use futures::StreamExt;
+use reth_config::Config;
+use reth_consensus::FullConsensus;
+use reth_db_api::{tables, transaction::DbTx};
+use reth_downloaders::{
+    bodies::bodies::BodiesDownloaderBuilder,
+    file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE},
+    headers::reverse_headers::ReverseHeadersDownloaderBuilder,
+};
+use reth_evm::ConfigureEvm;
+use reth_network_p2p::{
+    bodies::downloader::BodyDownloader,
+    headers::downloader::{HeaderDownloader, SyncTarget},
+};
+use reth_node_api::BlockTy;
+use reth_node_events::node::NodeEvent;
+use reth_provider::{
+    providers::ProviderNodeTypes, BlockNumReader, HeaderProvider, ProviderError, ProviderFactory,
+    StageCheckpointReader,
+};
+use reth_prune::PruneModes;
+use reth_stages::{prelude::*, Pipeline, StageId, StageSet};
+use reth_static_file::StaticFileProducer;
+use std::{path::Path, sync::Arc};
+use tokio::sync::watch;
+use tracing::{debug, error, info};
+
+/// Configuration for importing blocks from RLP files.
+#[derive(Debug, Clone, Default)]
+pub struct ImportConfig {
+    /// Disables stages that require state.
+    pub no_state: bool,
+    /// Chunk byte length to read from file.
+    pub chunk_len: Option<u64>,
+}
+
+/// Result of an import operation.
+#[derive(Debug)]
+pub struct ImportResult {
+    /// Total number of blocks decoded from the file.
+    pub total_decoded_blocks: usize,
+    /// Total number of transactions decoded from the file.
+    pub total_decoded_txns: usize,
+    /// Total number of blocks imported into the database.
+    pub total_imported_blocks: usize,
+    /// Total number of transactions imported into the database.
+    pub total_imported_txns: usize,
+}
+
+impl ImportResult {
+    /// Returns true if all blocks and transactions were imported successfully.
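+    ///
+    /// A minimal usage sketch; the `result` binding here is hypothetical and
+    /// stands in for the value returned by `import_blocks_from_file`:
+    ///
+    /// ```ignore
+    /// if !result.is_complete() {
+    ///     eprintln!(
+    ///         "partial import: {}/{} blocks, {}/{} transactions",
+    ///         result.total_imported_blocks, result.total_decoded_blocks,
+    ///         result.total_imported_txns, result.total_decoded_txns,
+    ///     );
+    /// }
+    /// ```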
+    pub fn is_complete(&self) -> bool {
+        self.total_decoded_blocks == self.total_imported_blocks &&
+            self.total_decoded_txns == self.total_imported_txns
+    }
+}
+
+/// Imports blocks from an RLP-encoded file into the database.
+///
+/// This function reads RLP-encoded blocks from a file in chunks and imports them
+/// using the pipeline infrastructure. It's designed to be used both from the CLI
+/// and from test code.
+pub async fn import_blocks_from_file<N>(
+    path: &Path,
+    import_config: ImportConfig,
+    provider_factory: ProviderFactory<N>,
+    config: &Config,
+    executor: impl ConfigureEvm<Primitives = N::Primitives> + 'static,
+    consensus: Arc<
+        impl FullConsensus<N::Primitives, Error = reth_consensus::ConsensusError> + 'static,
+    >,
+) -> eyre::Result<ImportResult>
+where
+    N: ProviderNodeTypes,
+{
+    if import_config.no_state {
+        info!(target: "reth::import", "Disabled stages requiring state");
+    }
+
+    debug!(target: "reth::import",
+        chunk_byte_len=import_config.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE),
+        "Chunking chain import"
+    );
+
+    info!(target: "reth::import", "Consensus engine initialized");
+
+    // open file
+    let mut reader = ChunkedFileReader::new(path, import_config.chunk_len).await?;
+
+    let mut total_decoded_blocks = 0;
+    let mut total_decoded_txns = 0;
+
+    let mut sealed_header = provider_factory
+        .sealed_header(provider_factory.last_block_number()?)?
+        .expect("should have genesis");
+
+    while let Some(file_client) =
+        reader.next_chunk::<FileClient<BlockTy<N>>>(consensus.clone(), Some(sealed_header)).await?
+    {
+        // create a new FileClient from chunk read from file
+        info!(target: "reth::import",
+            "Importing chain file chunk"
+        );
+
+        let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?;
+        info!(target: "reth::import", "Chain file chunk read");
+
+        total_decoded_blocks += file_client.headers_len();
+        total_decoded_txns += file_client.total_transactions();
+
+        let (mut pipeline, events) = build_import_pipeline_impl(
+            config,
+            provider_factory.clone(),
+            &consensus,
+            Arc::new(file_client),
+            StaticFileProducer::new(provider_factory.clone(), PruneModes::default()),
+            import_config.no_state,
+            executor.clone(),
+        )?;
+
+        // override the tip
+        pipeline.set_tip(tip);
+        debug!(target: "reth::import", ?tip, "Tip manually set");
+
+        let provider = provider_factory.provider()?;
+
+        let latest_block_number =
+            provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number);
+        tokio::spawn(reth_node_events::node::handle_events(None, latest_block_number, events));
+
+        // Run pipeline
+        info!(target: "reth::import", "Starting sync pipeline");
+        tokio::select! {
+            res = pipeline.run() => res?,
+            _ = tokio::signal::ctrl_c() => {
+                info!(target: "reth::import", "Import interrupted by user");
+                break;
+            },
+        }
+
+        sealed_header = provider_factory
+            .sealed_header(provider_factory.last_block_number()?)?
+            .expect("should have genesis");
+    }
+
+    let provider = provider_factory.provider()?;
+
+    let total_imported_blocks = provider.tx_ref().entries::<tables::HeaderNumbers>()?;
+    let total_imported_txns = provider.tx_ref().entries::<tables::TransactionHashNumbers>()?;
+
+    let result = ImportResult {
+        total_decoded_blocks,
+        total_decoded_txns,
+        total_imported_blocks,
+        total_imported_txns,
+    };
+
+    if !result.is_complete() {
+        error!(target: "reth::import",
+            total_decoded_blocks,
+            total_imported_blocks,
+            total_decoded_txns,
+            total_imported_txns,
+            "Chain was partially imported"
+        );
+    } else {
+        info!(target: "reth::import",
+            total_imported_blocks,
+            total_imported_txns,
+            "Chain file imported"
+        );
+    }
+
+    Ok(result)
+}
+
+/// Builds import pipeline.
+///
+/// If configured to execute, all stages will run. Otherwise, only stages that don't require state
+/// will run.
+pub fn build_import_pipeline_impl<N, C, E>(
+    config: &Config,
+    provider_factory: ProviderFactory<N>,
+    consensus: &Arc<C>,
+    file_client: Arc<FileClient<BlockTy<N>>>,
+    static_file_producer: StaticFileProducer<ProviderFactory<N>>,
+    disable_exec: bool,
+    evm_config: E,
+) -> eyre::Result<(Pipeline<N>, impl futures::Stream<Item = NodeEvent<N::Primitives>>)>
+where
+    N: ProviderNodeTypes,
+    C: FullConsensus<N::Primitives, Error = reth_consensus::ConsensusError> + 'static,
+    E: ConfigureEvm<Primitives = N::Primitives> + 'static,
+{
+    if !file_client.has_canonical_blocks() {
+        eyre::bail!("unable to import non canonical blocks");
+    }
+
+    // Retrieve latest header found in the database.
+    let last_block_number = provider_factory.last_block_number()?;
+    let local_head = provider_factory
+        .sealed_header(last_block_number)?
+        .ok_or_else(|| ProviderError::HeaderNotFound(last_block_number.into()))?;
+
+    let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers)
+        .build(file_client.clone(), consensus.clone())
+        .into_task();
+    // TODO: The pipeline should correctly configure the downloader on its own.
+    // Find the possibility to remove unnecessary pre-configuration.
+    header_downloader.update_local_head(local_head);
+    header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap()));
+
+    let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies)
+        .build(file_client.clone(), consensus.clone(), provider_factory.clone())
+        .into_task();
+    // TODO: The pipeline should correctly configure the downloader on its own.
+    // Find the possibility to remove unnecessary pre-configuration.
+    body_downloader
+        .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap())
+        .expect("failed to set download range");
+
+    let (tip_tx, tip_rx) = watch::channel(B256::ZERO);
+
+    let max_block = file_client.max_block().unwrap_or(0);
+
+    let pipeline = Pipeline::builder()
+        .with_tip_sender(tip_tx)
+        // we want to sync all blocks the file client provides or 0 if empty
+        .with_max_block(max_block)
+        .with_fail_on_unwind(true)
+        .add_stages(
+            DefaultStages::new(
+                provider_factory.clone(),
+                tip_rx,
+                consensus.clone(),
+                header_downloader,
+                body_downloader,
+                evm_config,
+                config.stages.clone(),
+                PruneModes::default(),
+                None,
+            )
+            .builder()
+            .disable_all_if(&StageId::STATE_REQUIRED, || disable_exec),
+        )
+        .build(provider_factory, static_file_producer);
+
+    let events = pipeline.events().map(Into::into);
+
+    Ok((pipeline, events))
+}
diff --git a/crates/cli/commands/src/lib.rs b/crates/cli/commands/src/lib.rs
index 778f284028a..e602fac8207 100644
--- a/crates/cli/commands/src/lib.rs
+++ b/crates/cli/commands/src/lib.rs
@@ -15,6 +15,7 @@ pub mod download;
 pub mod dump_genesis;
 pub mod import;
 pub mod import_era;
+pub mod import_op;
 pub mod init_cmd;
 pub mod init_state;
 pub mod launcher;

From 1f557b399a9102625c28d21927427f8f915ef10f Mon Sep 17 00:00:00 2001
From: James Niken
Date: Mon, 7 Jul 2025 15:38:13 +0200
Subject: [PATCH 069/305] docs: fix typo `fileted` to `filtered` (#17257)

---
 crates/transaction-pool/src/pool/best.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/transaction-pool/src/pool/best.rs b/crates/transaction-pool/src/pool/best.rs
index eba3c2c35d0..ecf28a519e2 100644
--- a/crates/transaction-pool/src/pool/best.rs
+++ b/crates/transaction-pool/src/pool/best.rs
@@ -91,7 +91,7 @@ pub struct BestTransactions<T: TransactionOrdering> {
     /// There might be the case where a yielded transactions is invalid, this will track it.
pub(crate) invalid: HashSet, /// Used to receive any new pending transactions that have been added to the pool after this - /// iterator was static fileted + /// iterator was static filtered /// /// These new pending transactions are inserted into this iterator's pool before yielding the /// next value From 927e9c4661ffa1fd1d35f08693a9c8df2410ef7b Mon Sep 17 00:00:00 2001 From: crStiv Date: Mon, 7 Jul 2025 16:38:42 +0300 Subject: [PATCH 070/305] docs: typos (#17246) --- docs/crates/network.md | 12 ++++++------ docs/crates/stages.md | 2 +- docs/design/goals.md | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/crates/network.md b/docs/crates/network.md index 15c9c2494f5..9aa112b17ef 100644 --- a/docs/crates/network.md +++ b/docs/crates/network.md @@ -215,7 +215,7 @@ pub struct NetworkManager { /// Sender half to send events to the /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler) task, if configured. to_eth_request_handler: Option>, - /// Tracks the number of active session (connected peers). + /// Tracks the number of active sessions (connected peers). /// /// This is updated via internal events and shared via `Arc` with the [`NetworkHandle`] /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. @@ -400,7 +400,7 @@ pub struct BodiesDownloader { } ``` -Here, similarly, a `FetchClient` is passed in to the `client` field, and the `get_block_bodies` method it implements is used when constructing the stream created by the `BodiesDownloader` in the `execute` method of the `BodyStage`. +Here, similarly, a `FetchClient` is passed into the `client` field, and the `get_block_bodies` method it implements is used when constructing the stream created by the `BodiesDownloader` in the `execute` method of the `BodyStage`. [File: crates/net/downloaders/src/bodies/bodies.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/downloaders/src/bodies/bodies.rs) ```rust,ignore @@ -657,9 +657,9 @@ pub struct TransactionsManager { pool: Pool, /// Network access. network: NetworkHandle, - /// Subscriptions to all network related events. + /// Subscriptions to all network-related events. /// - /// From which we get all new incoming transaction related messages. + /// From which we get all new incoming transaction-related messages. network_events: UnboundedReceiverStream, /// All currently active requests for pooled transactions. inflight_requests: Vec, @@ -696,7 +696,7 @@ pub struct TransactionsHandle { ### Input Streams to the Transactions Task We'll touch on most of the fields in the `TransactionsManager` as the chapter continues, but some worth noting now are the 4 streams from which inputs to the task are fed: -- `transaction_events`: A listener for `NetworkTransactionEvent`s sent from the `NetworkManager`, which consist solely of events related to transactions emitted by the network. +- `transaction_events`: A listener for `NetworkTransactionEvent`s sent from the `NetworkManager`, which consists solely of events related to transactions emitted by the network. - `network_events`: A listener for `NetworkEvent`s sent from the `NetworkManager`, which consist of other "meta" events such as sessions with peers being established or closed. 
- `command_rx`: A listener for `TransactionsCommand`s sent from the `TransactionsHandle` - `pending`: A listener for new pending transactions added to the `TransactionPool` @@ -1121,7 +1121,7 @@ It iterates over `TransactionsManager.pool_imports`, polling each one, and if it `on_good_import`, called when the transaction was successfully imported into the transaction pool, removes the entry for the given transaction hash from `TransactionsManager.transactions_by_peers`. -`on_bad_import` also removes the entry for the given transaction hash from `TransactionsManager.transactions_by_peers`, but also calls `report_bad_message` for each peer in the entry, decreasing all of their reputation scores as they were propagating a transaction that could not validated. +`on_bad_import` also removes the entry for the given transaction hash from `TransactionsManager.transactions_by_peers`, but also calls `report_bad_message` for each peer in the entry, decreasing all of their reputation scores as they were propagating a transaction that could not be validated. #### Checking on `pending_transactions` diff --git a/docs/crates/stages.md b/docs/crates/stages.md index 2c35e065c56..a6f107c2c0b 100644 --- a/docs/crates/stages.md +++ b/docs/crates/stages.md @@ -36,7 +36,7 @@ The transactions root is a value that is calculated based on the transactions in When the `BodyStage` is looking at the headers to determine which block to download, it will skip the blocks where the `header.ommers_hash` and the `header.transaction_root` are empty, denoting that the block is empty as well. -Once the `BodyStage` determines which block bodies to fetch, a new `bodies_stream` is created which downloads all of the bodies from the `starting_block`, up until the `target_block` specified. Each time the `bodies_stream` yields a value, a `SealedBlock` is created using the block header, the ommers hash and the newly downloaded block body. +Once the `BodyStage` determines which block bodies to fetch, a new `bodies_stream` is created which downloads all of the bodies from the `starting_block`, up until the `target_block` is specified. Each time the `bodies_stream` yields a value, a `SealedBlock` is created using the block header, the ommers hash and the newly downloaded block body. The new block is then pre-validated, checking that the ommers hash and transactions root in the block header are the same in the block body. Following a successful pre-validation, the `BodyStage` loops through each transaction in the `block.body`, adding the transaction to the database. This process is repeated for every downloaded block body, with the `BodyStage` returning `Ok(ExecOutput { stage_progress, done: true })` signaling it successfully completed. diff --git a/docs/design/goals.md b/docs/design/goals.md index a29b3a824c4..6edfb1282c7 100644 --- a/docs/design/goals.md +++ b/docs/design/goals.md @@ -44,7 +44,7 @@ Ideally, we can achieve such fast runtime operation that we can avoid storing ce **Control over tradeoffs** -Almost any given design choice or optimization to the client comes with its own tradeoffs. As such, our long-term goal is not to make opinionated decisions on behalf of everyone, as some users will be negatively impacted and turned away from what could be a great client. +Almost any given design choice or optimization for the client comes with its own tradeoffs. 
As such, our long-term goal is not to make opinionated decisions on behalf of everyone, as some users will be negatively impacted and turned away from what could be a great client. **Profiles** @@ -80,4 +80,4 @@ It goes without saying that verbose and thorough documentation is a must. The do **Issue tracking** -Everything that is (and is not) being worked on within the client should be tracked accordingly so that anyone in the community can stay on top of the state of development. This makes it clear what kind of help is needed, and where. \ No newline at end of file +Everything that is (and is not) being worked on within the client should be tracked accordingly so that anyone in the community can stay on top of the state of development. This makes it clear what kind of help is needed, and where. From 468e9250773c246b087e82d33caf03e09a3b9429 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Mon, 7 Jul 2025 16:29:19 +0200 Subject: [PATCH 071/305] fix(trie): track branch node updates only in ParallelSparseTrie, not subtries (#17223) --- crates/trie/sparse-parallel/src/trie.rs | 354 +++++++++++++++++------- 1 file changed, 252 insertions(+), 102 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 14484dd51bc..2bc4f495047 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -32,7 +32,7 @@ pub const NUM_LOWER_SUBTRIES: usize = 16usize.pow(UPPER_TRIE_MAX_DEPTH as u32); /// - Each leaf entry in the `subtries` and `upper_trie` collection must have a corresponding entry /// in `values` collection. If the root node is a leaf, it must also have an entry in `values`. /// - All keys in `values` collection are full leaf paths. -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(PartialEq, Eq, Debug)] pub struct ParallelSparseTrie { /// This contains the trie nodes for the upper part of the trie. upper_subtrie: Box, @@ -83,7 +83,7 @@ impl SparseTrieInterface for ParallelSparseTrie { } fn with_updates(mut self, retain_updates: bool) -> Self { - self.updates = retain_updates.then_some(SparseTrieUpdates::default()); + self.updates = retain_updates.then(Default::default); self } @@ -150,6 +150,8 @@ impl SparseTrieInterface for ParallelSparseTrie { return Ok(()) } + let retain_updates = self.updates_enabled(); + // Start at the root, traversing until we find either the node to update or a subtrie to // update. // @@ -170,12 +172,42 @@ impl SparseTrieInterface for ParallelSparseTrie { { // Traverse the next node, keeping track of any changed nodes and the next step in the // trie - match self.upper_subtrie.update_next_node(current, &full_path, &provider)? { + match self.upper_subtrie.update_next_node(current, &full_path, retain_updates)? { LeafUpdateStep::Continue { next_node } => { next = Some(next_node); } - LeafUpdateStep::Complete { inserted_nodes } => { + LeafUpdateStep::Complete { inserted_nodes, reveal_path } => { new_nodes.extend(inserted_nodes); + + if let Some(reveal_path) = reveal_path { + let subtrie = self.subtrie_for_path_mut(&reveal_path); + if subtrie.nodes.get(&reveal_path).expect("node must exist").is_hash() { + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.blinded_node(&reveal_path)? 
+ { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::parallel_sparse", + ?reveal_path, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing child", + ); + subtrie.reveal_node( + reveal_path, + &decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + } else { + return Err(SparseTrieErrorKind::NodeNotFoundInProvider { + path: reveal_path, + } + .into()) + } + } + } + next = None; } LeafUpdateStep::NodeNotFound => { @@ -237,7 +269,7 @@ impl SparseTrieInterface for ParallelSparseTrie { // If we didn't update the target leaf, we need to call update_leaf on the subtrie // to ensure that the leaf is updated correctly. - subtrie.update_leaf(full_path, value, provider)?; + subtrie.update_leaf(full_path, value, provider, retain_updates)?; } Ok(()) @@ -502,8 +534,9 @@ impl SparseTrieInterface for ParallelSparseTrie { #[cfg(not(feature = "std"))] // Update subtrie hashes serially if nostd for ChangedSubtrie { index, mut subtrie, mut prefix_set } in subtries { - subtrie.update_hashes(&mut prefix_set); - tx.send((index, subtrie)).unwrap(); + let mut update_actions = self.updates_enabled().then(|| Vec::new()); + subtrie.update_hashes(&mut prefix_set, &mut update_actions); + tx.send((index, subtrie, update_actions)).unwrap(); } #[cfg(feature = "std")] @@ -513,16 +546,19 @@ impl SparseTrieInterface for ParallelSparseTrie { subtries .into_par_iter() .map(|ChangedSubtrie { index, mut subtrie, mut prefix_set }| { - subtrie.update_hashes(&mut prefix_set); - (index, subtrie) + let mut update_actions = self.updates_enabled().then(Vec::new); + subtrie.update_hashes(&mut prefix_set, &mut update_actions); + (index, subtrie, update_actions) }) .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap()); } drop(tx); - // Return updated subtries back to the trie - for (index, subtrie) in rx { + // Return updated subtries back to the trie after executing any actions required on the + // top-level `SparseTrieUpdates`. + for (index, subtrie, update_actions) in rx { + self.apply_subtrie_update_actions(update_actions); self.lower_subtries[index] = Some(subtrie); } } @@ -532,19 +568,13 @@ impl SparseTrieInterface for ParallelSparseTrie { } fn take_updates(&mut self) -> SparseTrieUpdates { - core::iter::once(&mut self.upper_subtrie) - .chain(self.lower_subtries.iter_mut().flatten()) - .fold(SparseTrieUpdates::default(), |mut acc, subtrie| { - acc.extend(subtrie.take_updates()); - acc - }) + self.updates.take().unwrap_or_default() } fn wipe(&mut self) { self.upper_subtrie.wipe(); self.lower_subtries = [const { None }; NUM_LOWER_SUBTRIES]; self.prefix_set = PrefixSetMut::all(); - self.updates = self.updates.is_some().then(SparseTrieUpdates::wiped); } fn clear(&mut self) { @@ -566,6 +596,11 @@ impl SparseTrieInterface for ParallelSparseTrie { } impl ParallelSparseTrie { + /// Returns true if retaining updates is enabled for the overall trie. + const fn updates_enabled(&self) -> bool { + self.updates.is_some() + } + /// Returns a reference to the lower `SparseSubtrie` for the given path, or None if the /// path belongs to the upper trie or a lower subtrie for the path doesn't exist. fn lower_subtrie_for_path(&self, path: &Nibbles) -> Option<&SparseSubtrie> { @@ -854,8 +889,32 @@ impl ParallelSparseTrie { } } + /// Drains any [`SparseTrieUpdatesAction`]s from the given subtrie, and applies each action to + /// the given `updates` set. If the given set is None then this is a no-op. 
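+    ///
+    /// This indirection exists because branch node updates are tracked only on
+    /// the top-level [`SparseTrieUpdates`] set rather than on individual
+    /// subtries, so a subtrie reports its intended changes as
+    /// [`SparseTrieUpdatesAction`]s that are replayed here against the single
+    /// authoritative set.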
+ fn apply_subtrie_update_actions( + &mut self, + update_actions: Option>, + ) { + if let (Some(updates), Some(update_actions)) = (self.updates.as_mut(), update_actions) { + for action in update_actions { + match action { + SparseTrieUpdatesAction::InsertRemoved(path) => { + updates.updated_nodes.remove(&path); + updates.removed_nodes.insert(path); + } + SparseTrieUpdatesAction::RemoveUpdated(path) => { + updates.updated_nodes.remove(&path); + } + SparseTrieUpdatesAction::InsertUpdated(path, branch_node) => { + updates.updated_nodes.insert(path, branch_node); + } + } + } + }; + } + /// Updates hashes for the upper subtrie, using nodes from both upper and lower subtries. - #[instrument(level = "trace", target = "engine::tree", skip_all, ret)] + #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, ret)] fn update_upper_subtrie_hashes(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { trace!(target: "trie::parallel_sparse", "Updating upper subtrie hashes"); @@ -865,6 +924,7 @@ impl ParallelSparseTrie { is_in_prefix_set: None, }); + let mut update_actions = self.updates_enabled().then(Vec::new); while let Some(stack_item) = self.upper_subtrie.inner.buffers.path_stack.pop() { let path = stack_item.path; let node = if path.len() < UPPER_TRIE_MAX_DEPTH { @@ -884,9 +944,13 @@ impl ParallelSparseTrie { }; // Calculate the RLP node for the current node using upper subtrie - self.upper_subtrie.inner.rlp_node(prefix_set, stack_item, node); + self.upper_subtrie.inner.rlp_node(prefix_set, &mut update_actions, stack_item, node); } + // If there were any branch node updates as a result of calculating the RLP node for the + // upper trie then apply them to the top-level set. + self.apply_subtrie_update_actions(update_actions); + debug_assert_eq!(self.upper_subtrie.inner.buffers.rlp_node_stack.len(), 1); self.upper_subtrie.inner.buffers.rlp_node_stack.pop().unwrap().rlp_node } @@ -989,15 +1053,6 @@ impl SparseSubtrie { Self { path, ..Default::default() } } - /// Configures the subtrie to retain information about updates. - /// - /// If `retain_updates` is true, the trie will record branch node updates and deletions. - /// This information can then be used to efficiently update an external database. - pub fn with_updates(mut self, retain_updates: bool) -> Self { - self.inner.updates = retain_updates.then_some(SparseTrieUpdates::default()); - self - } - /// Returns true if the current path and its child are both found in the same level. fn is_child_same_level(current_path: &Nibbles, child_path: &Nibbles) -> bool { let current_level = core::mem::discriminant(&SparseSubtrieType::from_path(current_path)); @@ -1014,7 +1069,6 @@ impl SparseSubtrie { /// # Returns /// /// Returns the `Ok` if the update is successful. - /// If a split branch was added this is returned as well, along with its path. /// /// Note: If an update requires revealing a blinded node, an error is returned if the blinded /// provider returns an error. @@ -1023,6 +1077,7 @@ impl SparseSubtrie { full_path: Nibbles, value: Vec, provider: impl BlindedProvider, + retain_updates: bool, ) -> SparseTrieResult<()> { debug_assert!(full_path.starts_with(&self.path)); let existing = self.inner.values.insert(full_path, value); @@ -1034,11 +1089,42 @@ impl SparseSubtrie { // Here we are starting at the root of the subtrie, and traversing from there. let mut current = Some(self.path); while let Some(current_path) = current { - match self.update_next_node(current_path, &full_path, &provider)? 
{ + match self.update_next_node(current_path, &full_path, retain_updates)? { LeafUpdateStep::Continue { next_node } => { current = Some(next_node); } - LeafUpdateStep::Complete { .. } | LeafUpdateStep::NodeNotFound => { + LeafUpdateStep::Complete { reveal_path, .. } => { + if let Some(reveal_path) = reveal_path { + if self.nodes.get(&reveal_path).expect("node must exist").is_hash() { + if let Some(RevealedNode { node, tree_mask, hash_mask }) = + provider.blinded_node(&reveal_path)? + { + let decoded = TrieNode::decode(&mut &node[..])?; + trace!( + target: "trie::parallel_sparse", + ?reveal_path, + ?decoded, + ?tree_mask, + ?hash_mask, + "Revealing child", + ); + self.reveal_node( + reveal_path, + &decoded, + TrieMasks { hash_mask, tree_mask }, + )?; + } else { + return Err(SparseTrieErrorKind::NodeNotFoundInProvider { + path: reveal_path, + } + .into()) + } + } + } + + current = None; + } + LeafUpdateStep::NodeNotFound => { current = None; } } @@ -1057,7 +1143,7 @@ impl SparseSubtrie { &mut self, mut current: Nibbles, path: &Nibbles, - provider: impl BlindedProvider, + retain_updates: bool, ) -> SparseTrieResult { debug_assert!(path.starts_with(&self.path)); debug_assert!(current.starts_with(&self.path)); @@ -1071,7 +1157,7 @@ impl SparseSubtrie { // the subtrie. let path = path.slice(self.path.len()..); *node = SparseNode::new_leaf(path); - Ok(LeafUpdateStep::complete_with_insertions(vec![current])) + Ok(LeafUpdateStep::complete_with_insertions(vec![current], None)) } SparseNode::Hash(hash) => { Err(SparseTrieErrorKind::BlindedNode { path: current, hash: *hash }.into()) @@ -1109,11 +1195,10 @@ impl SparseSubtrie { self.nodes .insert(existing_leaf_path, SparseNode::new_leaf(current.slice(common + 1..))); - Ok(LeafUpdateStep::complete_with_insertions(vec![ - branch_path, - new_leaf_path, - existing_leaf_path, - ])) + Ok(LeafUpdateStep::complete_with_insertions( + vec![branch_path, new_leaf_path, existing_leaf_path], + None, + )) } SparseNode::Extension { key, .. } => { current.extend(key); @@ -1126,36 +1211,7 @@ impl SparseSubtrie { // If branch node updates retention is enabled, we need to query the // extension node child to later set the hash mask for a parent branch node // correctly. - if self.inner.updates.is_some() { - // Check if the extension node child is a hash that needs to be revealed - if self - .nodes - .get(¤t) - .expect( - "node must exist, extension nodes are only created with children", - ) - .is_hash() - { - if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.blinded_node(¤t)? 
- { - let decoded = TrieNode::decode(&mut &node[..])?; - trace!( - target: "trie::parallel_sparse", - ?current, - ?decoded, - ?tree_mask, - ?hash_mask, - "Revealing extension node child", - ); - self.reveal_node( - current, - &decoded, - TrieMasks { hash_mask, tree_mask }, - )?; - } - } - } + let reveal_path = retain_updates.then_some(current); // create state mask for new branch node // NOTE: this might overwrite the current extension node @@ -1183,7 +1239,7 @@ impl SparseSubtrie { inserted_nodes.push(ext_path); } - return Ok(LeafUpdateStep::complete_with_insertions(inserted_nodes)) + return Ok(LeafUpdateStep::complete_with_insertions(inserted_nodes, reveal_path)) } Ok(LeafUpdateStep::continue_with(current)) @@ -1195,7 +1251,7 @@ impl SparseSubtrie { state_mask.set_bit(nibble); let new_leaf = SparseNode::new_leaf(path.slice(current.len()..)); self.nodes.insert(current, new_leaf); - return Ok(LeafUpdateStep::complete_with_insertions(vec![current])) + return Ok(LeafUpdateStep::complete_with_insertions(vec![current], None)) } // If the nibble is set, we can continue traversing the branch. @@ -1410,17 +1466,24 @@ impl SparseSubtrie { /// # Parameters /// /// - `prefix_set`: The set of trie paths whose nodes have changed. + /// - `update_actions`: A buffer which `SparseTrieUpdatesAction`s will be written to in the + /// event that any changes to the top-level updates are required. If None then update + /// retention is disabled. + /// is disabled. /// /// # Returns /// - /// A tuple containing the root node of the updated subtrie and an optional set of updates. - /// Updates are [`Some`] if [`Self::with_updates`] was set to `true`. + /// A tuple containing the root node of the updated subtrie. /// /// # Panics /// /// If the node at the root path does not exist. - #[instrument(level = "trace", target = "engine::tree", skip_all, fields(root = ?self.path), ret)] - pub fn update_hashes(&mut self, prefix_set: &mut PrefixSet) -> RlpNode { + #[instrument(level = "trace", target = "trie::parallel_sparse", skip_all, fields(root = ?self.path), ret)] + fn update_hashes( + &mut self, + prefix_set: &mut PrefixSet, + update_actions: &mut Option>, + ) -> RlpNode { trace!(target: "trie::parallel_sparse", "Updating subtrie hashes"); debug_assert!(prefix_set.iter().all(|path| path.starts_with(&self.path))); @@ -1438,21 +1501,13 @@ impl SparseSubtrie { .get_mut(&path) .unwrap_or_else(|| panic!("node at path {path:?} does not exist")); - self.inner.rlp_node(prefix_set, stack_item, node); + self.inner.rlp_node(prefix_set, update_actions, stack_item, node); } debug_assert_eq!(self.inner.buffers.rlp_node_stack.len(), 1); self.inner.buffers.rlp_node_stack.pop().unwrap().rlp_node } - /// Consumes and returns the currently accumulated trie updates. - /// - /// This is useful when you want to apply the updates to an external database, - /// and then start tracking a new set of updates. - fn take_updates(&mut self) -> SparseTrieUpdates { - self.inner.updates.take().unwrap_or_default() - } - /// Removes all nodes and values from the subtrie, resetting it to a blank state /// with only an empty root node. This is used when a storage root is deleted. fn wipe(&mut self) { @@ -1463,6 +1518,7 @@ impl SparseSubtrie { /// Clears the subtrie, keeping the data structures allocated. fn clear(&mut self) { self.nodes.clear(); + self.inner.clear(); } } @@ -1477,8 +1533,6 @@ struct SparseSubtrieInner { /// Map from leaf key paths to their values. /// All values are stored here instead of directly in leaf nodes. 
values: HashMap>, - /// Optional tracking of trie updates for later use. - updates: Option, /// Reusable buffers for [`SparseSubtrie::update_hashes`]. buffers: SparseSubtrieBuffers, } @@ -1496,6 +1550,9 @@ impl SparseSubtrieInner { /// # Parameters /// /// - `prefix_set`: Set of prefixes (key paths) that have been marked as updated + /// - `update_actions`: A buffer which `SparseTrieUpdatesAction`s will be written to in the + /// event that any changes to the top-level updates are required. If None then update + /// retention is disabled. /// - `stack_item`: The stack item to process /// - `node`: The sparse node to process (will be mutated to update hash) /// @@ -1503,8 +1560,6 @@ impl SparseSubtrieInner { /// /// - Updates the node's hash field after computing RLP /// - Pushes nodes to [`SparseSubtrieBuffers::path_stack`] to manage traversal - /// - Updates the (trie updates)[`SparseTrieUpdates`] accumulator when tracking changes, if - /// [`Some`] /// - May push items onto the path stack for deferred processing /// /// # Exit condition @@ -1514,6 +1569,7 @@ impl SparseSubtrieInner { fn rlp_node( &mut self, prefix_set: &mut PrefixSet, + update_actions: &mut Option>, mut stack_item: RlpNodePathStackItem, node: &mut SparseNode, ) { @@ -1625,7 +1681,7 @@ impl SparseSubtrieInner { return } - let retain_updates = self.updates.is_some() && prefix_set_contains(&path); + let retain_updates = update_actions.is_some() && prefix_set_contains(&path); self.buffers.branch_child_buf.clear(); // Walk children in a reverse order from `f` to `0`, so we pop the `0` first @@ -1735,8 +1791,8 @@ impl SparseSubtrieInner { // Save a branch node update only if it's not a root node, and we need to // persist updates. - let store_in_db_trie_value = if let Some(updates) = - self.updates.as_mut().filter(|_| retain_updates && !path.is_empty()) + let store_in_db_trie_value = if let Some(update_actions) = + update_actions.as_mut().filter(|_| retain_updates && !path.is_empty()) { let store_in_db_trie = !tree_mask.is_empty() || !hash_mask.is_empty(); if store_in_db_trie { @@ -1750,7 +1806,8 @@ impl SparseSubtrieInner { hashes, hash.filter(|_| path.is_empty()), ); - updates.updated_nodes.insert(path, branch_node); + update_actions + .push(SparseTrieUpdatesAction::InsertUpdated(path, branch_node)); } else if self .branch_node_tree_masks .get(&path) @@ -1762,8 +1819,7 @@ impl SparseSubtrieInner { // If new tree and hash masks are empty, but previously they weren't, we // need to remove the node update and add the node itself to the list of // removed nodes. - updates.updated_nodes.remove(&path); - updates.removed_nodes.insert(path); + update_actions.push(SparseTrieUpdatesAction::InsertRemoved(path)); } else if self .branch_node_hash_masks .get(&path) @@ -1772,7 +1828,7 @@ impl SparseSubtrieInner { { // If new tree and hash masks are empty, and they were previously empty // as well, we need to remove the node update. 
- updates.updated_nodes.remove(&path); + update_actions.push(SparseTrieUpdatesAction::RemoveUpdated(path)); } store_in_db_trie @@ -1802,7 +1858,6 @@ impl SparseSubtrieInner { self.branch_node_tree_masks.clear(); self.branch_node_hash_masks.clear(); self.values.clear(); - self.updates = None; self.buffers.clear(); } } @@ -1819,6 +1874,8 @@ pub enum LeafUpdateStep { Complete { /// The node paths that were inserted during this step inserted_nodes: Vec, + /// Path to a node which may need to be revealed + reveal_path: Option, }, /// The node was not found #[default] @@ -1832,8 +1889,11 @@ impl LeafUpdateStep { } /// Creates a step indicating completion with inserted nodes - pub const fn complete_with_insertions(inserted_nodes: Vec) -> Self { - Self::Complete { inserted_nodes } + pub const fn complete_with_insertions( + inserted_nodes: Vec, + reveal_path: Option, + ) -> Self { + Self::Complete { inserted_nodes, reveal_path } } } @@ -1941,6 +2001,18 @@ fn path_subtrie_index_unchecked(path: &Nibbles) -> usize { path.get_byte_unchecked(0) as usize } +/// Used by lower subtries to communicate updates to the the top-level [`SparseTrieUpdates`] set. +#[derive(Clone, Debug, Eq, PartialEq)] +enum SparseTrieUpdatesAction { + /// Remove the path from the `updated_nodes`, if it was present, and add it to `removed_nodes`. + InsertRemoved(Nibbles), + /// Remove the path from the `updated_nodes`, if it was present, leaving `removed_nodes` + /// unaffected. + RemoveUpdated(Nibbles), + /// Insert the branch node into `updated_nodes`. + InsertUpdated(Nibbles, BranchNodeCompact), +} + #[cfg(test)] mod tests { use super::{ @@ -2153,6 +2225,19 @@ mod tests { self } + fn has_hash(self, path: &Nibbles, expected_hash: &B256) -> Self { + match self.subtrie.nodes.get(path) { + Some(SparseNode::Hash(hash)) => { + assert_eq!( + *hash, *expected_hash, + "Expected hash at {path:?} to be {expected_hash:?}, found {hash:?}", + ); + } + node => panic!("Expected hash node at {path:?}, found {node:?}"), + } + self + } + fn has_value(self, path: &Nibbles, expected_value: &[u8]) -> Self { let actual = self.subtrie.inner.values.get(path); assert_eq!( @@ -2806,8 +2891,7 @@ mod tests { #[test] fn test_subtrie_update_hashes() { - let mut subtrie = - Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0])).with_updates(true)); + let mut subtrie = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0]))); // Create leaf nodes with paths 0x0...0, 0x00001...0, 0x0010...0 let leaf_1_full_path = Nibbles::from_nibbles([0; 64]); @@ -2886,6 +2970,7 @@ mod tests { subtrie.update_hashes( &mut PrefixSetMut::from([leaf_1_full_path, leaf_2_full_path, leaf_3_full_path]) .freeze(), + &mut None, ); // Compare hashes between hash builder and subtrie @@ -3564,8 +3649,8 @@ mod tests { [key], ); - let mut sparse = SparseSubtrie::default().with_updates(true); - sparse.update_leaf(key, value_encoded(), DefaultBlindedProvider).unwrap(); + let mut sparse = SparseSubtrie::default(); + sparse.update_leaf(key, value_encoded(), DefaultBlindedProvider, false).unwrap(); // TODO: enable these and make test pass as we have these implemented // let sparse_root = sparse.root(); // let sparse_updates = sparse.take_updates(); @@ -4070,6 +4155,71 @@ mod tests { .has_value(&leaf3_path, &value3); } + #[test] + fn test_update_upper_extension_reveal_lower_hash_node() { + let ctx = ParallelSparseTrieTestContext; + + // Test edge case: extension pointing to hash node that gets updated to branch + // and reveals the hash node from lower trie + // + // Setup: + // 
Upper trie: + // 0x: Extension { key: 0xAB } + // └── Subtrie (0xAB): pointer + // Lower trie (0xAB): + // 0xAB: Hash + // + // After update: + // Upper trie: + // 0x: Extension { key: 0xA } + // └── 0xA: Branch { state_mask: 0b100000000001 } + // ├── 0xA0: Leaf { value: ... } + // └── 0xAB: pointer + // Lower trie (0xAB): + // 0xAB: Branch { state_mask: 0b11 } + // ├── 0xAB1: Hash + // └── 0xAB2: Hash + + // Create a mock provider that will provide the hash node + let mut provider = MockBlindedProvider::new(); + + // Create revealed branch which will get revealed and add it to the mock provider + let child_hashes = [ + RlpNode::word_rlp(&B256::repeat_byte(0x11)), + RlpNode::word_rlp(&B256::repeat_byte(0x22)), + ]; + let revealed_branch = create_branch_node_with_children(&[0x1, 0x2], child_hashes); + let mut encoded = Vec::new(); + revealed_branch.encode(&mut encoded); + provider.add_revealed_node( + Nibbles::from_nibbles([0xA, 0xB]), + RevealedNode { node: encoded.into(), tree_mask: None, hash_mask: None }, + ); + + let mut trie = new_test_trie( + [ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0xA, 0xB]))), + (Nibbles::from_nibbles([0xA, 0xB]), SparseNode::Hash(B256::repeat_byte(0x42))), + ] + .into_iter(), + ); + + // Now add a leaf that will force the hash node to become a branch + let (leaf_path, value) = ctx.create_test_leaf([0xA, 0x0], 1); + trie.update_leaf(leaf_path, value, provider).unwrap(); + + // Verify the structure: extension should now terminate in a branch on the upper trie + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0xA])) + .has_branch(&Nibbles::from_nibbles([0xA]), &[0x0, 0xB]); + + // Verify the lower trie now has a branch structure + ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0xA, 0xB])) + .has_branch(&Nibbles::from_nibbles([0xA, 0xB]), &[0x1, 0x2]) + .has_hash(&Nibbles::from_nibbles([0xA, 0xB, 0x1]), &B256::repeat_byte(0x11)) + .has_hash(&Nibbles::from_nibbles([0xA, 0xB, 0x2]), &B256::repeat_byte(0x22)); + } + #[test] fn test_update_long_shared_prefix_at_boundary() { let ctx = ParallelSparseTrieTestContext; From dddde9eff9cad28db0df8e152af1b179972efdf7 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 7 Jul 2025 16:34:38 +0200 Subject: [PATCH 072/305] feat(test): allow to create testing nodes with specific datadir (#17260) --- crates/node/builder/src/builder/mod.rs | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index acbca2b7324..8bac819ab69 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -240,14 +240,25 @@ impl NodeBuilder { /// Creates an _ephemeral_ preconfigured node for testing purposes. #[cfg(feature = "test-utils")] pub fn testing_node( + self, + task_executor: TaskExecutor, + ) -> WithLaunchContext< + NodeBuilder>, ChainSpec>, + > { + let path = reth_db::test_utils::tempdir_path(); + self.testing_node_with_datadir(task_executor, path) + } + + /// Creates a preconfigured node for testing purposes with a specific datadir. 
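+    ///
+    /// Unlike [`Self::testing_node`], which always provisions a fresh temporary
+    /// directory, this variant reuses whatever state already exists at
+    /// `datadir`, e.g. a database pre-populated by an RLP import. A rough
+    /// usage sketch (the surrounding builder calls and names are assumptions,
+    /// not a complete example):
+    ///
+    /// ```ignore
+    /// let builder = NodeBuilder::new(config)
+    ///     .testing_node_with_datadir(executor, "/tmp/prepopulated-datadir");
+    /// // continue with types, components and launch as usual
+    /// ```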
+ #[cfg(feature = "test-utils")] + pub fn testing_node_with_datadir( mut self, task_executor: TaskExecutor, + datadir: impl Into, ) -> WithLaunchContext< NodeBuilder>, ChainSpec>, > { - let path = reth_node_core::dirs::MaybePlatformPath::::from( - reth_db::test_utils::tempdir_path(), - ); + let path = reth_node_core::dirs::MaybePlatformPath::::from(datadir.into()); self.config = self.config.with_datadir_args(reth_node_core::args::DatadirArgs { datadir: path.clone(), ..Default::default() From e66caca5e96466b99e1e8e612ae6deacdd23923b Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Mon, 7 Jul 2025 21:13:32 +0200 Subject: [PATCH 073/305] feat(test): spin up e2e test nodes with imported data (#17261) --- Cargo.lock | 12 + crates/e2e-test-utils/Cargo.toml | 12 + crates/e2e-test-utils/src/lib.rs | 3 + crates/e2e-test-utils/src/setup_import.rs | 737 ++++++++++++++++++++++ 4 files changed, 764 insertions(+) create mode 100644 crates/e2e-test-utils/src/setup_import.rs diff --git a/Cargo.lock b/Cargo.lock index 09af074463e..0857be0bad1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7787,6 +7787,7 @@ dependencies = [ "alloy-eips", "alloy-network", "alloy-primitives", + "alloy-rlp", "alloy-rpc-types-engine", "alloy-rpc-types-eth", "alloy-signer", @@ -7796,9 +7797,15 @@ dependencies = [ "futures-util", "jsonrpsee", "reth-chainspec", + "reth-cli-commands", + "reth-config", + "reth-consensus", "reth-db", + "reth-db-common", "reth-engine-local", + "reth-ethereum-consensus", "reth-ethereum-primitives", + "reth-evm", "reth-network-api", "reth-network-peers", "reth-node-api", @@ -7808,18 +7815,23 @@ dependencies = [ "reth-payload-builder", "reth-payload-builder-primitives", "reth-payload-primitives", + "reth-primitives", + "reth-primitives-traits", "reth-provider", + "reth-prune-types", "reth-rpc-api", "reth-rpc-builder", "reth-rpc-eth-api", "reth-rpc-layer", "reth-rpc-server-types", "reth-stages-types", + "reth-static-file", "reth-tasks", "reth-tokio-util", "reth-tracing", "revm", "serde_json", + "tempfile", "tokio", "tokio-stream", "tracing", diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 1ff7dcb0885..3aa26b9e1d5 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -34,8 +34,19 @@ reth-engine-local.workspace = true reth-tasks.workspace = true reth-node-ethereum.workspace = true reth-ethereum-primitives.workspace = true +reth-cli-commands.workspace = true +reth-config.workspace = true +reth-consensus.workspace = true +reth-evm.workspace = true +reth-static-file.workspace = true +reth-ethereum-consensus.workspace = true +reth-primitives.workspace = true +reth-prune-types.workspace = true +reth-db-common.workspace = true +reth-primitives-traits.workspace = true revm.workspace = true +tempfile.workspace = true # rpc jsonrpsee.workspace = true @@ -44,6 +55,7 @@ url.workspace = true # ethereum alloy-primitives.workspace = true alloy-eips.workspace = true +alloy-rlp.workspace = true futures-util.workspace = true eyre.workspace = true diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 0a2aa467e7d..647a1ae62e2 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -33,6 +33,9 @@ pub mod wallet; /// Helper for payload operations mod payload; +/// Helper for setting up nodes with pre-imported chain data +pub mod setup_import; + /// Helper for network operations mod network; diff --git a/crates/e2e-test-utils/src/setup_import.rs b/crates/e2e-test-utils/src/setup_import.rs 
new file mode 100644 index 00000000000..23977418dce --- /dev/null +++ b/crates/e2e-test-utils/src/setup_import.rs @@ -0,0 +1,737 @@ +//! Setup utilities for importing RLP chain data before starting nodes. + +use crate::{node::NodeTestContext, NodeHelperType, Wallet}; +use reth_chainspec::ChainSpec; +use reth_cli_commands::import_op::{import_blocks_from_file, ImportConfig}; +use reth_config::Config; +use reth_db::DatabaseEnv; +use reth_node_api::{NodeTypesWithDBAdapter, TreeConfig}; +use reth_node_builder::{EngineNodeLauncher, Node, NodeBuilder, NodeConfig, NodeHandle}; +use reth_node_core::args::{DiscoveryArgs, NetworkArgs, RpcServerArgs}; +use reth_node_ethereum::EthereumNode; +use reth_provider::{ + providers::BlockchainProvider, DatabaseProviderFactory, ProviderFactory, StageCheckpointReader, + StaticFileProviderFactory, +}; +use reth_rpc_server_types::RpcModuleSelection; +use reth_stages_types::StageId; +use reth_tasks::TaskManager; +use std::{path::Path, sync::Arc}; +use tempfile::TempDir; +use tracing::{debug, info, span, Level}; + +/// Setup result containing nodes and temporary directories that must be kept alive +#[allow(missing_debug_implementations)] +pub struct ChainImportResult { + /// The nodes that were created + pub nodes: Vec>, + /// The task manager + pub task_manager: TaskManager, + /// The wallet for testing + pub wallet: Wallet, + /// Temporary directories that must be kept alive for the duration of the test + pub _temp_dirs: Vec, +} + +/// Creates a test setup with Ethereum nodes that have pre-imported chain data from RLP files. +/// +/// This function: +/// 1. Creates a temporary datadir for each node +/// 2. Imports the specified RLP chain data into the datadir +/// 3. Starts the nodes with the pre-populated database +/// 4. 
Returns the running nodes ready for testing +pub async fn setup_engine_with_chain_import( + num_nodes: usize, + chain_spec: Arc, + is_dev: bool, + tree_config: TreeConfig, + rlp_path: &Path, + attributes_generator: impl Fn(u64) -> reth_payload_builder::EthPayloadBuilderAttributes + + Send + + Sync + + Copy + + 'static, +) -> eyre::Result { + let tasks = TaskManager::current(); + let exec = tasks.executor(); + + let network_config = NetworkArgs { + discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, + ..NetworkArgs::default() + }; + + // Create nodes with imported data + let mut nodes: Vec> = Vec::with_capacity(num_nodes); + let mut temp_dirs = Vec::with_capacity(num_nodes); // Keep temp dirs alive + + for idx in 0..num_nodes { + // Create a temporary datadir for this node + let temp_dir = TempDir::new()?; + let datadir = temp_dir.path().to_path_buf(); + + let mut node_config = NodeConfig::new(chain_spec.clone()) + .with_network(network_config.clone()) + .with_unused_ports() + .with_rpc( + RpcServerArgs::default() + .with_unused_ports() + .with_http() + .with_http_api(RpcModuleSelection::All), + ) + .set_dev(is_dev); + + // Set the datadir + node_config.datadir.datadir = + reth_node_core::dirs::MaybePlatformPath::from(datadir.clone()); + debug!(target: "e2e::import", "Node {idx} datadir: {datadir:?}"); + + let span = span!(Level::INFO, "node", idx); + let _enter = span.enter(); + + // First, import the chain data into this datadir + info!(target: "test", "Importing chain data from {:?} for node {} into {:?}", rlp_path, idx, datadir); + + // Create database path and static files path + let db_path = datadir.join("db"); + let static_files_path = datadir.join("static_files"); + + // Initialize the database using init_db (same as CLI import command) + // Use the same database arguments as the node will use + let db_args = reth_node_core::args::DatabaseArgs::default().database_args(); + let db_env = reth_db::init_db(&db_path, db_args)?; + let db = Arc::new(db_env); + + // Create a provider factory with the initialized database (use regular DB, not + // TempDatabase) We need to specify the node types properly for the adapter + type ImportNodeTypes = reth_node_ethereum::EthereumNode; + let provider_factory = ProviderFactory::< + NodeTypesWithDBAdapter>, + >::new( + db.clone(), + chain_spec.clone(), + reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone())?, + ); + + // Initialize genesis if needed + reth_db_common::init::init_genesis(&provider_factory)?; + + // Import the chain data + let import_config = ImportConfig::default(); + let config = Config::default(); + + // Create EVM and consensus for Ethereum + let evm_config = reth_node_ethereum::EthEvmConfig::new(chain_spec.clone()); + let consensus = reth_ethereum_consensus::EthBeaconConsensus::new(chain_spec.clone()); + + let result = import_blocks_from_file( + rlp_path, + import_config, + provider_factory.clone(), + &config, + evm_config, + Arc::new(consensus), + ) + .await?; + + info!( + target: "test", + "Imported {} blocks and {} transactions for node {}", + result.total_imported_blocks, + result.total_imported_txns, + idx + ); + + debug!(target: "e2e::import", + "Import result for node {}: decoded {} blocks, imported {} blocks, complete: {}", + idx, + result.total_decoded_blocks, + result.total_imported_blocks, + result.is_complete() + ); + + // The import counts genesis block in total_imported_blocks, so we expect + // total_imported_blocks to be total_decoded_blocks + 1 + let 
expected_imported = result.total_decoded_blocks + 1; // +1 for genesis + if result.total_imported_blocks != expected_imported { + debug!(target: "e2e::import", + "Import block count mismatch: expected {} (decoded {} + genesis), got {}", + expected_imported, result.total_decoded_blocks, result.total_imported_blocks + ); + return Err(eyre::eyre!("Chain import block count mismatch for node {}", idx)); + } + + if result.total_decoded_txns != result.total_imported_txns { + debug!(target: "e2e::import", + "Import transaction count mismatch: decoded {} != imported {}", + result.total_decoded_txns, result.total_imported_txns + ); + return Err(eyre::eyre!("Chain import transaction count mismatch for node {}", idx)); + } + + // Verify the database was properly initialized by checking stage checkpoints + { + let provider = provider_factory.database_provider_ro()?; + let headers_checkpoint = provider.get_stage_checkpoint(StageId::Headers)?; + if headers_checkpoint.is_none() { + return Err(eyre::eyre!("Headers stage checkpoint is missing after import!")); + } + debug!(target: "e2e::import", "Headers stage checkpoint after import: {headers_checkpoint:?}"); + drop(provider); + } + + // IMPORTANT: We need to properly flush and close the static files provider + // The static files provider may have open file handles that need to be closed + // before we can reopen the database in the node launcher + { + let static_file_provider = provider_factory.static_file_provider(); + // This will ensure all static file writers are properly closed + drop(static_file_provider); + } + + // Close all database handles to release locks before launching the node + drop(provider_factory); + drop(db); + + // Give the OS a moment to release file locks + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // Now launch the node with the pre-populated datadir + debug!(target: "e2e::import", "Launching node with datadir: {:?}", datadir); + + // Use the testing_node_with_datadir method which properly handles opening existing + // databases + let node = EthereumNode::default(); + + let NodeHandle { node, node_exit_future: _ } = NodeBuilder::new(node_config.clone()) + .testing_node_with_datadir(exec.clone(), datadir.clone()) + .with_types_and_provider::>() + .with_components(node.components_builder()) + .with_add_ons(node.add_ons()) + .launch_with_fn(|builder| { + let launcher = EngineNodeLauncher::new( + builder.task_executor().clone(), + builder.config().datadir(), + tree_config.clone(), + ); + builder.launch_with(launcher) + }) + .await?; + + let node_ctx = NodeTestContext::new(node, attributes_generator).await?; + + nodes.push(node_ctx); + temp_dirs.push(temp_dir); // Keep temp dir alive + } + + Ok(ChainImportResult { + nodes, + task_manager: tasks, + wallet: crate::Wallet::default().with_chain_id(chain_spec.chain().into()), + _temp_dirs: temp_dirs, + }) +} + +/// Helper to load forkchoice state from a JSON file +pub fn load_forkchoice_state(path: &Path) -> eyre::Result { + let json_str = std::fs::read_to_string(path)?; + let fcu_data: serde_json::Value = serde_json::from_str(&json_str)?; + + let state = &fcu_data["forkchoiceState"]; + Ok(alloy_rpc_types_engine::ForkchoiceState { + head_block_hash: state["headBlockHash"] + .as_str() + .ok_or_else(|| eyre::eyre!("missing headBlockHash"))? + .parse()?, + safe_block_hash: state["safeBlockHash"] + .as_str() + .ok_or_else(|| eyre::eyre!("missing safeBlockHash"))? 
+ .parse()?, + finalized_block_hash: state["finalizedBlockHash"] + .as_str() + .ok_or_else(|| eyre::eyre!("missing finalizedBlockHash"))? + .parse()?, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::{constants::EMPTY_WITHDRAWALS, BlockHeader, Header}; + use alloy_eips::eip4895::Withdrawals; + use alloy_primitives::{Address, B256, B64, U256}; + use reth_chainspec::{ChainSpecBuilder, EthereumHardforks, MAINNET}; + use reth_db::mdbx::DatabaseArguments; + use reth_ethereum_primitives::{Block, BlockBody}; + use reth_payload_builder::EthPayloadBuilderAttributes; + use reth_primitives::SealedBlock; + use reth_primitives_traits::Block as BlockTrait; + use reth_provider::{ + test_utils::MockNodeTypesWithDB, BlockHashReader, BlockNumReader, BlockReaderIdExt, + }; + use std::io::Write; + + /// Generate test blocks for a given chain spec + fn generate_test_blocks(chain_spec: &ChainSpec, count: u64) -> Vec { + let mut blocks: Vec = Vec::new(); + let genesis_header = chain_spec.sealed_genesis_header(); + let mut parent_hash = genesis_header.hash(); + let mut parent_number = genesis_header.number(); + let mut parent_base_fee = genesis_header.base_fee_per_gas; + let mut parent_gas_limit = genesis_header.gas_limit; + debug!(target: "e2e::import", + "Genesis header base fee: {:?}, gas limit: {}, state root: {:?}", + parent_base_fee, + parent_gas_limit, + genesis_header.state_root() + ); + + for i in 1..=count { + // Create a simple header + let mut header = Header { + parent_hash, + number: parent_number + 1, + gas_limit: parent_gas_limit, // Use parent's gas limit + gas_used: 0, // Empty blocks use no gas + timestamp: genesis_header.timestamp() + i * 12, // 12 second blocks + beneficiary: Address::ZERO, + receipts_root: alloy_consensus::constants::EMPTY_RECEIPTS, + logs_bloom: Default::default(), + difficulty: U256::from(1), // Will be overridden for post-merge + // Use the same state root as parent for now (empty state changes) + state_root: if i == 1 { + genesis_header.state_root() + } else { + blocks.last().unwrap().state_root + }, + transactions_root: alloy_consensus::constants::EMPTY_TRANSACTIONS, + ommers_hash: alloy_consensus::constants::EMPTY_OMMER_ROOT_HASH, + mix_hash: B256::ZERO, + nonce: B64::from(0u64), + extra_data: Default::default(), + base_fee_per_gas: None, + withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, + requests_hash: None, + }; + + // Set required fields based on chain spec + if chain_spec.is_london_active_at_block(header.number) { + // Calculate base fee based on parent block + if let Some(parent_fee) = parent_base_fee { + // For the first block, we need to use the exact expected base fee + // The consensus rules expect it to be calculated from the genesis + let (parent_gas_used, parent_gas_limit) = if i == 1 { + // Genesis block parameters + (genesis_header.gas_used, genesis_header.gas_limit) + } else { + let last_block = blocks.last().unwrap(); + (last_block.gas_used, last_block.gas_limit) + }; + header.base_fee_per_gas = Some(alloy_eips::calc_next_block_base_fee( + parent_gas_used, + parent_gas_limit, + parent_fee, + chain_spec.base_fee_params_at_timestamp(header.timestamp), + )); + debug!(target: "e2e::import", "Block {} calculated base fee: {:?} (parent gas used: {}, parent gas limit: {}, parent base fee: {})", + i, header.base_fee_per_gas, parent_gas_used, parent_gas_limit, parent_fee); + parent_base_fee = header.base_fee_per_gas; + } + } + + // For post-merge blocks + if 
chain_spec.is_paris_active_at_block(header.number) { + header.difficulty = U256::ZERO; + header.nonce = B64::ZERO; + } + + // For post-shanghai blocks + if chain_spec.is_shanghai_active_at_timestamp(header.timestamp) { + header.withdrawals_root = Some(EMPTY_WITHDRAWALS); + } + + // For post-cancun blocks + if chain_spec.is_cancun_active_at_timestamp(header.timestamp) { + header.blob_gas_used = Some(0); + header.excess_blob_gas = Some(0); + header.parent_beacon_block_root = Some(B256::ZERO); + } + + // Create an empty block body + let body = BlockBody { + transactions: vec![], + ommers: vec![], + withdrawals: header.withdrawals_root.is_some().then(Withdrawals::default), + }; + + // Create the block + let block = Block { header: header.clone(), body: body.clone() }; + let sealed_block = BlockTrait::seal_slow(block); + + debug!(target: "e2e::import", + "Generated block {} with hash {:?}", + sealed_block.number(), + sealed_block.hash() + ); + debug!(target: "e2e::import", + " Body has {} transactions, {} ommers, withdrawals: {}", + body.transactions.len(), + body.ommers.len(), + body.withdrawals.is_some() + ); + + // Update parent for next iteration + parent_hash = sealed_block.hash(); + parent_number = sealed_block.number(); + parent_gas_limit = sealed_block.gas_limit; + if header.base_fee_per_gas.is_some() { + parent_base_fee = header.base_fee_per_gas; + } + + blocks.push(sealed_block); + } + + blocks + } + + /// Write blocks to RLP file + fn write_blocks_to_rlp(blocks: &[SealedBlock], path: &Path) -> std::io::Result<()> { + use alloy_rlp::Encodable; + + let mut file = std::fs::File::create(path)?; + let mut total_bytes = 0; + + for (i, block) in blocks.iter().enumerate() { + // Convert SealedBlock to Block before encoding + let block_for_encoding = block.clone().unseal(); + + let mut buf = Vec::new(); + block_for_encoding.encode(&mut buf); + debug!(target: "e2e::import", + "Block {} has {} transactions, encoded to {} bytes", + i, + block.body().transactions.len(), + buf.len() + ); + + // Debug: check what's in the encoded data + debug!(target: "e2e::import", "Block {} encoded to {} bytes", i, buf.len()); + if buf.len() < 20 { + debug!(target: "e2e::import", " Raw bytes: {:?}", &buf); + } else { + debug!(target: "e2e::import", " First 20 bytes: {:?}", &buf[..20]); + } + + total_bytes += buf.len(); + file.write_all(&buf)?; + } + + file.flush()?; + debug!(target: "e2e::import", "Total RLP bytes written: {total_bytes}"); + Ok(()) + } + + /// Create FCU JSON for the tip of the chain + fn create_fcu_json(tip: &SealedBlock) -> serde_json::Value { + serde_json::json!({ + "forkchoiceState": { + "headBlockHash": format!("0x{:x}", tip.hash()), + "safeBlockHash": format!("0x{:x}", tip.hash()), + "finalizedBlockHash": format!("0x{:x}", tip.hash()), + } + }) + } + + #[tokio::test] + async fn test_stage_checkpoints_persistence() { + // This test specifically verifies that stage checkpoints are persisted correctly + // when reopening the database + reth_tracing::init_test_tracing(); + + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis( + serde_json::from_str(include_str!("testsuite/assets/genesis.json")).unwrap(), + ) + .london_activated() + .shanghai_activated() + .build(), + ); + + // Generate test blocks + let test_blocks = generate_test_blocks(&chain_spec, 5); + + // Create temporary files for RLP data + let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); + let rlp_path = temp_dir.path().join("test_chain.rlp"); + 
write_blocks_to_rlp(&test_blocks, &rlp_path).expect("Failed to write RLP data"); + + // Create a persistent datadir that won't be deleted + let datadir = temp_dir.path().join("datadir"); + std::fs::create_dir_all(&datadir).unwrap(); + let db_path = datadir.join("db"); + let static_files_path = datadir.join("static_files"); + + // Import the chain + { + let db_env = reth_db::init_db(&db_path, DatabaseArguments::default()).unwrap(); + let db = Arc::new(db_env); + + let provider_factory: ProviderFactory< + NodeTypesWithDBAdapter>, + > = ProviderFactory::new( + db.clone(), + chain_spec.clone(), + reth_provider::providers::StaticFileProvider::read_write(static_files_path.clone()) + .unwrap(), + ); + + // Initialize genesis + reth_db_common::init::init_genesis(&provider_factory).unwrap(); + + // Import the chain data + let import_config = ImportConfig::default(); + let config = Config::default(); + let evm_config = reth_node_ethereum::EthEvmConfig::new(chain_spec.clone()); + let consensus = reth_ethereum_consensus::EthBeaconConsensus::new(chain_spec.clone()); + + let result = import_blocks_from_file( + &rlp_path, + import_config, + provider_factory.clone(), + &config, + evm_config, + Arc::new(consensus), + ) + .await + .unwrap(); + + assert_eq!(result.total_decoded_blocks, 5); + assert_eq!(result.total_imported_blocks, 6); // +1 for genesis + + // Verify stage checkpoints exist + let provider = provider_factory.database_provider_ro().unwrap(); + let headers_checkpoint = provider.get_stage_checkpoint(StageId::Headers).unwrap(); + assert!(headers_checkpoint.is_some(), "Headers checkpoint should exist after import"); + assert_eq!( + headers_checkpoint.unwrap().block_number, + 5, + "Headers checkpoint should be at block 5" + ); + drop(provider); + + // Properly close static files to release all file handles + let static_file_provider = provider_factory.static_file_provider(); + drop(static_file_provider); + + drop(provider_factory); + drop(db); + } + + // Give the OS a moment to release file locks + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + // Now reopen the database and verify checkpoints are still there + { + let db_env = reth_db::init_db(&db_path, DatabaseArguments::default()).unwrap(); + let db = Arc::new(db_env); + + let provider_factory: ProviderFactory< + NodeTypesWithDBAdapter>, + > = ProviderFactory::new( + db, + chain_spec.clone(), + reth_provider::providers::StaticFileProvider::read_only(static_files_path, false) + .unwrap(), + ); + + let provider = provider_factory.database_provider_ro().unwrap(); + + // Check that stage checkpoints are still present + let headers_checkpoint = provider.get_stage_checkpoint(StageId::Headers).unwrap(); + assert!( + headers_checkpoint.is_some(), + "Headers checkpoint should still exist after reopening database" + ); + assert_eq!( + headers_checkpoint.unwrap().block_number, + 5, + "Headers checkpoint should still be at block 5" + ); + + // Verify we can read blocks + let block_5_hash = provider.block_hash(5).unwrap(); + assert!(block_5_hash.is_some(), "Block 5 should exist in database"); + assert_eq!(block_5_hash.unwrap(), test_blocks[4].hash(), "Block 5 hash should match"); + + // Check all stage checkpoints + debug!(target: "e2e::import", "All stage checkpoints after reopening:"); + for stage in StageId::ALL { + let checkpoint = provider.get_stage_checkpoint(stage).unwrap(); + debug!(target: "e2e::import", " Stage {stage:?}: {checkpoint:?}"); + } + } + } + + /// Helper to create test chain spec + fn create_test_chain_spec() -> 
Arc { + Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis( + serde_json::from_str(include_str!("testsuite/assets/genesis.json")).unwrap(), + ) + .london_activated() + .shanghai_activated() + .build(), + ) + } + + /// Helper to setup test blocks and write to RLP + async fn setup_test_blocks_and_rlp( + chain_spec: &ChainSpec, + block_count: u64, + temp_dir: &Path, + ) -> (Vec, std::path::PathBuf) { + let test_blocks = generate_test_blocks(chain_spec, block_count); + assert_eq!( + test_blocks.len(), + block_count as usize, + "Should have generated expected blocks" + ); + + let rlp_path = temp_dir.join("test_chain.rlp"); + write_blocks_to_rlp(&test_blocks, &rlp_path).expect("Failed to write RLP data"); + + let rlp_size = std::fs::metadata(&rlp_path).expect("RLP file should exist").len(); + debug!(target: "e2e::import", "Wrote RLP file with size: {rlp_size} bytes"); + + (test_blocks, rlp_path) + } + + #[tokio::test] + async fn test_import_blocks_only() { + // Tests just the block import functionality without full node setup + reth_tracing::init_test_tracing(); + + let chain_spec = create_test_chain_spec(); + let temp_dir = tempfile::tempdir().expect("Failed to create temp dir"); + let (test_blocks, rlp_path) = + setup_test_blocks_and_rlp(&chain_spec, 10, temp_dir.path()).await; + + // Create a test database + let datadir = temp_dir.path().join("datadir"); + std::fs::create_dir_all(&datadir).unwrap(); + let db_path = datadir.join("db"); + let db_env = reth_db::init_db(&db_path, DatabaseArguments::default()).unwrap(); + let db = Arc::new(reth_db::test_utils::TempDatabase::new(db_env, db_path)); + + // Create static files path + let static_files_path = datadir.join("static_files"); + + // Create a provider factory + let provider_factory: ProviderFactory = ProviderFactory::new( + db.clone(), + chain_spec.clone(), + reth_provider::providers::StaticFileProvider::read_write(static_files_path).unwrap(), + ); + + // Initialize genesis + reth_db_common::init::init_genesis(&provider_factory).unwrap(); + + // Import the chain data + let import_config = ImportConfig::default(); + let config = Config::default(); + let evm_config = reth_node_ethereum::EthEvmConfig::new(chain_spec.clone()); + let consensus = reth_ethereum_consensus::EthBeaconConsensus::new(chain_spec.clone()); + + let result = import_blocks_from_file( + &rlp_path, + import_config, + provider_factory.clone(), + &config, + evm_config, + Arc::new(consensus), + ) + .await + .unwrap(); + + debug!(target: "e2e::import", + "Import result: decoded {} blocks, imported {} blocks", + result.total_decoded_blocks, result.total_imported_blocks + ); + + // Verify the import was successful + assert_eq!(result.total_decoded_blocks, 10); + assert_eq!(result.total_imported_blocks, 11); // +1 for genesis + assert_eq!(result.total_decoded_txns, 0); + assert_eq!(result.total_imported_txns, 0); + + // Verify we can read the imported blocks + let provider = provider_factory.database_provider_ro().unwrap(); + let latest_block = provider.last_block_number().unwrap(); + assert_eq!(latest_block, 10, "Should have imported up to block 10"); + + let block_10_hash = provider.block_hash(10).unwrap().expect("Block 10 should exist"); + assert_eq!(block_10_hash, test_blocks[9].hash(), "Block 10 hash should match"); + } + + #[tokio::test] + async fn test_import_with_node_integration() { + // Tests the full integration with node setup, forkchoice updates, and syncing + reth_tracing::init_test_tracing(); + + let chain_spec = create_test_chain_spec(); + let 
temp_dir = tempfile::tempdir().expect("Failed to create temp dir");
+        let (test_blocks, rlp_path) =
+            setup_test_blocks_and_rlp(&chain_spec, 10, temp_dir.path()).await;
+
+        // Create FCU data for the tip
+        let tip = test_blocks.last().expect("Should have generated blocks");
+        let fcu_path = temp_dir.path().join("test_fcu.json");
+        std::fs::write(&fcu_path, create_fcu_json(tip).to_string())
+            .expect("Failed to write FCU data");
+
+        // Setup nodes with imported chain
+        let result = setup_engine_with_chain_import(
+            1,
+            chain_spec,
+            false,
+            TreeConfig::default(),
+            &rlp_path,
+            |_| EthPayloadBuilderAttributes::default(),
+        )
+        .await
+        .expect("Failed to setup nodes with chain import");
+
+        // Load and apply forkchoice state
+        let fcu_state = load_forkchoice_state(&fcu_path).expect("Failed to load forkchoice state");
+
+        let node = &result.nodes[0];
+
+        // Send forkchoice update to make the imported chain canonical
+        node.update_forkchoice(fcu_state.finalized_block_hash, fcu_state.head_block_hash)
+            .await
+            .expect("Failed to update forkchoice");
+
+        // Wait for the node to sync to the head
+        node.sync_to(fcu_state.head_block_hash).await.expect("Failed to sync to head");
+
+        // Verify the chain tip
+        let latest = node
+            .inner
+            .provider
+            .sealed_header_by_id(alloy_eips::BlockId::latest())
+            .expect("Failed to get latest header")
+            .expect("No latest header found");
+
+        assert_eq!(
+            latest.hash(),
+            fcu_state.head_block_hash,
+            "Chain tip does not match expected head"
+        );
+    }
+}

From 09b4c5e987f511a02433c585e87c42b875e2612e Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com>
Date: Mon, 7 Jul 2025 20:56:32 +0100
Subject: [PATCH 074/305] fix(trie): add lower subtrie root paths to upper subtrie prefix set (#17262)

---
 crates/trie/sparse-parallel/src/trie.rs | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs
index 2bc4f495047..5f7a0eb3f38 100644
--- a/crates/trie/sparse-parallel/src/trie.rs
+++ b/crates/trie/sparse-parallel/src/trie.rs
@@ -1006,6 +1006,18 @@ impl ParallelSparseTrie {
         }
         .freeze();

+        // We need to add the full path of the root node of the lower subtrie to the unchanged
+        // prefix set, so that we don't skip it when calculating hashes for the upper subtrie.
+        match subtrie.nodes.get(&subtrie.path) {
+            Some(SparseNode::Extension { key, .. } | SparseNode::Leaf { key, .. }) => {
+                unchanged_prefix_set.insert(subtrie.path.join(key));
+            }
+            Some(SparseNode::Branch { ..
}) => { + unchanged_prefix_set.insert(subtrie.path); + } + _ => {} + } + changed_subtries.push(ChangedSubtrie { index, subtrie, prefix_set }); } } @@ -1597,9 +1609,12 @@ impl SparseSubtrieInner { SparseNode::Leaf { key, hash } => { let mut path = path; path.extend(key); - if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path)) { - // If the node hash is already computed, and the node path is not in - // the prefix set, return the pre-computed hash + let value = self.values.get(&path); + if let Some(hash) = hash.filter(|_| !prefix_set_contains(&path) || value.is_none()) + { + // If the node hash is already computed, and either the node path is not in + // the prefix set or the leaf doesn't belong to the current trie (its value is + // absent), return the pre-computed hash (RlpNode::word_rlp(&hash), SparseNodeType::Leaf) } else { // Encode the leaf node and update its hash @@ -2867,6 +2882,7 @@ mod tests { let unchanged_prefix_set = PrefixSetMut::from([ Nibbles::from_nibbles([0x0]), + leaf_2_full_path, Nibbles::from_nibbles([0x2, 0x0, 0x0]), ]); // Create a prefix set with the keys that match only the second subtrie From e4574326ea64be47f5d65c2832bcfb241d3322ae Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 7 Jul 2025 23:50:37 +0200 Subject: [PATCH 075/305] chore: update size metrics once (#17242) --- crates/transaction-pool/src/pool/txpool.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 76607221fba..8c016c92a9c 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -853,6 +853,9 @@ impl TxPool { } } } + + self.update_size_metrics(); + outcome } @@ -1573,6 +1576,8 @@ impl AllTransactions { } /// Removes a transaction from the set using its id. + /// + /// This is intended for processing updates after state changes. pub(crate) fn remove_transaction_by_id( &mut self, tx_id: &TransactionId, @@ -1582,7 +1587,6 @@ impl AllTransactions { self.remove_auths(&internal); // decrement the counter for the sender. 
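+        // Size metrics are deliberately not refreshed per removal; the batch caller
+        // in `TxPool` (see the hunk above) calls `update_size_metrics` once after
+        // all removals have been processed.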
self.tx_decr(tx.sender_id()); - self.update_size_metrics(); Some((tx, internal.subpool)) } From 78bad34091ce6825d78525d68504b4d1ccbe3d65 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 8 Jul 2025 01:02:09 +0200 Subject: [PATCH 076/305] chore: check blob fee (#17272) --- crates/optimism/evm/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index a3f4e2042af..e493d6d9c52 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -135,7 +135,7 @@ where let blob_excess_gas_and_price = spec .into_eth_spec() .is_enabled_in(SpecId::CANCUN) - .then_some(BlobExcessGasAndPrice { excess_blob_gas: 0, blob_gasprice: 0 }); + .then_some(BlobExcessGasAndPrice { excess_blob_gas: 0, blob_gasprice: 1 }); let block_env = BlockEnv { number: U256::from(header.number()), @@ -177,7 +177,7 @@ where let blob_excess_gas_and_price = spec_id .into_eth_spec() .is_enabled_in(SpecId::CANCUN) - .then_some(BlobExcessGasAndPrice { excess_blob_gas: 0, blob_gasprice: 0 }); + .then_some(BlobExcessGasAndPrice { excess_blob_gas: 0, blob_gasprice: 1 }); let block_env = BlockEnv { number: U256::from(parent.number() + 1), From 1eccb5b7f6196a9e63d168e8a0d6b88c92322c5c Mon Sep 17 00:00:00 2001 From: Brian Date: Mon, 7 Jul 2025 19:08:48 -0400 Subject: [PATCH 077/305] fix: dead link (#17200) --- docs/vocs/docs/pages/run/ethereum.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/vocs/docs/pages/run/ethereum.mdx b/docs/vocs/docs/pages/run/ethereum.mdx index 885fca1d950..992cd70dd5f 100644 --- a/docs/vocs/docs/pages/run/ethereum.mdx +++ b/docs/vocs/docs/pages/run/ethereum.mdx @@ -88,7 +88,7 @@ In the meantime, consider setting up [observability](/run/monitoring) to monitor {/* TODO: Add more logs to help node operators debug any weird CL to EL messages! */} -[installation]: ./../installation/installation +[installation]: ./../installation/overview [docs]: https://github.com/paradigmxyz/reth/tree/main/docs [metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics.md#metrics From 36d568a404df3bca08b217ebade69cf4f1b38d0e Mon Sep 17 00:00:00 2001 From: Galoretka Date: Tue, 8 Jul 2025 02:09:14 +0300 Subject: [PATCH 078/305] chore: Fix typo in block number reader comment (#17250) --- crates/storage/provider/src/providers/database/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index eeedf55b7ac..a172fda90da 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -347,7 +347,7 @@ impl BlockNumReader for ProviderFactory { fn earliest_block_number(&self) -> ProviderResult { // earliest history height tracks the lowest block number that has __not__ been expired, in - // other words, the first/earlierst available block. + // other words, the first/earliest available block. 
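+        // (typically the genesis block, unless history expiry has already pruned the
+        // earliest blocks)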
Ok(self.static_file_provider.earliest_history_height()) } From af004c0c0d291c82ac1ec892f76c52573138015b Mon Sep 17 00:00:00 2001 From: VolodymyrBg Date: Tue, 8 Jul 2025 12:22:52 +0300 Subject: [PATCH 079/305] chore: fix typos (#17251) --- .../storage/provider/src/providers/blockchain_provider.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index 06118aa4141..f372d0c0c09 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -2052,7 +2052,7 @@ mod tests { // Test range that spans database and in-memory { - // This block will be persisted to disk and removed from memory AFTER the firsk database query. This ensures that we query the in-memory state before the database avoiding any race condition. + // This block will be persisted to disk and removed from memory AFTER the first database query. This ensures that we query the in-memory state before the database avoiding any race condition. persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[0].number); assert_eq!( @@ -2144,7 +2144,7 @@ mod tests { // Test range that spans database and in-memory { - // This block will be persisted to disk and removed from memory AFTER the firsk database query. This ensures that we query the in-memory state before the database avoiding any race condition. + // This block will be persisted to disk and removed from memory AFTER the first database query. This ensures that we query the in-memory state before the database avoiding any race condition. persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[0].number); assert_eq!( @@ -2261,7 +2261,7 @@ mod tests { // Ensure that the first generated in-memory block exists { - // This block will be persisted to disk and removed from memory AFTER the firsk database query. This ensures that we query the in-memory state before the database avoiding any race condition. + // This block will be persisted to disk and removed from memory AFTER the first database query. This ensures that we query the in-memory state before the database avoiding any race condition. 
persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[0].number); call_method!($arg_count, provider, $method, $item_extractor, tx_num, tx_hash, &in_memory_blocks[0], &receipts); From 7c69ab1c8d8cf72b257bd248427e49f43af4f0ef Mon Sep 17 00:00:00 2001 From: GarmashAlex Date: Tue, 8 Jul 2025 12:23:22 +0300 Subject: [PATCH 080/305] =?UTF-8?q?docs:=20fix=20typo=20basfee=20=E2=86=92?= =?UTF-8?q?=20basefee=20in=20txpool.mmd=20(#17252)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crates/transaction-pool/docs/mermaid/txpool.mmd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/transaction-pool/docs/mermaid/txpool.mmd b/crates/transaction-pool/docs/mermaid/txpool.mmd index 94f3abda3e6..e183d8f3c91 100644 --- a/crates/transaction-pool/docs/mermaid/txpool.mmd +++ b/crates/transaction-pool/docs/mermaid/txpool.mmd @@ -16,7 +16,7 @@ graph TB A[Incoming Tx] --> B[Validation] -->|insert| pool pool --> |if ready + blobfee too low| B4 pool --> |if ready| B1 - pool --> |if ready + basfee too low| B2 + pool --> |if ready + basefee too low| B2 pool --> |nonce gap or lack of funds| B3 pool --> |update| pool B1 --> |best| production From 5645659d593eb9dbb92629358cea350d57cfad2e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 8 Jul 2025 11:24:56 +0200 Subject: [PATCH 081/305] chore: bump alloy (#17275) --- Cargo.lock | 104 +++++++++++++-------------- Cargo.toml | 54 +++++++------- crates/rpc/rpc-eth-api/src/bundle.rs | 6 +- 3 files changed, 82 insertions(+), 82 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0857be0bad1..9ae8c5d081c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c6ad411efe0f49e0e99b9c7d8749a1eb55f6dbf74a1bc6953ab285b02c4f67" +checksum = "06b31d7560fdebcf24e21fcba9ed316c2fdf2854b2ca652a24741bf8192cd40a" dependencies = [ "alloy-eips", "alloy-primitives", @@ -153,9 +153,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "977b97d271159578afcb26e39e1ca5ce1a7f937697793d7d571b0166dd8b8225" +checksum = "5efc90130119c22079b468c30eab6feda1ab4981c3ea88ed8e12dc155cc26ea1" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -236,9 +236,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "749b8449e4daf7359bdf1dabdba6ce424ff8b1bdc23bdb795661b2e991a08d87" +checksum = "fe854e4051afb5b47931b78ba7b5af1952d06e903637430e98c8321192d29eca" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -279,9 +279,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fcbae2107f3f2df2b02bb7d9e81e8aa730ae371ca9dd7fd0c81c3d0cb78a452" +checksum = "38cc4c7363f48a2b61de395d9b2df52280e303a5af45a22ed33cf27cd30d7975" dependencies = [ "alloy-eips", "alloy-primitives", @@ -319,9 +319,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc30b0e20fcd0843834ecad2a716661c7b9d5aca2486f8e96b93d5246eb83e06" +checksum = 
"7b2c0cb72ef87173b9d78cd29be898820c44498ce60a7d5de82b577c8c002bb8" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -334,9 +334,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eaeb681024cf71f5ca14f3d812c0a8d8b49f13f7124713538e66d74d3bfe6aff" +checksum = "4965cff485617f5c2f4016a2e48503b735fb6ec3845ba86c68fdf338da9e85e7" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -433,9 +433,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abc164acf8c41c756e76c7aea3be8f0fb03f8a3ef90a33e3ddcea5d1614d8779" +checksum = "956116526887a732fb5823648544ae56c78a38cf56d4e1c2c076d7432a90674c" dependencies = [ "alloy-chains", "alloy-consensus", @@ -477,9 +477,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670d155c3e35bcaa252ca706a2757a456c56aa71b80ad1539d07d49b86304e78" +checksum = "b6b19131a9cbf17486ef7fa37663f8c3631c3fa606aec3d77733042066439d68" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -520,9 +520,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c44d31bcb9afad460915fe1fba004a2af5a07a3376c307b9bdfeec3678c209" +checksum = "5699f859f61936425d753c0709b8049ec7d83988ea4f0793526885f63d8d863b" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -548,9 +548,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba2cf3d3c6ece87f1c6bb88324a997f28cf0ad7e98d5e0b6fa91c4003c30916" +checksum = "cca073dd05362d7a66241468862a18d95255f5eb7c28a9d83b458c8216b751bd" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -561,9 +561,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4ce874dde17cc749f1aa8883e0c1431ddda6ba6dd9c9eb9b31d1fb0a6023830" +checksum = "74e7c6f85d9b38302ca0051420cb687d035f75cc1ff09cdf4f98991ff211fb9f" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -573,9 +573,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e80e2ffa56956a92af375df1422b239fde6552bd222dfeaeb39f07949060fa" +checksum = "a6425892f9addc08b0c687878feb8e4a61a89e085ffdf52865fd44fa1d54f84f" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -596,9 +596,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "438a7a3a5c5d11877787e324dd1ffd9ab82314ca145513ebe8d12257cbfefb5b" +checksum = "30d8751cf34201ceb637974388971e38abbd84f9e10a03103170ac7b1e9f3137" dependencies = [ "alloy-eips", "alloy-primitives", @@ -614,9 +614,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f53a2a78b44582e0742ab96d5782842d9b90cebf6ef6ccd8be864ae246fdd0f" 
+checksum = "2acde603d444a8f6f50bb79e1296602e8c0bf193b2fa9af0afe0287e8aaf87df" dependencies = [ "alloy-primitives", "serde", @@ -624,9 +624,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7041c3fd4dcd7af95e86280944cc33b4492ac2ddbe02f84079f8019742ec2857" +checksum = "24aa5a872715979dbb831ed9a50e983a1d2500c44ded79550000c905a4d5ca8e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -645,9 +645,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391e59f81bacbffc7bddd2da3a26d6eec0e2058e9237c279e9b1052bdf21b49e" +checksum = "fce2ac0e27fe24f27f1a6d0e0088b94c03c67dfcfb0461813a4a44b8197a8105" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -666,9 +666,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3f327d4cd140eca2c6c27c82c381aba6fa6a32cbb697c146b5607532f82167" +checksum = "6e082bf96fb0eec9efa1d981d6d9ff9880266884aea32ecf2f344c25073e19d5" dependencies = [ "alloy-consensus", "alloy-eips", @@ -681,9 +681,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29d96238f37e8a72dcf2cf6bead4c4f91dec1c0720b12be10558406e1633a804" +checksum = "18db18563210da6a1e7e172c9bf0049bc8e00058e31043458ec3cae92c51d1cb" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -695,9 +695,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e45d00db47a280d0a6e498b6e63344bccd9485d8860d2e2f06b680200c37ebc2" +checksum = "a5c202af188d9a60000d09309c6a2605cabf49d0b1de0307c3b9f221e8a545a5" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -707,9 +707,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea08bc854235d4dff08fd57df8033285c11b8d7548b20c6da218194e7e6035f" +checksum = "1ad318c341481a5f5e50d69d830853873d8b5e8d2b73ea2c0da69cf78537c970" dependencies = [ "alloy-primitives", "arbitrary", @@ -719,9 +719,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcb3759f85ef5f010a874d9ebd5ee6ce01cac65211510863124e0ebac6552db0" +checksum = "a758b004483b906d622f607d27e1bc0923246a092adc475069b5509ab83c8148" dependencies = [ "alloy-primitives", "async-trait", @@ -734,9 +734,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14d95902d29e1290809e1c967a1e974145b44b78f6e3e12fc07a60c1225e3df0" +checksum = "51d44ff6b720feb3fc17763f5d6cd90e57b05400acd2a5083a7d7020e351e5bb" dependencies = [ "alloy-consensus", "alloy-network", @@ -822,9 +822,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcdf4b7fc58ebb2605b2fc5a33dae5cf15527ea70476978351cc0db1c596ea93" +checksum = 
"3e551a125a5a96377ee0befc63db27b68078873d316c65b74587f14704dac630" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -845,9 +845,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4b0f3a9c28bcd3761504d9eb3578838d6d115c8959fc1ea05f59a3a8f691af" +checksum = "8640f66b33f0d85df0fcb0528739fb5d424f691a7c58963395b2417a68274703" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -860,9 +860,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "758edb7c266374374e001c50fb1ea89cb5ed48d47ffbf297599f2a557804dd3b" +checksum = "d88ab7ac8a7aac07313bdeabbcd70818e6f675e4a9f101a3056d15aeb15be279" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -880,9 +880,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5596b913d1299ee37a9c1bb5118b2639bf253dc1088957bdf2073ae63a6fdfa" +checksum = "972664516ff27c90b156a7df9870d813b85b948d5063d3a1e9093109810b77b7" dependencies = [ "alloy-pubsub", "alloy-transport", diff --git a/Cargo.toml b/Cargo.toml index 67ba12f33bd..348fcef66c5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -476,33 +476,33 @@ alloy-trie = { version = "0.9.0", default-features = false } alloy-hardforks = "0.2.7" -alloy-consensus = { version = "1.0.17", default-features = false } -alloy-contract = { version = "1.0.17", default-features = false } -alloy-eips = { version = "1.0.17", default-features = false } -alloy-genesis = { version = "1.0.17", default-features = false } -alloy-json-rpc = { version = "1.0.17", default-features = false } -alloy-network = { version = "1.0.17", default-features = false } -alloy-network-primitives = { version = "1.0.17", default-features = false } -alloy-provider = { version = "1.0.17", features = ["reqwest"], default-features = false } -alloy-pubsub = { version = "1.0.17", default-features = false } -alloy-rpc-client = { version = "1.0.17", default-features = false } -alloy-rpc-types = { version = "1.0.17", features = ["eth"], default-features = false } -alloy-rpc-types-admin = { version = "1.0.17", default-features = false } -alloy-rpc-types-anvil = { version = "1.0.17", default-features = false } -alloy-rpc-types-beacon = { version = "1.0.17", default-features = false } -alloy-rpc-types-debug = { version = "1.0.17", default-features = false } -alloy-rpc-types-engine = { version = "1.0.17", default-features = false } -alloy-rpc-types-eth = { version = "1.0.17", default-features = false } -alloy-rpc-types-mev = { version = "1.0.17", default-features = false } -alloy-rpc-types-trace = { version = "1.0.17", default-features = false } -alloy-rpc-types-txpool = { version = "1.0.17", default-features = false } -alloy-serde = { version = "1.0.17", default-features = false } -alloy-signer = { version = "1.0.17", default-features = false } -alloy-signer-local = { version = "1.0.17", default-features = false } -alloy-transport = { version = "1.0.17" } -alloy-transport-http = { version = "1.0.17", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.0.17", default-features = false } -alloy-transport-ws = { version = "1.0.17", default-features = false } +alloy-consensus = { version = "1.0.18", default-features = false } +alloy-contract = { 
version = "1.0.18", default-features = false } +alloy-eips = { version = "1.0.18", default-features = false } +alloy-genesis = { version = "1.0.18", default-features = false } +alloy-json-rpc = { version = "1.0.18", default-features = false } +alloy-network = { version = "1.0.18", default-features = false } +alloy-network-primitives = { version = "1.0.18", default-features = false } +alloy-provider = { version = "1.0.18", features = ["reqwest"], default-features = false } +alloy-pubsub = { version = "1.0.18", default-features = false } +alloy-rpc-client = { version = "1.0.18", default-features = false } +alloy-rpc-types = { version = "1.0.18", features = ["eth"], default-features = false } +alloy-rpc-types-admin = { version = "1.0.18", default-features = false } +alloy-rpc-types-anvil = { version = "1.0.18", default-features = false } +alloy-rpc-types-beacon = { version = "1.0.18", default-features = false } +alloy-rpc-types-debug = { version = "1.0.18", default-features = false } +alloy-rpc-types-engine = { version = "1.0.18", default-features = false } +alloy-rpc-types-eth = { version = "1.0.18", default-features = false } +alloy-rpc-types-mev = { version = "1.0.18", default-features = false } +alloy-rpc-types-trace = { version = "1.0.18", default-features = false } +alloy-rpc-types-txpool = { version = "1.0.18", default-features = false } +alloy-serde = { version = "1.0.18", default-features = false } +alloy-signer = { version = "1.0.18", default-features = false } +alloy-signer-local = { version = "1.0.18", default-features = false } +alloy-transport = { version = "1.0.18" } +alloy-transport-http = { version = "1.0.18", features = ["reqwest-rustls-tls"], default-features = false } +alloy-transport-ipc = { version = "1.0.18", default-features = false } +alloy-transport-ws = { version = "1.0.18", default-features = false } # op alloy-op-evm = { version = "0.14", default-features = false } diff --git a/crates/rpc/rpc-eth-api/src/bundle.rs b/crates/rpc/rpc-eth-api/src/bundle.rs index 1197d6afe50..79e64fae02d 100644 --- a/crates/rpc/rpc-eth-api/src/bundle.rs +++ b/crates/rpc/rpc-eth-api/src/bundle.rs @@ -4,8 +4,8 @@ use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_mev::{ - CancelBundleRequest, CancelPrivateTransactionRequest, EthBundleHash, EthCallBundle, - EthCallBundleResponse, EthSendBundle, PrivateTransactionRequest, + CancelPrivateTransactionRequest, EthBundleHash, EthCallBundle, EthCallBundleResponse, + EthCancelBundle, EthSendBundle, PrivateTransactionRequest, }; use jsonrpsee::proc_macros::rpc; @@ -43,7 +43,7 @@ pub trait EthBundleApi { /// `eth_cancelBundle` is used to prevent a submitted bundle from being included on-chain. See [bundle cancellations](https://docs.flashbots.net/flashbots-auction/advanced/bundle-cancellations) for more information. #[method(name = "cancelBundle")] - async fn cancel_bundle(&self, request: CancelBundleRequest) -> jsonrpsee::core::RpcResult<()>; + async fn cancel_bundle(&self, request: EthCancelBundle) -> jsonrpsee::core::RpcResult<()>; /// `eth_sendPrivateTransaction` is used to send a single transaction to Flashbots. Flashbots will attempt to build a block including the transaction for the next 25 blocks. See [Private Transactions](https://docs.flashbots.net/flashbots-protect/additional-documentation/eth-sendPrivateTransaction) for more info. 
#[method(name = "sendPrivateTransaction")] From 557836b93d8fe3857f1ac5adcb4c54d7323c890b Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 8 Jul 2025 11:26:27 +0200 Subject: [PATCH 082/305] feat(test): add apply_with_import method to e2e Setup (#17263) --- Cargo.lock | 1 + crates/e2e-test-utils/Cargo.toml | 13 +- crates/e2e-test-utils/src/lib.rs | 3 + crates/e2e-test-utils/src/setup_import.rs | 206 ++----------- crates/e2e-test-utils/src/test_rlp_utils.rs | 185 ++++++++++++ .../src/testsuite/actions/mod.rs | 3 +- .../src/testsuite/actions/node_ops.rs | 42 +++ .../e2e-test-utils/src/testsuite/examples.rs | 106 ++++++- crates/e2e-test-utils/src/testsuite/mod.rs | 41 ++- crates/e2e-test-utils/src/testsuite/setup.rs | 276 ++++++++++++------ 10 files changed, 592 insertions(+), 284 deletions(-) create mode 100644 crates/e2e-test-utils/src/test_rlp_utils.rs diff --git a/Cargo.lock b/Cargo.lock index 9ae8c5d081c..fd883e79079 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7787,6 +7787,7 @@ dependencies = [ "alloy-eips", "alloy-network", "alloy-primitives", + "alloy-provider", "alloy-rlp", "alloy-rpc-types-engine", "alloy-rpc-types-eth", diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 3aa26b9e1d5..997cb5a3570 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -56,17 +56,18 @@ url.workspace = true alloy-primitives.workspace = true alloy-eips.workspace = true alloy-rlp.workspace = true - -futures-util.workspace = true -eyre.workspace = true -tokio.workspace = true -tokio-stream.workspace = true -serde_json.workspace = true alloy-signer.workspace = true alloy-signer-local = { workspace = true, features = ["mnemonic"] } alloy-rpc-types-eth.workspace = true alloy-rpc-types-engine.workspace = true alloy-network.workspace = true alloy-consensus = { workspace = true, features = ["kzg"] } +alloy-provider = { workspace = true, features = ["reqwest"] } + +futures-util.workspace = true +eyre.workspace = true +tokio.workspace = true +tokio-stream.workspace = true +serde_json.workspace = true tracing.workspace = true derive_more.workspace = true diff --git a/crates/e2e-test-utils/src/lib.rs b/crates/e2e-test-utils/src/lib.rs index 647a1ae62e2..0037197f261 100644 --- a/crates/e2e-test-utils/src/lib.rs +++ b/crates/e2e-test-utils/src/lib.rs @@ -42,6 +42,9 @@ mod network; /// Helper for rpc operations mod rpc; +/// Utilities for creating and writing RLP test data +pub mod test_rlp_utils; + /// Creates the initial setup with `num_nodes` started and interconnected. 
pub async fn setup( num_nodes: usize, diff --git a/crates/e2e-test-utils/src/setup_import.rs b/crates/e2e-test-utils/src/setup_import.rs index 23977418dce..b19bc48c9c4 100644 --- a/crates/e2e-test-utils/src/setup_import.rs +++ b/crates/e2e-test-utils/src/setup_import.rs @@ -21,7 +21,6 @@ use tempfile::TempDir; use tracing::{debug, info, span, Level}; /// Setup result containing nodes and temporary directories that must be kept alive -#[allow(missing_debug_implementations)] pub struct ChainImportResult { /// The nodes that were created pub nodes: Vec>, @@ -33,6 +32,16 @@ pub struct ChainImportResult { pub _temp_dirs: Vec, } +impl std::fmt::Debug for ChainImportResult { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ChainImportResult") + .field("nodes", &self.nodes.len()) + .field("wallet", &self.wallet) + .field("temp_dirs", &self._temp_dirs.len()) + .finish() + } +} + /// Creates a test setup with Ethereum nodes that have pre-imported chain data from RLP files. /// /// This function: @@ -40,6 +49,10 @@ pub struct ChainImportResult { /// 2. Imports the specified RLP chain data into the datadir /// 3. Starts the nodes with the pre-populated database /// 4. Returns the running nodes ready for testing +/// +/// Note: This function is currently specific to `EthereumNode` because the import process +/// uses Ethereum-specific consensus and block format. It can be made generic in the future +/// by abstracting the import process. pub async fn setup_engine_with_chain_import( num_nodes: usize, chain_spec: Arc, @@ -103,9 +116,8 @@ pub async fn setup_engine_with_chain_import( // Create a provider factory with the initialized database (use regular DB, not // TempDatabase) We need to specify the node types properly for the adapter - type ImportNodeTypes = reth_node_ethereum::EthereumNode; let provider_factory = ProviderFactory::< - NodeTypesWithDBAdapter>, + NodeTypesWithDBAdapter>, >::new( db.clone(), chain_spec.clone(), @@ -226,7 +238,7 @@ pub async fn setup_engine_with_chain_import( Ok(ChainImportResult { nodes, task_manager: tasks, - wallet: crate::Wallet::default().with_chain_id(chain_spec.chain().into()), + wallet: crate::Wallet::default().with_chain_id(chain_spec.chain.id()), _temp_dirs: temp_dirs, }) } @@ -256,193 +268,15 @@ pub fn load_forkchoice_state(path: &Path) -> eyre::Result Vec { - let mut blocks: Vec = Vec::new(); - let genesis_header = chain_spec.sealed_genesis_header(); - let mut parent_hash = genesis_header.hash(); - let mut parent_number = genesis_header.number(); - let mut parent_base_fee = genesis_header.base_fee_per_gas; - let mut parent_gas_limit = genesis_header.gas_limit; - debug!(target: "e2e::import", - "Genesis header base fee: {:?}, gas limit: {}, state root: {:?}", - parent_base_fee, - parent_gas_limit, - genesis_header.state_root() - ); - - for i in 1..=count { - // Create a simple header - let mut header = Header { - parent_hash, - number: parent_number + 1, - gas_limit: parent_gas_limit, // Use parent's gas limit - gas_used: 0, // Empty blocks use no gas - timestamp: genesis_header.timestamp() + i * 12, // 12 second blocks - beneficiary: Address::ZERO, - receipts_root: alloy_consensus::constants::EMPTY_RECEIPTS, - logs_bloom: Default::default(), - difficulty: U256::from(1), // Will be overridden for post-merge - // Use the same state root as parent for now (empty state changes) - state_root: if i == 1 { - genesis_header.state_root() - } else { - blocks.last().unwrap().state_root - }, - transactions_root: 
alloy_consensus::constants::EMPTY_TRANSACTIONS, - ommers_hash: alloy_consensus::constants::EMPTY_OMMER_ROOT_HASH, - mix_hash: B256::ZERO, - nonce: B64::from(0u64), - extra_data: Default::default(), - base_fee_per_gas: None, - withdrawals_root: None, - blob_gas_used: None, - excess_blob_gas: None, - parent_beacon_block_root: None, - requests_hash: None, - }; - - // Set required fields based on chain spec - if chain_spec.is_london_active_at_block(header.number) { - // Calculate base fee based on parent block - if let Some(parent_fee) = parent_base_fee { - // For the first block, we need to use the exact expected base fee - // The consensus rules expect it to be calculated from the genesis - let (parent_gas_used, parent_gas_limit) = if i == 1 { - // Genesis block parameters - (genesis_header.gas_used, genesis_header.gas_limit) - } else { - let last_block = blocks.last().unwrap(); - (last_block.gas_used, last_block.gas_limit) - }; - header.base_fee_per_gas = Some(alloy_eips::calc_next_block_base_fee( - parent_gas_used, - parent_gas_limit, - parent_fee, - chain_spec.base_fee_params_at_timestamp(header.timestamp), - )); - debug!(target: "e2e::import", "Block {} calculated base fee: {:?} (parent gas used: {}, parent gas limit: {}, parent base fee: {})", - i, header.base_fee_per_gas, parent_gas_used, parent_gas_limit, parent_fee); - parent_base_fee = header.base_fee_per_gas; - } - } - - // For post-merge blocks - if chain_spec.is_paris_active_at_block(header.number) { - header.difficulty = U256::ZERO; - header.nonce = B64::ZERO; - } - - // For post-shanghai blocks - if chain_spec.is_shanghai_active_at_timestamp(header.timestamp) { - header.withdrawals_root = Some(EMPTY_WITHDRAWALS); - } - - // For post-cancun blocks - if chain_spec.is_cancun_active_at_timestamp(header.timestamp) { - header.blob_gas_used = Some(0); - header.excess_blob_gas = Some(0); - header.parent_beacon_block_root = Some(B256::ZERO); - } - - // Create an empty block body - let body = BlockBody { - transactions: vec![], - ommers: vec![], - withdrawals: header.withdrawals_root.is_some().then(Withdrawals::default), - }; - - // Create the block - let block = Block { header: header.clone(), body: body.clone() }; - let sealed_block = BlockTrait::seal_slow(block); - - debug!(target: "e2e::import", - "Generated block {} with hash {:?}", - sealed_block.number(), - sealed_block.hash() - ); - debug!(target: "e2e::import", - " Body has {} transactions, {} ommers, withdrawals: {}", - body.transactions.len(), - body.ommers.len(), - body.withdrawals.is_some() - ); - - // Update parent for next iteration - parent_hash = sealed_block.hash(); - parent_number = sealed_block.number(); - parent_gas_limit = sealed_block.gas_limit; - if header.base_fee_per_gas.is_some() { - parent_base_fee = header.base_fee_per_gas; - } - - blocks.push(sealed_block); - } - - blocks - } - - /// Write blocks to RLP file - fn write_blocks_to_rlp(blocks: &[SealedBlock], path: &Path) -> std::io::Result<()> { - use alloy_rlp::Encodable; - - let mut file = std::fs::File::create(path)?; - let mut total_bytes = 0; - - for (i, block) in blocks.iter().enumerate() { - // Convert SealedBlock to Block before encoding - let block_for_encoding = block.clone().unseal(); - - let mut buf = Vec::new(); - block_for_encoding.encode(&mut buf); - debug!(target: "e2e::import", - "Block {} has {} transactions, encoded to {} bytes", - i, - block.body().transactions.len(), - buf.len() - ); - - // Debug: check what's in the encoded data - debug!(target: "e2e::import", "Block {} encoded to {} 
bytes", i, buf.len()); - if buf.len() < 20 { - debug!(target: "e2e::import", " Raw bytes: {:?}", &buf); - } else { - debug!(target: "e2e::import", " First 20 bytes: {:?}", &buf[..20]); - } - - total_bytes += buf.len(); - file.write_all(&buf)?; - } - - file.flush()?; - debug!(target: "e2e::import", "Total RLP bytes written: {total_bytes}"); - Ok(()) - } - - /// Create FCU JSON for the tip of the chain - fn create_fcu_json(tip: &SealedBlock) -> serde_json::Value { - serde_json::json!({ - "forkchoiceState": { - "headBlockHash": format!("0x{:x}", tip.hash()), - "safeBlockHash": format!("0x{:x}", tip.hash()), - "finalizedBlockHash": format!("0x{:x}", tip.hash()), - } - }) - } + use std::path::PathBuf; #[tokio::test] async fn test_stage_checkpoints_persistence() { @@ -595,7 +429,7 @@ mod tests { chain_spec: &ChainSpec, block_count: u64, temp_dir: &Path, - ) -> (Vec, std::path::PathBuf) { + ) -> (Vec, PathBuf) { let test_blocks = generate_test_blocks(chain_spec, block_count); assert_eq!( test_blocks.len(), diff --git a/crates/e2e-test-utils/src/test_rlp_utils.rs b/crates/e2e-test-utils/src/test_rlp_utils.rs new file mode 100644 index 00000000000..463f2dd184f --- /dev/null +++ b/crates/e2e-test-utils/src/test_rlp_utils.rs @@ -0,0 +1,185 @@ +//! Utilities for creating and writing RLP test data + +use alloy_consensus::{constants::EMPTY_WITHDRAWALS, BlockHeader, Header}; +use alloy_eips::eip4895::Withdrawals; +use alloy_primitives::{Address, B256, B64, U256}; +use alloy_rlp::Encodable; +use reth_chainspec::{ChainSpec, EthereumHardforks}; +use reth_ethereum_primitives::{Block, BlockBody}; +use reth_primitives::SealedBlock; +use reth_primitives_traits::Block as BlockTrait; +use std::{io::Write, path::Path}; +use tracing::debug; + +/// Generate test blocks for a given chain spec +pub fn generate_test_blocks(chain_spec: &ChainSpec, count: u64) -> Vec { + let mut blocks: Vec = Vec::new(); + let genesis_header = chain_spec.sealed_genesis_header(); + let mut parent_hash = genesis_header.hash(); + let mut parent_number = genesis_header.number(); + let mut parent_base_fee = genesis_header.base_fee_per_gas; + let mut parent_gas_limit = genesis_header.gas_limit; + + debug!(target: "e2e::import", + "Genesis header base fee: {:?}, gas limit: {}, state root: {:?}", + parent_base_fee, + parent_gas_limit, + genesis_header.state_root() + ); + + for i in 1..=count { + // Create a simple header + let mut header = Header { + parent_hash, + number: parent_number + 1, + gas_limit: parent_gas_limit, // Use parent's gas limit + gas_used: 0, // Empty blocks use no gas + timestamp: genesis_header.timestamp() + i * 12, // 12 second blocks + beneficiary: Address::ZERO, + receipts_root: alloy_consensus::constants::EMPTY_RECEIPTS, + logs_bloom: Default::default(), + difficulty: U256::from(1), // Will be overridden for post-merge + // Use the same state root as parent for now (empty state changes) + state_root: if i == 1 { + genesis_header.state_root() + } else { + blocks.last().unwrap().state_root + }, + transactions_root: alloy_consensus::constants::EMPTY_TRANSACTIONS, + ommers_hash: alloy_consensus::constants::EMPTY_OMMER_ROOT_HASH, + mix_hash: B256::ZERO, + nonce: B64::from(0u64), + extra_data: Default::default(), + base_fee_per_gas: None, + withdrawals_root: None, + blob_gas_used: None, + excess_blob_gas: None, + parent_beacon_block_root: None, + requests_hash: None, + }; + + // Set required fields based on chain spec + if chain_spec.is_london_active_at_block(header.number) { + // Calculate base fee based on parent block + if 
let Some(parent_fee) = parent_base_fee { + // For the first block, we need to use the exact expected base fee + // The consensus rules expect it to be calculated from the genesis + let (parent_gas_used, parent_gas_limit) = if i == 1 { + // Genesis block parameters + (genesis_header.gas_used, genesis_header.gas_limit) + } else { + let last_block = blocks.last().unwrap(); + (last_block.gas_used, last_block.gas_limit) + }; + header.base_fee_per_gas = Some(alloy_eips::calc_next_block_base_fee( + parent_gas_used, + parent_gas_limit, + parent_fee, + chain_spec.base_fee_params_at_timestamp(header.timestamp), + )); + debug!(target: "e2e::import", "Block {} calculated base fee: {:?} (parent gas used: {}, parent gas limit: {}, parent base fee: {})", + i, header.base_fee_per_gas, parent_gas_used, parent_gas_limit, parent_fee); + parent_base_fee = header.base_fee_per_gas; + } + } + + // For post-merge blocks + if chain_spec.is_paris_active_at_block(header.number) { + header.difficulty = U256::ZERO; + header.nonce = B64::ZERO; + } + + // For post-shanghai blocks + if chain_spec.is_shanghai_active_at_timestamp(header.timestamp) { + header.withdrawals_root = Some(EMPTY_WITHDRAWALS); + } + + // For post-cancun blocks + if chain_spec.is_cancun_active_at_timestamp(header.timestamp) { + header.blob_gas_used = Some(0); + header.excess_blob_gas = Some(0); + header.parent_beacon_block_root = Some(B256::ZERO); + } + + // Create an empty block body + let body = BlockBody { + transactions: vec![], + ommers: vec![], + withdrawals: header.withdrawals_root.is_some().then(Withdrawals::default), + }; + + // Create the block + let block = Block { header: header.clone(), body: body.clone() }; + let sealed_block = BlockTrait::seal_slow(block); + + debug!(target: "e2e::import", + "Generated block {} with hash {:?}", + sealed_block.number(), + sealed_block.hash() + ); + debug!(target: "e2e::import", + " Body has {} transactions, {} ommers, withdrawals: {}", + body.transactions.len(), + body.ommers.len(), + body.withdrawals.is_some() + ); + + // Update parent for next iteration + parent_hash = sealed_block.hash(); + parent_number = sealed_block.number(); + parent_gas_limit = sealed_block.gas_limit; + if header.base_fee_per_gas.is_some() { + parent_base_fee = header.base_fee_per_gas; + } + + blocks.push(sealed_block); + } + + blocks +} + +/// Write blocks to RLP file +pub fn write_blocks_to_rlp(blocks: &[SealedBlock], path: &Path) -> std::io::Result<()> { + let mut file = std::fs::File::create(path)?; + let mut total_bytes = 0; + + for (i, block) in blocks.iter().enumerate() { + // Convert SealedBlock to Block before encoding + let block_for_encoding = block.clone().unseal(); + + let mut buf = Vec::new(); + block_for_encoding.encode(&mut buf); + debug!(target: "e2e::import", + "Block {} has {} transactions, encoded to {} bytes", + i, + block.body().transactions.len(), + buf.len() + ); + + // Debug: check what's in the encoded data + debug!(target: "e2e::import", "Block {} encoded to {} bytes", i, buf.len()); + if buf.len() < 20 { + debug!(target: "e2e::import", " Raw bytes: {:?}", &buf); + } else { + debug!(target: "e2e::import", " First 20 bytes: {:?}", &buf[..20]); + } + + total_bytes += buf.len(); + file.write_all(&buf)?; + } + + file.flush()?; + debug!(target: "e2e::import", "Total RLP bytes written: {total_bytes}"); + Ok(()) +} + +/// Create FCU JSON for the tip of the chain +pub fn create_fcu_json(tip: &SealedBlock) -> serde_json::Value { + serde_json::json!({ + "forkchoiceState": { + "headBlockHash": format!("0x{:x}", 
tip.hash()), + "safeBlockHash": format!("0x{:x}", tip.hash()), + "finalizedBlockHash": format!("0x{:x}", tip.hash()), + } + }) +} diff --git a/crates/e2e-test-utils/src/testsuite/actions/mod.rs b/crates/e2e-test-utils/src/testsuite/actions/mod.rs index 205eb9ac48e..58472618001 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/mod.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/mod.rs @@ -18,7 +18,8 @@ pub mod reorg; pub use engine_api::{ExpectedPayloadStatus, SendNewPayload, SendNewPayloads}; pub use fork::{CreateFork, ForkBase, SetForkBase, SetForkBaseFromBlockInfo, ValidateFork}; pub use node_ops::{ - CaptureBlockOnNode, CompareNodeChainTips, SelectActiveNode, ValidateBlockTag, WaitForSync, + AssertChainTip, CaptureBlockOnNode, CompareNodeChainTips, SelectActiveNode, ValidateBlockTag, + WaitForSync, }; pub use produce_blocks::{ AssertMineBlock, BroadcastLatestForkchoice, BroadcastNextNewPayload, CheckPayloadAccepted, diff --git a/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs b/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs index 2b3914339f8..f42951fc57b 100644 --- a/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs +++ b/crates/e2e-test-utils/src/testsuite/actions/node_ops.rs @@ -338,3 +338,45 @@ where }) } } + +/// Action to assert the current chain tip is at a specific block number. +#[derive(Debug)] +pub struct AssertChainTip { + /// Expected block number + pub expected_block_number: u64, +} + +impl AssertChainTip { + /// Create a new `AssertChainTip` action + pub const fn new(expected_block_number: u64) -> Self { + Self { expected_block_number } + } +} + +impl Action for AssertChainTip +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let current_block = env + .current_block_info() + .ok_or_else(|| eyre::eyre!("No current block information available"))?; + + if current_block.number != self.expected_block_number { + return Err(eyre::eyre!( + "Expected chain tip to be at block {}, but found block {}", + self.expected_block_number, + current_block.number + )); + } + + debug!( + "Chain tip verified at block {} (hash: {})", + current_block.number, current_block.hash + ); + + Ok(()) + }) + } +} diff --git a/crates/e2e-test-utils/src/testsuite/examples.rs b/crates/e2e-test-utils/src/testsuite/examples.rs index fc7afd04359..58b66027635 100644 --- a/crates/e2e-test-utils/src/testsuite/examples.rs +++ b/crates/e2e-test-utils/src/testsuite/examples.rs @@ -2,8 +2,9 @@ use crate::testsuite::{ actions::{ - AssertMineBlock, CaptureBlock, CaptureBlockOnNode, CompareNodeChainTips, CreateFork, - MakeCanonical, ProduceBlocks, ReorgTo, SelectActiveNode, + Action, AssertChainTip, AssertMineBlock, CaptureBlock, CaptureBlockOnNode, + CompareNodeChainTips, CreateFork, MakeCanonical, ProduceBlocks, ReorgTo, SelectActiveNode, + UpdateBlockInfo, }, setup::{NetworkSetup, Setup}, TestBuilder, @@ -15,6 +16,107 @@ use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_node_api::TreeConfig; use reth_node_ethereum::{EthEngineTypes, EthereumNode}; use std::sync::Arc; +use tracing::debug; + +#[tokio::test] +async fn test_apply_with_import() -> Result<()> { + use crate::test_rlp_utils::{generate_test_blocks, write_blocks_to_rlp}; + use tempfile::TempDir; + + reth_tracing::init_test_tracing(); + + // Create test chain spec + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + 
.london_activated() + .shanghai_activated() + .cancun_activated() + .build(), + ); + + // Generate test blocks + let test_blocks = generate_test_blocks(&chain_spec, 10); + + // Write blocks to RLP file + let temp_dir = TempDir::new()?; + let rlp_path = temp_dir.path().join("test_chain.rlp"); + write_blocks_to_rlp(&test_blocks, &rlp_path)?; + + // Create setup with imported chain + let mut setup = + Setup::default().with_chain_spec(chain_spec).with_network(NetworkSetup::single_node()); + + // Create environment and apply setup with import + let mut env = crate::testsuite::Environment::::default(); + setup.apply_with_import::(&mut env, &rlp_path).await?; + + // Now run test actions on the environment with imported chain + // First check what block we're at after import + debug!("Current block info after import: {:?}", env.current_block_info()); + + // Update block info to sync environment state with the node + let mut update_block_info = UpdateBlockInfo::default(); + update_block_info.execute(&mut env).await?; + + // Make the imported chain canonical first + let mut make_canonical = MakeCanonical::new(); + make_canonical.execute(&mut env).await?; + + // Wait for the pipeline to finish processing all stages + debug!("Waiting for pipeline to finish processing imported blocks..."); + let start = std::time::Instant::now(); + loop { + // Check if we can get the block from RPC (indicates pipeline finished) + let client = &env.node_clients[0]; + let block_result = reth_rpc_api::clients::EthApiClient::< + alloy_rpc_types_eth::TransactionRequest, + alloy_rpc_types_eth::Transaction, + alloy_rpc_types_eth::Block, + alloy_rpc_types_eth::Receipt, + alloy_rpc_types_eth::Header, + >::block_by_number( + &client.rpc, + alloy_eips::BlockNumberOrTag::Number(10), + true, // Include full transaction details + ) + .await; + + if let Ok(Some(block)) = block_result { + if block.header.number == 10 { + debug!("Pipeline finished, block 10 is fully available"); + break; + } + } + + if start.elapsed() > std::time::Duration::from_secs(10) { + return Err(eyre::eyre!("Timeout waiting for pipeline to finish")); + } + + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + + // Update block info again after making canonical + let mut update_block_info_2 = UpdateBlockInfo::default(); + update_block_info_2.execute(&mut env).await?; + + // Assert we're at block 10 after import + let mut assert_tip = AssertChainTip::new(10); + assert_tip.execute(&mut env).await?; + + debug!("Successfully imported chain to block 10"); + + // Produce 5 more blocks + let mut produce_blocks = ProduceBlocks::::new(5); + produce_blocks.execute(&mut env).await?; + + // Assert we're now at block 15 + let mut assert_new_tip = AssertChainTip::new(15); + assert_new_tip.execute(&mut env).await?; + + Ok(()) +} #[tokio::test] async fn test_testsuite_assert_mine_block() -> Result<()> { diff --git a/crates/e2e-test-utils/src/testsuite/mod.rs b/crates/e2e-test-utils/src/testsuite/mod.rs index 851053d8ebe..f151fdf6dc1 100644 --- a/crates/e2e-test-utils/src/testsuite/mod.rs +++ b/crates/e2e-test-utils/src/testsuite/mod.rs @@ -14,25 +14,58 @@ use std::{collections::HashMap, marker::PhantomData}; pub mod actions; pub mod setup; use crate::testsuite::setup::Setup; +use alloy_provider::{Provider, ProviderBuilder}; use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes}; use reth_rpc_builder::auth::AuthServerHandle; +use std::sync::Arc; +use url::Url; #[cfg(test)] mod examples; /// Client handles for both regular RPC and Engine API endpoints 
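 ///
 /// A minimal usage sketch (all names as defined in this file): construct the client via
 /// `NodeClient::new(rpc, engine, url)`, poll `client.is_ready().await` until it returns
 /// `true`, then issue queries such as
 /// `client.get_block_by_number(alloy_eips::BlockNumberOrTag::Latest).await?`.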
-#[derive(Debug, Clone)] +#[derive(Clone)] pub struct NodeClient { /// Regular JSON-RPC client pub rpc: HttpClient, /// Engine API client pub engine: AuthServerHandle, + /// Alloy provider for interacting with the node + provider: Arc, } impl NodeClient { - /// Instantiates a new [`NodeClient`] with the given handles - pub const fn new(rpc: HttpClient, engine: AuthServerHandle) -> Self { - Self { rpc, engine } + /// Instantiates a new [`NodeClient`] with the given handles and RPC URL + pub fn new(rpc: HttpClient, engine: AuthServerHandle, url: Url) -> Self { + let provider = + Arc::new(ProviderBuilder::new().connect_http(url)) as Arc; + Self { rpc, engine, provider } + } + + /// Get a block by number using the alloy provider + pub async fn get_block_by_number( + &self, + number: alloy_eips::BlockNumberOrTag, + ) -> Result> { + self.provider + .get_block_by_number(number) + .await + .map_err(|e| eyre::eyre!("Failed to get block by number: {}", e)) + } + + /// Check if the node is ready by attempting to get the latest block + pub async fn is_ready(&self) -> bool { + self.get_block_by_number(alloy_eips::BlockNumberOrTag::Latest).await.is_ok() + } +} + +impl std::fmt::Debug for NodeClient { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("NodeClient") + .field("rpc", &self.rpc) + .field("engine", &self.engine) + .field("provider", &"") + .finish() } } diff --git a/crates/e2e-test-utils/src/testsuite/setup.rs b/crates/e2e-test-utils/src/testsuite/setup.rs index bb6140ed9f1..2b8ee948f93 100644 --- a/crates/e2e-test-utils/src/testsuite/setup.rs +++ b/crates/e2e-test-utils/src/testsuite/setup.rs @@ -7,7 +7,6 @@ use crate::{ use alloy_eips::BlockNumberOrTag; use alloy_primitives::B256; use alloy_rpc_types_engine::{ForkchoiceState, PayloadAttributes}; -use alloy_rpc_types_eth::{Block as RpcBlock, Header, Receipt, Transaction, TransactionRequest}; use eyre::{eyre, Result}; use reth_chainspec::ChainSpec; use reth_engine_local::LocalPayloadAttributesBuilder; @@ -15,14 +14,13 @@ use reth_ethereum_primitives::Block; use reth_node_api::{EngineTypes, NodeTypes, PayloadTypes, TreeConfig}; use reth_node_core::primitives::RecoveredBlock; use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_rpc_api::clients::EthApiClient; use revm::state::EvmState; -use std::{marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, path::Path, sync::Arc}; use tokio::{ sync::mpsc, time::{sleep, Duration}, }; -use tracing::{debug, error}; +use tracing::debug; /// Configuration for setting up test environment #[derive(Debug)] @@ -45,6 +43,9 @@ pub struct Setup { pub is_dev: bool, /// Tracks instance generic. 
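     /// (The `PhantomData` field only ties `Setup` to its instance type parameter; no value is
     /// stored in it.)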
_phantom: PhantomData, + /// Holds the import result to keep nodes alive when using imported chain + /// This is stored as an option to avoid lifetime issues with `tokio::spawn` + import_result_holder: Option, } impl Default for Setup { @@ -59,6 +60,7 @@ impl Default for Setup { shutdown_tx: None, is_dev: true, _phantom: Default::default(), + import_result_holder: None, } } } @@ -129,6 +131,41 @@ where self } + /// Apply setup using pre-imported chain data from RLP file + pub async fn apply_with_import( + &mut self, + env: &mut Environment, + rlp_path: &Path, + ) -> Result<()> + where + N: NodeBuilderHelper, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< + <::Payload as PayloadTypes>::PayloadAttributes, + >, + { + // Create nodes with imported chain data + let import_result = self.create_nodes_with_import::(rlp_path).await?; + + // Extract node clients + let mut node_clients = Vec::new(); + let nodes = &import_result.nodes; + for node in nodes { + let rpc = node + .rpc_client() + .ok_or_else(|| eyre!("Failed to create HTTP RPC client for node"))?; + let auth = node.auth_server_handle(); + let url = node.rpc_url(); + node_clients.push(crate::testsuite::NodeClient::new(rpc, auth, url)); + } + + // Store the import result to keep nodes alive + // They will be dropped when the Setup is dropped + self.import_result_holder = Some(import_result); + + // Finalize setup - this will wait for nodes and initialize states + self.finalize_setup(env, node_clients, true).await + } + /// Apply the setup to the environment pub async fn apply(&mut self, env: &mut Environment) -> Result<()> where @@ -141,24 +178,12 @@ where self.chain_spec.clone().ok_or_else(|| eyre!("Chain specification is required"))?; let (shutdown_tx, mut shutdown_rx) = mpsc::channel(1); - self.shutdown_tx = Some(shutdown_tx); let is_dev = self.is_dev; let node_count = self.network.node_count; - let attributes_generator = move |timestamp| { - let attributes = PayloadAttributes { - timestamp, - prev_randao: B256::ZERO, - suggested_fee_recipient: alloy_primitives::Address::ZERO, - withdrawals: Some(vec![]), - parent_beacon_block_root: Some(B256::ZERO), - }; - <::Payload as PayloadTypes>::PayloadBuilderAttributes::from( - EthPayloadBuilderAttributes::new(B256::ZERO, attributes), - ) - }; + let attributes_generator = self.create_attributes_generator::(); let result = setup_engine_with_connection::( node_count, @@ -179,8 +204,9 @@ where .rpc_client() .ok_or_else(|| eyre!("Failed to create HTTP RPC client for node"))?; let auth = node.auth_server_handle(); + let url = node.rpc_url(); - node_clients.push(crate::testsuite::NodeClient::new(rpc, auth)); + node_clients.push(crate::testsuite::NodeClient::new(rpc, auth, url)); } // spawn a separate task just to handle the shutdown @@ -194,100 +220,180 @@ where }); } Err(e) => { - error!("Failed to setup nodes: {}", e); return Err(eyre!("Failed to setup nodes: {}", e)); } } - if node_clients.is_empty() { - return Err(eyre!("No nodes were created")); - } + // Finalize setup + self.finalize_setup(env, node_clients, false).await + } - // wait for all nodes to be ready to accept RPC requests before proceeding - for (idx, client) in node_clients.iter().enumerate() { - let mut retry_count = 0; - const MAX_RETRIES: usize = 5; - let mut last_error = None; + /// Create nodes with imported chain data + /// + /// Note: Currently this only supports `EthereumNode` due to the import process + /// being Ethereum-specific. 
The generic parameter N is kept for consistency + /// with other methods but is not used. + async fn create_nodes_with_import( + &self, + rlp_path: &Path, + ) -> Result + where + N: NodeBuilderHelper, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< + <::Payload as PayloadTypes>::PayloadAttributes, + >, + { + let chain_spec = + self.chain_spec.clone().ok_or_else(|| eyre!("Chain specification is required"))?; - while retry_count < MAX_RETRIES { - match EthApiClient::::block_by_number( - &client.rpc, - BlockNumberOrTag::Latest, - false, - ) - .await - { - Ok(_) => { - debug!("Node {idx} RPC endpoint is ready"); - break; - } - Err(e) => { - last_error = Some(e); - retry_count += 1; - debug!( - "Node {idx} RPC endpoint not ready, retry {retry_count}/{MAX_RETRIES}" - ); - sleep(Duration::from_millis(500)).await; - } - } - } - if retry_count == MAX_RETRIES { - return Err(eyre!("Failed to connect to node {idx} RPC endpoint after {MAX_RETRIES} retries: {:?}", last_error)); - } - } + let attributes_generator = move |timestamp| { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: alloy_primitives::Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + EthPayloadBuilderAttributes::new(B256::ZERO, attributes) + }; - env.node_clients = node_clients; + crate::setup_import::setup_engine_with_chain_import( + self.network.node_count, + chain_spec, + self.is_dev, + self.tree_config.clone(), + rlp_path, + attributes_generator, + ) + .await + } - // Initialize per-node states for all nodes - env.initialize_node_states(node_count); - - // Initialize each node's state with genesis block information - let genesis_block_info = { - let first_client = &env.node_clients[0]; - let genesis_block = EthApiClient::< - TransactionRequest, - Transaction, - RpcBlock, - Receipt, - Header, - >::block_by_number( - &first_client.rpc, BlockNumberOrTag::Number(0), false + /// Create the attributes generator function + fn create_attributes_generator( + &self, + ) -> impl Fn(u64) -> <::Payload as PayloadTypes>::PayloadBuilderAttributes + Copy + where + N: NodeBuilderHelper, + LocalPayloadAttributesBuilder: PayloadAttributesBuilder< + <::Payload as PayloadTypes>::PayloadAttributes, + >, + { + move |timestamp| { + let attributes = PayloadAttributes { + timestamp, + prev_randao: B256::ZERO, + suggested_fee_recipient: alloy_primitives::Address::ZERO, + withdrawals: Some(vec![]), + parent_beacon_block_root: Some(B256::ZERO), + }; + <::Payload as PayloadTypes>::PayloadBuilderAttributes::from( + EthPayloadBuilderAttributes::new(B256::ZERO, attributes), ) - .await? 
- .ok_or_else(|| eyre!("Genesis block not found"))?; + } + } - crate::testsuite::BlockInfo { - hash: genesis_block.header.hash, - number: genesis_block.header.number, - timestamp: genesis_block.header.timestamp, - } + /// Common finalization logic for both apply methods + async fn finalize_setup( + &self, + env: &mut Environment, + node_clients: Vec, + use_latest_block: bool, + ) -> Result<()> { + if node_clients.is_empty() { + return Err(eyre!("No nodes were created")); + } + + // Wait for all nodes to be ready + self.wait_for_nodes_ready(&node_clients).await?; + + env.node_clients = node_clients; + env.initialize_node_states(self.network.node_count); + + // Get initial block info (genesis or latest depending on use_latest_block) + let (initial_block_info, genesis_block_info) = if use_latest_block { + // For imported chain, get both latest and genesis + let latest = + self.get_block_info(&env.node_clients[0], BlockNumberOrTag::Latest).await?; + let genesis = + self.get_block_info(&env.node_clients[0], BlockNumberOrTag::Number(0)).await?; + (latest, genesis) + } else { + // For fresh chain, both are genesis + let genesis = + self.get_block_info(&env.node_clients[0], BlockNumberOrTag::Number(0)).await?; + (genesis, genesis) }; - // Initialize all node states with the same genesis block + // Initialize all node states for (node_idx, node_state) in env.node_states.iter_mut().enumerate() { - node_state.current_block_info = Some(genesis_block_info); - node_state.latest_header_time = genesis_block_info.timestamp; + node_state.current_block_info = Some(initial_block_info); + node_state.latest_header_time = initial_block_info.timestamp; node_state.latest_fork_choice_state = ForkchoiceState { - head_block_hash: genesis_block_info.hash, - safe_block_hash: genesis_block_info.hash, + head_block_hash: initial_block_info.hash, + safe_block_hash: initial_block_info.hash, finalized_block_hash: genesis_block_info.hash, }; debug!( - "Node {} initialized with genesis block {} (hash: {})", - node_idx, genesis_block_info.number, genesis_block_info.hash + "Node {} initialized with block {} (hash: {})", + node_idx, initial_block_info.number, initial_block_info.hash ); } debug!( - "Environment initialized with {} nodes, all starting from genesis block {} (hash: {})", - node_count, genesis_block_info.number, genesis_block_info.hash + "Environment initialized with {} nodes, starting from block {} (hash: {})", + self.network.node_count, initial_block_info.number, initial_block_info.hash ); - // TODO: For each block in self.blocks, replay it on the node + Ok(()) + } + + /// Wait for all nodes to be ready to accept RPC requests + async fn wait_for_nodes_ready( + &self, + node_clients: &[crate::testsuite::NodeClient], + ) -> Result<()> { + for (idx, client) in node_clients.iter().enumerate() { + let mut retry_count = 0; + const MAX_RETRIES: usize = 10; + while retry_count < MAX_RETRIES { + if client.is_ready().await { + debug!("Node {idx} RPC endpoint is ready"); + break; + } + + retry_count += 1; + debug!("Node {idx} RPC endpoint not ready, retry {retry_count}/{MAX_RETRIES}"); + sleep(Duration::from_millis(500)).await; + } + + if retry_count == MAX_RETRIES { + return Err(eyre!( + "Failed to connect to node {idx} RPC endpoint after {MAX_RETRIES} retries" + )); + } + } Ok(()) } + + /// Get block info for a given block number or tag + async fn get_block_info( + &self, + client: &crate::testsuite::NodeClient, + block: BlockNumberOrTag, + ) -> Result { + let block = client + .get_block_by_number(block) + .await? 
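+            // `get_block_by_number` yields `Ok(None)` for unknown blocks; that case is mapped
+            // to an error below so callers get a clear message.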
+            .ok_or_else(|| eyre!("Block {:?} not found", block))?;
+
+        Ok(crate::testsuite::BlockInfo {
+            hash: block.header.hash,
+            number: block.header.number,
+            timestamp: block.header.timestamp,
+        })
+    }
 }
 
 /// Genesis block configuration

From e9a4222c8a36dca2734a977f7169a92ceb70992b Mon Sep 17 00:00:00 2001
From: Brian Picciano
Date: Tue, 8 Jul 2025 11:45:23 +0200
Subject: [PATCH 083/305] fix(trie): correctly handle path field on cleared
 ParallelSparseTrie lower subtries (#17259)

---
 crates/trie/sparse-parallel/src/lib.rs   |   3 +
 crates/trie/sparse-parallel/src/lower.rs | 101 +++++++++++++++
 crates/trie/sparse-parallel/src/trie.rs  | 154 ++++++++++++-----------
 3 files changed, 183 insertions(+), 75 deletions(-)
 create mode 100644 crates/trie/sparse-parallel/src/lower.rs

diff --git a/crates/trie/sparse-parallel/src/lib.rs b/crates/trie/sparse-parallel/src/lib.rs
index 6a8a7048930..12f53935bf2 100644
--- a/crates/trie/sparse-parallel/src/lib.rs
+++ b/crates/trie/sparse-parallel/src/lib.rs
@@ -4,3 +4,6 @@
 mod trie;
 
 pub use trie::*;
+
+mod lower;
+use lower::*;
diff --git a/crates/trie/sparse-parallel/src/lower.rs b/crates/trie/sparse-parallel/src/lower.rs
new file mode 100644
index 00000000000..5e49bcb43cd
--- /dev/null
+++ b/crates/trie/sparse-parallel/src/lower.rs
@@ -0,0 +1,101 @@
+use crate::SparseSubtrie;
+use reth_trie_common::Nibbles;
+
+/// Tracks the state of the lower subtries.
+///
+/// When a [`crate::ParallelSparseTrie`] is initialized/cleared then its `LowerSparseSubtrie`s are
+/// all blinded, meaning they have no nodes. A blinded `LowerSparseSubtrie` may hold onto a cleared
+/// [`SparseSubtrie`] in order to re-use allocations.
+#[derive(Debug, Eq, PartialEq)]
+pub(crate) enum LowerSparseSubtrie {
+    Blind(Option<Box<SparseSubtrie>>),
+    Revealed(Box<SparseSubtrie>),
+}
+
+impl Default for LowerSparseSubtrie {
+    /// Creates a new blinded subtrie with no allocated storage.
+    fn default() -> Self {
+        Self::Blind(None)
+    }
+}
+
+impl LowerSparseSubtrie {
+    /// Returns a reference to the underlying [`SparseSubtrie`] if this subtrie is revealed.
+    ///
+    /// Returns `None` if the subtrie is blinded (has no nodes).
+    pub(crate) fn as_revealed_ref(&self) -> Option<&SparseSubtrie> {
+        match self {
+            Self::Blind(_) => None,
+            Self::Revealed(subtrie) => Some(subtrie.as_ref()),
+        }
+    }
+
+    /// Returns a mutable reference to the underlying [`SparseSubtrie`] if this subtrie is revealed.
+    ///
+    /// Returns `None` if the subtrie is blinded (has no nodes).
+    pub(crate) fn as_revealed_mut(&mut self) -> Option<&mut SparseSubtrie> {
+        match self {
+            Self::Blind(_) => None,
+            Self::Revealed(subtrie) => Some(subtrie.as_mut()),
+        }
+    }
+
+    /// Reveals the lower [`SparseSubtrie`], transitioning it from the Blinded to the Revealed
+    /// variant, preserving allocations if possible.
+    ///
+    /// The given path is the path of a node which will be set into the [`SparseSubtrie`]'s `nodes`
+    /// map immediately upon being revealed. If the subtrie is blinded, or if its current root path
+    /// is longer than this one, then this one becomes the new root path of the subtrie.
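+    ///
+    /// For example (paths purely illustrative): revealing a blinded subtrie at `0x123` makes
+    /// `0x123` its root path; a later reveal at the shorter path `0x12` shortens the root path
+    /// to `0x12`.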
+    pub(crate) fn reveal(&mut self, path: &Nibbles) {
+        match self {
+            Self::Blind(allocated) => {
+                debug_assert!(allocated.as_ref().is_none_or(|subtrie| subtrie.is_empty()));
+                *self = if let Some(mut subtrie) = allocated.take() {
+                    subtrie.path = *path;
+                    Self::Revealed(subtrie)
+                } else {
+                    Self::Revealed(Box::new(SparseSubtrie::new(*path)))
+                }
+            }
+            Self::Revealed(subtrie) => {
+                if path.len() < subtrie.path.len() {
+                    subtrie.path = *path;
+                }
+            }
+        };
+    }
+
+    /// Clears the subtrie and transitions it to the blinded state, preserving a cleared
+    /// [`SparseSubtrie`] if possible.
+    pub(crate) fn clear(&mut self) {
+        *self = match core::mem::take(self) {
+            Self::Blind(allocated) => {
+                debug_assert!(allocated.as_ref().is_none_or(|subtrie| subtrie.is_empty()));
+                Self::Blind(allocated)
+            }
+            Self::Revealed(mut subtrie) => {
+                subtrie.clear();
+                Self::Blind(Some(subtrie))
+            }
+        }
+    }
+
+    /// Takes ownership of the underlying [`SparseSubtrie`] if revealed and the predicate returns
+    /// true.
+    ///
+    /// If the subtrie is revealed, and the predicate function returns `true` when called with it,
+    /// then this method will take ownership of the subtrie and transition this `LowerSparseSubtrie`
+    /// to the blinded state. Otherwise, returns `None`.
+    pub(crate) fn take_revealed_if<P>(&mut self, predicate: P) -> Option<Box<SparseSubtrie>>
+    where
+        P: FnOnce(&SparseSubtrie) -> bool,
+    {
+        match self {
+            Self::Revealed(subtrie) if predicate(subtrie) => {
+                let Self::Revealed(subtrie) = core::mem::take(self) else { unreachable!() };
+                Some(subtrie)
+            }
+            Self::Revealed(_) | Self::Blind(_) => None,
+        }
+    }
+}
diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs
index 5f7a0eb3f38..e2df6cf005f 100644
--- a/crates/trie/sparse-parallel/src/trie.rs
+++ b/crates/trie/sparse-parallel/src/trie.rs
@@ -1,3 +1,4 @@
+use crate::LowerSparseSubtrie;
 use alloy_primitives::{
     map::{Entry, HashMap},
     B256,
@@ -37,7 +38,7 @@ pub struct ParallelSparseTrie {
     /// This contains the trie nodes for the upper part of the trie.
     upper_subtrie: Box<SparseSubtrie>,
     /// An array containing the subtries at the second level of the trie.
-    lower_subtries: [Option<Box<SparseSubtrie>>; NUM_LOWER_SUBTRIES],
+    lower_subtries: [LowerSparseSubtrie; NUM_LOWER_SUBTRIES],
     /// Set of prefixes (key paths) that have been marked as updated.
     /// This is used to track which parts of the trie need to be recalculated.
     prefix_set: PrefixSetMut,
@@ -52,7 +53,7 @@ impl Default for ParallelSparseTrie {
                 nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]),
                 ..Default::default()
             }),
-            lower_subtries: [const { None }; NUM_LOWER_SUBTRIES],
+            lower_subtries: [const { LowerSparseSubtrie::Blind(None) }; NUM_LOWER_SUBTRIES],
             prefix_set: PrefixSetMut::default(),
             updates: None,
         }
@@ -360,7 +361,9 @@ impl SparseTrieInterface for ParallelSparseTrie {
                 if let SparseSubtrieType::Lower(idx) =
                     SparseSubtrieType::from_path(&curr_path)
                 {
-                    curr_subtrie = self.lower_subtries[idx].as_mut().unwrap();
+                    curr_subtrie = self.lower_subtries[idx]
+                        .as_revealed_mut()
+                        .expect("lower subtrie is revealed");
                     curr_subtrie_is_upper = false;
                 }
             }
@@ -559,7 +562,7 @@
         // top-level `SparseTrieUpdates`.
         for (index, subtrie, update_actions) in rx {
             self.apply_subtrie_update_actions(update_actions);
-            self.lower_subtries[index] = Some(subtrie);
+            self.lower_subtries[index] = LowerSparseSubtrie::Revealed(subtrie);
         }
     }
@@ -573,13 +576,14 @@
     fn wipe(&mut self) {
         self.upper_subtrie.wipe();
-        self.lower_subtries = [const { None }; NUM_LOWER_SUBTRIES];
+        self.lower_subtries = [const { LowerSparseSubtrie::Blind(None) }; NUM_LOWER_SUBTRIES];
         self.prefix_set = PrefixSetMut::all();
     }
 
     fn clear(&mut self) {
         self.upper_subtrie.clear();
-        for subtrie in self.lower_subtries.iter_mut().flatten() {
+        self.upper_subtrie.nodes.insert(Nibbles::default(), SparseNode::Empty);
+        for subtrie in &mut self.lower_subtries {
             subtrie.clear();
         }
         self.prefix_set.clear();
@@ -602,35 +606,27 @@ impl ParallelSparseTrie {
     }
 
     /// Returns a reference to the lower `SparseSubtrie` for the given path, or None if the
-    /// path belongs to the upper trie or a lower subtrie for the path doesn't exist.
+    /// path belongs to the upper trie, or if the lower subtrie for the path doesn't exist or is
+    /// blinded.
     fn lower_subtrie_for_path(&self, path: &Nibbles) -> Option<&SparseSubtrie> {
         match SparseSubtrieType::from_path(path) {
             SparseSubtrieType::Upper => None,
-            SparseSubtrieType::Lower(idx) => {
-                self.lower_subtries[idx].as_ref().map(|subtrie| subtrie.as_ref())
-            }
+            SparseSubtrieType::Lower(idx) => self.lower_subtries[idx].as_revealed_ref(),
         }
     }
 
     /// Returns a mutable reference to the lower `SparseSubtrie` for the given path, or None if the
     /// path belongs to the upper trie.
/// - /// This method will create a new lower subtrie if one doesn't exist for the given path. If one - /// does exist, but its path field is longer than the given path, then the field will be set - /// to the given path. - fn lower_subtrie_for_path_mut(&mut self, path: &Nibbles) -> Option<&mut Box> { + /// This method will create/reveal a new lower subtrie for the given path if one isn't already. + /// If one does exist, but its path field is longer than the given path, then the field will be + /// set to the given path. + fn lower_subtrie_for_path_mut(&mut self, path: &Nibbles) -> Option<&mut SparseSubtrie> { match SparseSubtrieType::from_path(path) { SparseSubtrieType::Upper => None, SparseSubtrieType::Lower(idx) => { - if let Some(subtrie) = self.lower_subtries[idx].as_mut() { - if path.len() < subtrie.path.len() { - subtrie.path = *path; - } - } else { - self.lower_subtries[idx] = Some(Box::new(SparseSubtrie::new(*path))); - } - - self.lower_subtries[idx].as_mut() + self.lower_subtries[idx].reveal(path); + Some(self.lower_subtries[idx].as_revealed_mut().expect("just revealed")) } } } @@ -652,10 +648,10 @@ impl ParallelSparseTrie { /// Returns a mutable reference to either the lower or upper `SparseSubtrie` for the given path, /// depending on the path's length. /// - /// This method will create a new lower subtrie if one doesn't exist for the given path. If one - /// does exist, but its path field is longer than the given path, then the field will be set - /// to the given path. - fn subtrie_for_path_mut(&mut self, path: &Nibbles) -> &mut Box { + /// This method will create/reveal a new lower subtrie for the given path if one isn't already. + /// If one does exist, but its path field is longer than the given path, then the field will be + /// set to the given path. + fn subtrie_for_path_mut(&mut self, path: &Nibbles) -> &mut SparseSubtrie { // We can't just call `lower_subtrie_for_path` and return `upper_subtrie` if it returns // None, because Rust complains about double mutable borrowing `self`. if SparseSubtrieType::path_len_is_upper(path.len()) { @@ -777,10 +773,10 @@ impl ParallelSparseTrie { match node { Some(SparseNode::Leaf { .. }) => { - // If the leaf was the final node in its lower subtrie then we can remove the lower - // subtrie completely. + // If the leaf was the final node in its lower subtrie then we can blind the + // subtrie, effectively marking it as empty. if subtrie.nodes.is_empty() { - self.lower_subtries[idx] = None; + self.lower_subtries[idx].clear(); } } Some(SparseNode::Extension { key, .. }) => { @@ -932,7 +928,7 @@ impl ParallelSparseTrie { } else { let index = path_subtrie_index_unchecked(&path); let node = self.lower_subtries[index] - .as_mut() + .as_revealed_mut() .expect("lower subtrie must exist") .nodes .get_mut(&path) @@ -976,7 +972,9 @@ impl ParallelSparseTrie { let mut unchanged_prefix_set = PrefixSetMut::default(); for (index, subtrie) in self.lower_subtries.iter_mut().enumerate() { - if let Some(subtrie) = subtrie.take_if(|subtrie| prefix_set.contains(&subtrie.path)) { + if let Some(subtrie) = + subtrie.take_revealed_if(|subtrie| prefix_set.contains(&subtrie.path)) + { let prefix_set = if prefix_set.all() { unchanged_prefix_set = PrefixSetMut::all(); PrefixSetMut::all() @@ -1040,7 +1038,7 @@ pub struct SparseSubtrie { /// [`ParallelSparseTrie`]. /// /// There should be a node for this path in `nodes` map. - path: Nibbles, + pub(crate) path: Nibbles, /// The map from paths to sparse trie nodes within this subtrie. 
nodes: HashMap, /// Subset of fields for mutable access while `nodes` field is also being mutably borrowed. @@ -1061,10 +1059,15 @@ enum FindNextToLeafOutcome { impl SparseSubtrie { /// Creates a new empty subtrie with the specified root path. - fn new(path: Nibbles) -> Self { + pub(crate) fn new(path: Nibbles) -> Self { Self { path, ..Default::default() } } + /// Returns true if this subtrie has any nodes, false otherwise. + pub(crate) fn is_empty(&self) -> bool { + self.nodes.is_empty() + } + /// Returns true if the current path and its child are both found in the same level. fn is_child_same_level(current_path: &Nibbles, child_path: &Nibbles) -> bool { let current_level = core::mem::discriminant(&SparseSubtrieType::from_path(current_path)); @@ -1528,7 +1531,7 @@ impl SparseSubtrie { } /// Clears the subtrie, keeping the data structures allocated. - fn clear(&mut self) { + pub(crate) fn clear(&mut self) { self.nodes.clear(); self.inner.clear(); } @@ -2031,7 +2034,8 @@ enum SparseTrieUpdatesAction { #[cfg(test)] mod tests { use super::{ - path_subtrie_index_unchecked, ParallelSparseTrie, SparseSubtrie, SparseSubtrieType, + path_subtrie_index_unchecked, LowerSparseSubtrie, ParallelSparseTrie, SparseSubtrie, + SparseSubtrieType, }; use crate::trie::ChangedSubtrie; use alloy_primitives::{ @@ -2112,7 +2116,7 @@ mod tests { fn assert_subtrie_exists(&self, trie: &ParallelSparseTrie, path: &Nibbles) { let idx = path_subtrie_index_unchecked(path); assert!( - trie.lower_subtries[idx].is_some(), + trie.lower_subtries[idx].as_revealed_ref().is_some(), "Expected lower subtrie at path {path:?} to exist", ); } @@ -2125,7 +2129,7 @@ mod tests { ) -> &'a SparseSubtrie { let idx = path_subtrie_index_unchecked(path); trie.lower_subtries[idx] - .as_ref() + .as_revealed_ref() .unwrap_or_else(|| panic!("Lower subtrie at path {path:?} should exist")) } @@ -2140,7 +2144,7 @@ mod tests { let expected_path = Nibbles::from_nibbles(expected_path); let idx = path_subtrie_index_unchecked(&subtrie_prefix); - let subtrie = trie.lower_subtries[idx].as_ref().unwrap_or_else(|| { + let subtrie = trie.lower_subtries[idx].as_revealed_ref().unwrap_or_else(|| { panic!("Lower subtrie at prefix {subtrie_prefix:?} should exist") }); @@ -2404,7 +2408,7 @@ mod tests { let lower_sparse_nodes = sparse_trie .lower_subtries .iter() - .filter_map(Option::as_ref) + .filter_map(LowerSparseSubtrie::as_revealed_ref) .flat_map(|subtrie| subtrie.nodes.iter()); let upper_sparse_nodes = sparse_trie.upper_subtrie.nodes.iter(); @@ -2511,9 +2515,9 @@ mod tests { let subtrie_3_index = path_subtrie_index_unchecked(&subtrie_3.path); // Add subtries at specific positions - trie.lower_subtries[subtrie_1_index] = Some(subtrie_1.clone()); - trie.lower_subtries[subtrie_2_index] = Some(subtrie_2.clone()); - trie.lower_subtries[subtrie_3_index] = Some(subtrie_3); + trie.lower_subtries[subtrie_1_index] = LowerSparseSubtrie::Revealed(subtrie_1.clone()); + trie.lower_subtries[subtrie_2_index] = LowerSparseSubtrie::Revealed(subtrie_2.clone()); + trie.lower_subtries[subtrie_3_index] = LowerSparseSubtrie::Revealed(subtrie_3); let unchanged_prefix_set = PrefixSetMut::from([ Nibbles::from_nibbles([0x0]), @@ -2547,10 +2551,10 @@ mod tests { )] ); assert_eq!(unchanged_prefix_set, unchanged_prefix_set); - assert!(trie.lower_subtries[subtrie_2_index].is_none()); + assert!(trie.lower_subtries[subtrie_2_index].as_revealed_ref().is_none()); // First subtrie should remain unchanged - assert_eq!(trie.lower_subtries[subtrie_1_index], Some(subtrie_1)); + 
assert_eq!(trie.lower_subtries[subtrie_1_index], LowerSparseSubtrie::Revealed(subtrie_1)); } #[test] @@ -2565,9 +2569,9 @@ mod tests { let subtrie_3_index = path_subtrie_index_unchecked(&subtrie_3.path); // Add subtries at specific positions - trie.lower_subtries[subtrie_1_index] = Some(subtrie_1.clone()); - trie.lower_subtries[subtrie_2_index] = Some(subtrie_2.clone()); - trie.lower_subtries[subtrie_3_index] = Some(subtrie_3.clone()); + trie.lower_subtries[subtrie_1_index] = LowerSparseSubtrie::Revealed(subtrie_1.clone()); + trie.lower_subtries[subtrie_2_index] = LowerSparseSubtrie::Revealed(subtrie_2.clone()); + trie.lower_subtries[subtrie_3_index] = LowerSparseSubtrie::Revealed(subtrie_3.clone()); // Create a prefix set that matches any key let mut prefix_set = PrefixSetMut::all().freeze(); @@ -2589,7 +2593,7 @@ mod tests { ); assert_eq!(unchanged_prefix_set, PrefixSetMut::all()); - assert!(trie.lower_subtries.iter().all(Option::is_none)); + assert!(trie.lower_subtries.iter().all(|subtrie| subtrie.as_revealed_ref().is_none())); } #[test] @@ -2676,10 +2680,10 @@ mod tests { // Check that the lower subtrie was created let idx = path_subtrie_index_unchecked(&path); - assert!(trie.lower_subtries[idx].is_some()); + assert!(trie.lower_subtries[idx].as_revealed_ref().is_some()); // Check that the lower subtrie's path was correctly set - let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); + let lower_subtrie = trie.lower_subtries[idx].as_revealed_ref().unwrap(); assert_eq!(lower_subtrie.path, path); assert_matches!( @@ -2700,7 +2704,7 @@ mod tests { // Check that the lower subtrie's path hasn't changed let idx = path_subtrie_index_unchecked(&path); - let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); + let lower_subtrie = trie.lower_subtries[idx].as_revealed_ref().unwrap(); assert_eq!(lower_subtrie.path, Nibbles::from_nibbles([0x1, 0x2])); } } @@ -2742,9 +2746,9 @@ mod tests { // Child path (0x1, 0x2, 0x3) should be in lower trie let child_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); let idx = path_subtrie_index_unchecked(&child_path); - assert!(trie.lower_subtries[idx].is_some()); + assert!(trie.lower_subtries[idx].as_revealed_ref().is_some()); - let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); + let lower_subtrie = trie.lower_subtries[idx].as_revealed_ref().unwrap(); assert_eq!(lower_subtrie.path, child_path); assert_eq!(lower_subtrie.nodes.get(&child_path), Some(&SparseNode::Hash(child_hash))); } @@ -2769,9 +2773,9 @@ mod tests { // Child path (0x1, 0x2) should be in lower trie let child_path = Nibbles::from_nibbles([0x1, 0x2]); let idx = path_subtrie_index_unchecked(&child_path); - assert!(trie.lower_subtries[idx].is_some()); + assert!(trie.lower_subtries[idx].as_revealed_ref().is_some()); - let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); + let lower_subtrie = trie.lower_subtries[idx].as_revealed_ref().unwrap(); assert_eq!(lower_subtrie.path, child_path); assert_eq!(lower_subtrie.nodes.get(&child_path), Some(&SparseNode::Hash(child_hash))); } @@ -2837,7 +2841,7 @@ mod tests { for (i, child_path) in child_paths.iter().enumerate() { let idx = path_subtrie_index_unchecked(child_path); - let lower_subtrie = trie.lower_subtries[idx].as_ref().unwrap(); + let lower_subtrie = trie.lower_subtries[idx].as_revealed_ref().unwrap(); assert_eq!(&lower_subtrie.path, child_path); assert_eq!( lower_subtrie.nodes.get(child_path), @@ -2876,9 +2880,9 @@ mod tests { subtrie_3.reveal_node(leaf_3_path, &leaf_3, TrieMasks::none()).unwrap(); // Add subtries 
at specific positions - trie.lower_subtries[subtrie_1_index] = Some(subtrie_1); - trie.lower_subtries[subtrie_2_index] = Some(subtrie_2); - trie.lower_subtries[subtrie_3_index] = Some(subtrie_3); + trie.lower_subtries[subtrie_1_index] = LowerSparseSubtrie::Revealed(subtrie_1); + trie.lower_subtries[subtrie_2_index] = LowerSparseSubtrie::Revealed(subtrie_2); + trie.lower_subtries[subtrie_3_index] = LowerSparseSubtrie::Revealed(subtrie_3); let unchanged_prefix_set = PrefixSetMut::from([ Nibbles::from_nibbles([0x0]), @@ -2900,9 +2904,9 @@ mod tests { // Check that the prefix set was updated assert_eq!(trie.prefix_set, unchanged_prefix_set); // Check that subtries were returned back to the array - assert!(trie.lower_subtries[subtrie_1_index].is_some()); - assert!(trie.lower_subtries[subtrie_2_index].is_some()); - assert!(trie.lower_subtries[subtrie_3_index].is_some()); + assert!(trie.lower_subtries[subtrie_1_index].as_revealed_ref().is_some()); + assert!(trie.lower_subtries[subtrie_2_index].as_revealed_ref().is_some()); + assert!(trie.lower_subtries[subtrie_3_index].as_revealed_ref().is_some()); } #[test] @@ -3071,11 +3075,11 @@ mod tests { trie.remove_leaf(&leaf_full_path, provider).unwrap(); let upper_subtrie = &trie.upper_subtrie; - let lower_subtrie_50 = trie.lower_subtries[0x50].as_ref().unwrap(); + let lower_subtrie_50 = trie.lower_subtries[0x50].as_revealed_ref().unwrap(); // Check that the `SparseSubtrie` the leaf was removed from was itself removed, as it is now // empty. - assert_matches!(trie.lower_subtries[0x53].as_ref(), None); + assert_matches!(trie.lower_subtries[0x53].as_revealed_ref(), None); // Check that the leaf node was removed, and that its parent/grandparent were modified // appropriately. @@ -3194,8 +3198,8 @@ mod tests { // Check that both lower subtries were removed. 0x50 should have been removed because // removing its leaf made it empty. 0x51 should have been removed after its own leaf was // collapsed into the upper trie, leaving it also empty. - assert_matches!(trie.lower_subtries[0x50].as_ref(), None); - assert_matches!(trie.lower_subtries[0x51].as_ref(), None); + assert_matches!(trie.lower_subtries[0x50].as_revealed_ref(), None); + assert_matches!(trie.lower_subtries[0x51].as_revealed_ref(), None); // Check that the other leaf's value was moved to the upper trie let other_leaf_full_value = Nibbles::from_nibbles([0x5, 0x1, 0x3, 0x4]); @@ -3254,8 +3258,8 @@ mod tests { // Check that both lower subtries were removed. 0x20 should have been removed because // removing its leaf made it empty. 0x21 should have been removed after its own leaf was // collapsed into the upper trie, leaving it also empty. - assert_matches!(trie.lower_subtries[0x20].as_ref(), None); - assert_matches!(trie.lower_subtries[0x21].as_ref(), None); + assert_matches!(trie.lower_subtries[0x20].as_revealed_ref(), None); + assert_matches!(trie.lower_subtries[0x21].as_revealed_ref(), None); // Check that the other leaf's value was moved to the upper trie let other_leaf_full_value = Nibbles::from_nibbles([0x2, 0x1, 0x5, 0x6]); @@ -3340,7 +3344,7 @@ mod tests { // 1. The branch at 0x123 should become an extension to 0x12345 // 2. That extension should merge with the root extension at 0x // 3. 
The lower subtrie's path should be updated to 0x12345 - let lower_subtrie = trie.lower_subtries[0x12].as_ref().unwrap(); + let lower_subtrie = trie.lower_subtries[0x12].as_revealed_ref().unwrap(); assert_eq!(lower_subtrie.path, Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4, 0x5])); // Verify the root extension now points all the way to 0x12345 @@ -3517,7 +3521,7 @@ mod tests { trie.remove_leaf(&leaf_full_path, provider).unwrap(); let upper_subtrie = &trie.upper_subtrie; - let lower_subtrie_10 = trie.lower_subtries[0x01].as_ref().unwrap(); + let lower_subtrie_10 = trie.lower_subtries[0x01].as_revealed_ref().unwrap(); // Verify that hash fields are unset for all nodes along the path to the removed leaf assert_matches!( @@ -3606,14 +3610,14 @@ mod tests { let leaf_2_subtrie_idx = path_subtrie_index_unchecked(&leaf_2_path); trie.lower_subtries[leaf_1_subtrie_idx] - .as_mut() + .as_revealed_mut() .unwrap() .nodes .get_mut(&leaf_1_path) .unwrap() .set_hash(None); trie.lower_subtries[leaf_2_subtrie_idx] - .as_mut() + .as_revealed_mut() .unwrap() .nodes .get_mut(&leaf_2_path) @@ -3639,8 +3643,8 @@ mod tests { assert_eq!(root, hash_builder_root); // Verify hashes were computed - let leaf_1_subtrie = trie.lower_subtries[leaf_1_subtrie_idx].as_ref().unwrap(); - let leaf_2_subtrie = trie.lower_subtries[leaf_2_subtrie_idx].as_ref().unwrap(); + let leaf_1_subtrie = trie.lower_subtries[leaf_1_subtrie_idx].as_revealed_ref().unwrap(); + let leaf_2_subtrie = trie.lower_subtries[leaf_2_subtrie_idx].as_revealed_ref().unwrap(); assert!(trie.upper_subtrie.nodes.get(&extension_path).unwrap().hash().is_some()); assert!(trie.upper_subtrie.nodes.get(&branch_path).unwrap().hash().is_some()); assert!(leaf_1_subtrie.nodes.get(&leaf_1_path).unwrap().hash().is_some()); From 38f02bb46e69396e2933f610f17e318ec1e55058 Mon Sep 17 00:00:00 2001 From: Femi Bankole Date: Tue, 8 Jul 2025 10:56:41 +0100 Subject: [PATCH 084/305] feat: include chain-id query param for etherscan v2 API (#17167) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + Cargo.toml | 2 +- crates/consensus/debug-client/Cargo.toml | 3 +- .../debug-client/src/providers/etherscan.rs | 39 ++++++++++++------- crates/node/builder/src/launch/debug.rs | 1 + 5 files changed, 29 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fd883e79079..8a795a4d1b9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7548,6 +7548,7 @@ dependencies = [ "reth-tracing", "ringbuffer", "serde", + "serde_json", "tokio", ] diff --git a/Cargo.toml b/Cargo.toml index 348fcef66c5..0d29af1b813 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -464,7 +464,7 @@ op-revm = { version = "8.0.2", default-features = false } revm-inspectors = "0.26.5" # eth -alloy-chains = { version = "0.2.0", default-features = false } +alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.2.0" alloy-eip2124 = { version = "0.2.0", default-features = false } alloy-evm = { version = "0.14", default-features = false } diff --git a/crates/consensus/debug-client/Cargo.toml b/crates/consensus/debug-client/Cargo.toml index 784c52c3b53..5ff3735c33c 100644 --- a/crates/consensus/debug-client/Cargo.toml +++ b/crates/consensus/debug-client/Cargo.toml @@ -28,8 +28,9 @@ auto_impl.workspace = true derive_more.workspace = true futures.workspace = true eyre.workspace = true -reqwest = { workspace = true, features = ["rustls-tls", "json"] } +reqwest = { workspace = true, features = ["rustls-tls"] } serde = { workspace = true, features = ["derive"] } tokio = { workspace = true, features = ["time"] } 
+serde_json.workspace = true ringbuffer.workspace = true diff --git a/crates/consensus/debug-client/src/providers/etherscan.rs b/crates/consensus/debug-client/src/providers/etherscan.rs index c52ee609d20..ea21d95e73d 100644 --- a/crates/consensus/debug-client/src/providers/etherscan.rs +++ b/crates/consensus/debug-client/src/providers/etherscan.rs @@ -3,7 +3,7 @@ use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; use alloy_json_rpc::{Response, ResponsePayload}; use reqwest::Client; -use reth_tracing::tracing::warn; +use reth_tracing::tracing::{debug, warn}; use serde::{de::DeserializeOwned, Serialize}; use std::{sync::Arc, time::Duration}; use tokio::{sync::mpsc, time::interval}; @@ -14,6 +14,7 @@ pub struct EtherscanBlockProvider { http_client: Client, base_url: String, api_key: String, + chain_id: u64, interval: Duration, #[debug(skip)] convert: Arc PrimitiveBlock + Send + Sync>, @@ -27,12 +28,14 @@ where pub fn new( base_url: String, api_key: String, + chain_id: u64, convert: impl Fn(RpcBlock) -> PrimitiveBlock + Send + Sync + 'static, ) -> Self { Self { http_client: Client::new(), base_url, api_key, + chain_id, interval: Duration::from_secs(3), convert: Arc::new(convert), } @@ -56,20 +59,26 @@ where tag => tag.to_string(), }; - let resp: Response = self - .http_client - .get(&self.base_url) - .query(&[ - ("module", "proxy"), - ("action", "eth_getBlockByNumber"), - ("tag", &tag), - ("boolean", "true"), - ("apikey", &self.api_key), - ]) - .send() - .await? - .json() - .await?; + let mut req = self.http_client.get(&self.base_url).query(&[ + ("module", "proxy"), + ("action", "eth_getBlockByNumber"), + ("tag", &tag), + ("boolean", "true"), + ("apikey", &self.api_key), + ]); + + if !self.base_url.contains("chainid=") { + // only append chainid if not part of the base url already + req = req.query(&[("chainid", &self.chain_id.to_string())]); + } + + let resp = req.send().await?.text().await?; + + debug!(target: "etherscan", %resp, "fetched block from etherscan"); + + let resp: Response = serde_json::from_str(&resp).inspect_err(|err| { + warn!(target: "etherscan", "Failed to parse block response from etherscan: {}", err); + })?; let payload = resp.payload; match payload { diff --git a/crates/node/builder/src/launch/debug.rs b/crates/node/builder/src/launch/debug.rs index dfc3ba27d56..64762587c62 100644 --- a/crates/node/builder/src/launch/debug.rs +++ b/crates/node/builder/src/launch/debug.rs @@ -145,6 +145,7 @@ where "etherscan api key not found for rpc consensus client for chain: {chain}" ) })?, + chain.id(), N::Types::rpc_to_primitive_block, ); let rpc_consensus_client = DebugConsensusClient::new( From 68309cac2826681e0895d0dad9413341c035c4de Mon Sep 17 00:00:00 2001 From: Noisy <125606576+donatik27@users.noreply.github.com> Date: Tue, 8 Jul 2025 12:14:33 +0200 Subject: [PATCH 085/305] docs: update snapshot URL from downloads.merkle.io to snapshots.merkle.io (#17248) Co-authored-by: Matthias Seitz --- crates/cli/commands/src/download.rs | 4 ++-- docs/vocs/docs/pages/cli/reth/download.mdx | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/cli/commands/src/download.rs b/crates/cli/commands/src/download.rs index 08c21d9eb83..2e33729e395 100644 --- a/crates/cli/commands/src/download.rs +++ b/crates/cli/commands/src/download.rs @@ -17,7 +17,7 @@ use tokio::task; use tracing::info; const BYTE_UNITS: [&str; 4] = ["B", "KB", "MB", "GB"]; -const MERKLE_BASE_URL: &str = "https://downloads.merkle.io"; +const MERKLE_BASE_URL: &str = 
"https://snapshots.merkle.io"; const EXTENSION_TAR_FILE: &str = ".tar.lz4"; #[derive(Debug, Parser)] @@ -32,7 +32,7 @@ pub struct DownloadCommand { long_help = "Specify a snapshot URL or let the command propose a default one.\n\ \n\ Available snapshot sources:\n\ - - https://downloads.merkle.io (default, mainnet archive)\n\ + - https://snapshots.merkle.io (default, mainnet archive)\n\ - https://publicnode.com/snapshots (full nodes & testnets)\n\ \n\ If no URL is provided, the latest mainnet archive snapshot\n\ diff --git a/docs/vocs/docs/pages/cli/reth/download.mdx b/docs/vocs/docs/pages/cli/reth/download.mdx index 04a7228f212..e170a321a4f 100644 --- a/docs/vocs/docs/pages/cli/reth/download.mdx +++ b/docs/vocs/docs/pages/cli/reth/download.mdx @@ -71,7 +71,7 @@ Database: Specify a snapshot URL or let the command propose a default one. Available snapshot sources: - - https://downloads.merkle.io (default, mainnet archive) + - https://snapshots.merkle.io (default, mainnet archive) - https://publicnode.com/snapshots (full nodes & testnets) If no URL is provided, the latest mainnet archive snapshot From 11db28e9b7e93d86c1078228df4d24406b8f336d Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 8 Jul 2025 06:15:04 -0400 Subject: [PATCH 086/305] feat(trie): add parallel sparse trie to TreeConfig (#17265) --- crates/engine/primitives/src/config.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/crates/engine/primitives/src/config.rs b/crates/engine/primitives/src/config.rs index 9794caf4473..ccff97bc064 100644 --- a/crates/engine/primitives/src/config.rs +++ b/crates/engine/primitives/src/config.rs @@ -65,6 +65,8 @@ pub struct TreeConfig { always_compare_trie_updates: bool, /// Whether to disable cross-block caching and parallel prewarming. disable_caching_and_prewarming: bool, + /// Whether to enable the parallel sparse trie state root algorithm. + enable_parallel_sparse_trie: bool, /// Whether to enable state provider metrics. state_provider_metrics: bool, /// Cross-block cache size in bytes. @@ -106,6 +108,7 @@ impl Default for TreeConfig { legacy_state_root: false, always_compare_trie_updates: false, disable_caching_and_prewarming: false, + enable_parallel_sparse_trie: false, state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE, has_enough_parallelism: has_enough_parallelism(), @@ -130,6 +133,7 @@ impl TreeConfig { legacy_state_root: bool, always_compare_trie_updates: bool, disable_caching_and_prewarming: bool, + enable_parallel_sparse_trie: bool, state_provider_metrics: bool, cross_block_cache_size: u64, has_enough_parallelism: bool, @@ -148,6 +152,7 @@ impl TreeConfig { legacy_state_root, always_compare_trie_updates, disable_caching_and_prewarming, + enable_parallel_sparse_trie, state_provider_metrics, cross_block_cache_size, has_enough_parallelism, @@ -205,6 +210,11 @@ impl TreeConfig { self.state_provider_metrics } + /// Returns whether or not the parallel sparse trie is enabled. + pub const fn enable_parallel_sparse_trie(&self) -> bool { + self.enable_parallel_sparse_trie + } + /// Returns whether or not cross-block caching and parallel prewarming should be used. 
 pub const fn disable_caching_and_prewarming(&self) -> bool {
        self.disable_caching_and_prewarming
@@ -329,6 +339,15 @@ impl TreeConfig {
         self
     }
 
+    /// Setter for using the parallel sparse trie
+    pub const fn with_enable_parallel_sparse_trie(
+        mut self,
+        enable_parallel_sparse_trie: bool,
+    ) -> Self {
+        self.enable_parallel_sparse_trie = enable_parallel_sparse_trie;
+        self
+    }
+
     /// Setter for maximum number of concurrent proof tasks.
     pub const fn with_max_proof_task_concurrency(
         mut self,

From 7017627a9f43a4971c0ff0004efbb004e0c17ef5 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Tue, 8 Jul 2025 06:15:40 -0400
Subject: [PATCH 087/305] chore(trie): add Send and Sync to SparseTrieInterface
 (#17270)

---
 crates/trie/sparse/src/traits.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs
index 62ca424cd32..6d615bb8131 100644
--- a/crates/trie/sparse/src/traits.rs
+++ b/crates/trie/sparse/src/traits.rs
@@ -18,7 +18,7 @@ use crate::blinded::BlindedProvider;
 /// This trait abstracts over different sparse trie implementations (serial vs parallel)
 /// while providing a unified interface for the core trie operations needed by the
 /// [`crate::SparseTrie`] enum.
-pub trait SparseTrieInterface: Default + Debug {
+pub trait SparseTrieInterface: Default + Debug + Send + Sync {
     /// Creates a new revealed sparse trie from the given root node.
     ///
     /// This function initializes the internal structures and then reveals the root.

From 62c5a57302fba3a66de188d0b52155dabbe57131 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roman=20Hodul=C3=A1k?=
Date: Tue, 8 Jul 2025 12:31:19 +0200
Subject: [PATCH 088/305] docs(guides): Add history expiry (#17274)

---
 .../vocs/docs/pages/guides/history-expiry.mdx | 49 +++++++++++++++++++
 1 file changed, 49 insertions(+)
 create mode 100644 docs/vocs/docs/pages/guides/history-expiry.mdx

diff --git a/docs/vocs/docs/pages/guides/history-expiry.mdx b/docs/vocs/docs/pages/guides/history-expiry.mdx
new file mode 100644
index 00000000000..d3a0cb06386
--- /dev/null
+++ b/docs/vocs/docs/pages/guides/history-expiry.mdx
@@ -0,0 +1,49 @@
+---
+description: Usage of tools for importing, exporting and pruning historical blocks
+---
+
+# History Expiry
+
+In this chapter, we will learn how to use tools for dealing with historical data: its import, export, and removal.
+
+We will use [reth cli](../cli/cli) to import and export historical data.
+
+## File format
+
+The historical data is packaged and distributed in files of special formats with different names, all of which are based on [e2store](https://github.com/status-im/nimbus-eth2/blob/613f4a9a50c9c4bd8568844eaffb3ac15d067e56/docs/e2store.md#introduction). The most important ones are **ERA1**, which deals with the block range from genesis until the last pre-merge block, and **ERA**, which deals with the block range from the merge onwards. See their [specification](https://github.com/eth-clients/e2store-format-specs) for more details.
+
+The contents of these archives are an ordered sequence of blocks. We're mostly concerned with headers and transactions. For ERA1, there are 8192 blocks per file, except for the last one, i.e. the one containing the final pre-merge block, which can hold fewer.
+
+## Import
+
+In this section, we discuss how to get blocks from ERA1 files.
+
+### Automatic sync
+
+If enabled, importing blocks from ERA1 files can be done automatically with no manual steps required.
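+In practice this amounts to passing a single extra flag when starting the node, e.g. `reth node --era.enable`; the flag is described below.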
+
+#### Enabling the ERA stage
+
+The import from ERA1 files within the pre-merge block range is included in the [reth node](../cli/reth/node) synchronization pipeline. It is disabled by default. To enable it, pass the `--era.enable` flag when running the [`node`](../cli/reth/node) command.
+
+The benefit of using this option is a significant increase in synchronization speed for the headers and especially the bodies stages of the pipeline within the ERA1 block range. We encourage you to use it! Eventually, it will become enabled by default.
+
+#### Using the ERA stage
+
+When enabled, the import from ERA1 files runs as its own separate stage before all others. It is an optional stage that does the work of the headers and bodies stages at significantly higher speed. The checkpoints of these stages are shifted by the ERA stage.
+
+### Manual import
+
+If you want to import block headers and transactions from ERA1 files without running the synchronization pipeline, you may use the [`import-era`](../cli/reth/import-era) command.
+
+### Options
+
+Both ways of importing the ERA1 files have the same options because they use the same underlying subsystems. No options are mandatory.
+
+#### Sources
+
+There are two kinds of data sources for the ERA1 import:
+* Remote from an HTTP URL. Use the option `--era.url` with an ERA1 hosting provider URL.
+* Local from a file-system directory. Use the option `--era.path` with a directory containing ERA1 files.
+
+The two options cannot be used at the same time. If no option is specified, the remote source is used with a URL derived from the chain ID. Only Mainnet and Sepolia have ERA1 files. If the node is running on a different chain, no source is provided and nothing is imported.

From 9fe0f25e7b703aff21aaae9c4aabf3897d353d77 Mon Sep 17 00:00:00 2001
From: Merkel Tranjes <140164174+rnkrtt@users.noreply.github.com>
Date: Tue, 8 Jul 2025 13:15:59 +0200
Subject: [PATCH 089/305] docs: fix correction in storage reverts iterator test
 comment (#17276)

---
 crates/storage/provider/src/bundle_state/state_reverts.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs
index a44e038d49b..7ffdc153b22 100644
--- a/crates/storage/provider/src/bundle_state/state_reverts.rs
+++ b/crates/storage/provider/src/bundle_state/state_reverts.rs
@@ -173,7 +173,7 @@ mod tests {
             (B256::from_slice(&[8; 32]), U256::from(70)), // Revert takes priority.
             (B256::from_slice(&[9; 32]), U256::from(80)), // Only revert present.
             (B256::from_slice(&[10; 32]), U256::from(85)), // Wiped entry.
- (B256::from_slice(&[15; 32]), U256::from(90)), // WGreater revert entry + (B256::from_slice(&[15; 32]), U256::from(90)), // Greater revert entry ] ); } From dbe7ee9c21392f360ff01f6307480f5d7dd73a3a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 8 Jul 2025 13:31:56 +0200 Subject: [PATCH 090/305] chore: bump 1.5.1 (#17277) --- Cargo.lock | 264 ++++++++++++++++++++++++++--------------------------- Cargo.toml | 2 +- 2 files changed, 133 insertions(+), 133 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a795a4d1b9..4d38c6923d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3033,7 +3033,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3621,7 +3621,7 @@ dependencies = [ [[package]] name = "exex-subscription" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "clap", @@ -6065,7 +6065,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.5.0" +version = "1.5.1" dependencies = [ "clap", "reth-cli-util", @@ -7143,7 +7143,7 @@ checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3" [[package]] name = "reth" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7191,7 +7191,7 @@ dependencies = [ [[package]] name = "reth-alloy-provider" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7220,7 +7220,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7243,7 +7243,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -7281,7 +7281,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7312,7 +7312,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7332,7 +7332,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-genesis", "clap", @@ -7345,7 +7345,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.5.0" +version = "1.5.1" dependencies = [ "ahash", "alloy-chains", @@ -7423,7 +7423,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.5.0" +version = "1.5.1" dependencies = [ "reth-tasks", "tokio", @@ -7432,7 +7432,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7452,7 +7452,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7476,7 +7476,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.5.0" +version = "1.5.1" dependencies = [ "convert_case", "proc-macro2", @@ -7487,7 +7487,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "eyre", @@ -7504,7 +7504,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7516,7 +7516,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7530,7 
+7530,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7554,7 +7554,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7587,7 +7587,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7617,7 +7617,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7646,7 +7646,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7663,7 +7663,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7690,7 +7690,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7715,7 +7715,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7743,7 +7743,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7782,7 +7782,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7842,7 +7842,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.5.0" +version = "1.5.1" dependencies = [ "aes", "alloy-primitives", @@ -7872,7 +7872,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7895,7 +7895,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7919,7 +7919,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.5.0" +version = "1.5.1" dependencies = [ "futures", "pin-project", @@ -7949,7 +7949,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8018,7 +8018,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8044,7 +8044,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8066,7 +8066,7 @@ dependencies = [ [[package]] name = "reth-era-downloader" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "bytes", @@ -8083,7 +8083,7 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8112,7 +8112,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.5.0" +version = "1.5.1" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -8122,7 +8122,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8160,7 +8160,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version 
= "1.5.0" +version = "1.5.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8185,7 +8185,7 @@ dependencies = [ [[package]] name = "reth-ethereum" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -8224,7 +8224,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8283,7 +8283,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8299,7 +8299,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8317,7 +8317,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8330,7 +8330,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8356,7 +8356,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8381,7 +8381,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "rayon", @@ -8391,7 +8391,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8416,7 +8416,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8438,7 +8438,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-evm", "alloy-primitives", @@ -8450,7 +8450,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8470,7 +8470,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8514,7 +8514,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eips", "eyre", @@ -8546,7 +8546,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8563,7 +8563,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.5.0" +version = "1.5.1" dependencies = [ "serde", "serde_json", @@ -8572,7 +8572,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8599,7 +8599,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.5.0" +version = "1.5.1" dependencies = [ "bytes", "futures", @@ -8621,7 +8621,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.5.0" +version = "1.5.1" dependencies = [ "bitflags 2.9.1", "byteorder", @@ -8640,7 +8640,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.5.0" +version = "1.5.1" dependencies = [ "bindgen", "cc", @@ -8648,7 +8648,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.5.0" +version = "1.5.1" dependencies = [ "futures", "metrics", @@ -8659,14 +8659,14 @@ 
dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.5.0" +version = "1.5.1" dependencies = [ "futures-util", "if-addrs", @@ -8680,7 +8680,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8741,7 +8741,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -8763,7 +8763,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8785,7 +8785,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8802,7 +8802,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -8815,7 +8815,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.5.0" +version = "1.5.1" dependencies = [ "anyhow", "bincode 1.3.3", @@ -8833,7 +8833,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -8856,7 +8856,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8921,7 +8921,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8973,7 +8973,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-contract", @@ -9026,7 +9026,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9049,7 +9049,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.5.0" +version = "1.5.1" dependencies = [ "eyre", "http", @@ -9071,7 +9071,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.5.0" +version = "1.5.1" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9083,7 +9083,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.5.0" +version = "1.5.1" dependencies = [ "reth-chainspec", "reth-cli-util", @@ -9122,7 +9122,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9148,7 +9148,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9196,7 +9196,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9228,7 +9228,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9254,7 +9254,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9264,7 +9264,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", 
"alloy-eips", @@ -9324,7 +9324,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9362,7 +9362,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9389,7 +9389,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9449,7 +9449,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9467,7 +9467,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9504,7 +9504,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9524,7 +9524,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.5.0" +version = "1.5.1" dependencies = [ "pin-project", "reth-payload-primitives", @@ -9535,7 +9535,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9554,7 +9554,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9563,7 +9563,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -9572,7 +9572,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9594,7 +9594,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9632,7 +9632,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9681,7 +9681,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9713,7 +9713,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "arbitrary", @@ -9732,7 +9732,7 @@ dependencies = [ [[package]] name = "reth-ress-protocol" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9758,7 +9758,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9784,7 +9784,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9798,7 +9798,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -9876,7 +9876,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eips", "alloy-genesis", @@ -9903,7 +9903,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.5.0" +version = "1.5.1" dependencies 
= [ "alloy-eips", "alloy-primitives", @@ -9922,7 +9922,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eips", "alloy-network", @@ -9977,7 +9977,7 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-json-rpc", @@ -9998,7 +9998,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10034,7 +10034,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10078,7 +10078,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10121,7 +10121,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10138,7 +10138,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10153,7 +10153,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10214,7 +10214,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10243,7 +10243,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "arbitrary", @@ -10260,7 +10260,7 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10285,7 +10285,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "assert_matches", @@ -10309,7 +10309,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "clap", @@ -10321,7 +10321,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10344,7 +10344,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10359,7 +10359,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.5.0" +version = "1.5.1" dependencies = [ "auto_impl", "dyn-clone", @@ -10376,7 +10376,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10391,7 +10391,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.5.0" +version = "1.5.1" dependencies = [ "tokio", "tokio-stream", @@ -10400,7 +10400,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.5.0" +version = "1.5.1" dependencies = [ "clap", "eyre", @@ -10414,7 +10414,7 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" -version = "1.5.0" +version = "1.5.1" dependencies = [ "opentelemetry", "opentelemetry-otlp", @@ -10427,7 +10427,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10473,7 
+10473,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10505,7 +10505,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10537,7 +10537,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10563,7 +10563,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10592,7 +10592,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10624,7 +10624,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse-parallel" -version = "1.5.0" +version = "1.5.1" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10648,7 +10648,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.5.0" +version = "1.5.1" dependencies = [ "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index 0d29af1b813..0e387529ca1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.5.0" +version = "1.5.1" edition = "2021" rust-version = "1.86" license = "MIT OR Apache-2.0" From bb1e44e8ab921b3ef632f54f2107c24b7a984c77 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Tue, 8 Jul 2025 13:57:40 +0200 Subject: [PATCH 091/305] fix(trie): ParallelSparseTrie: remove leaves from upper subtrie when update in a lower (#17278) --- crates/trie/sparse-parallel/src/trie.rs | 152 +++++++++++++++++++++++- 1 file changed, 148 insertions(+), 4 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index e2df6cf005f..3eacdd9a7dc 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -257,6 +257,12 @@ impl SparseTrieInterface for ParallelSparseTrie { // If we reached the max depth of the upper trie, we may have had more nodes to insert. if let Some(next_path) = next.filter(|n| !SparseSubtrieType::path_len_is_upper(n.len())) { + // The value was inserted into the upper subtrie's `values` at the top of this method. + // At this point we know the value is not in the upper subtrie, and the call to + // `update_leaf` below will insert it into the lower subtrie. So remove it from the + // upper subtrie. + self.upper_subtrie.inner.values.remove(&full_path); + // Use subtrie_for_path to ensure the subtrie has the correct path. 
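+ // (For example, a leaf whose full path starts with the nibbles 0x1 0x2 is + // routed to the lower subtrie at index 0x12, so its value belongs in that + // subtrie's `values` map rather than in `upper_subtrie.inner.values`.)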
// // The next_path here represents where we need to continue traversal, which may @@ -2039,6 +2045,7 @@ mod tests { }; use crate::trie::ChangedSubtrie; use alloy_primitives::{ + b256, hex, map::{foldhash::fast::RandomState, B256Set, DefaultHashBuilder, HashMap}, B256, }; @@ -3691,8 +3698,8 @@ mod tests { // // Final trie structure: // Upper trie: - // 0x: Branch { state_mask: 0x10 } - // └── 0x1: Extension { key: 0x } + // 0x: Extension { key: 0x1 } + // └── 0x1: Branch { state_mask: 0x1100 } // └── Subtrie (0x12): pointer to lower subtrie // └── Subtrie (0x13): pointer to lower subtrie // @@ -3712,14 +3719,18 @@ mod tests { // Verify upper trie has a leaf at the root with key 1345 ctx.assert_upper_subtrie(&trie) - .has_leaf(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x3, 0x4, 0x5])); + .has_leaf(&Nibbles::default(), &Nibbles::from_nibbles([0x1, 0x3, 0x4, 0x5])) + .has_value(&leaf1_path, &value1); // Add leaf 0x1234 - this should go first in the upper subtrie let (leaf2_path, value2) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 2); trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); // Upper trie should now have a branch at 0x1 - ctx.assert_upper_subtrie(&trie).has_branch(&Nibbles::from_nibbles([0x1]), &[0x2, 0x3]); + ctx.assert_upper_subtrie(&trie) + .has_branch(&Nibbles::from_nibbles([0x1]), &[0x2, 0x3]) + .has_no_value(&leaf1_path) + .has_no_value(&leaf2_path); // Add leaf 0x1245 - this should cause a branch and create the 0x12 subtrie let (leaf3_path, value3) = ctx.create_test_leaf([0x1, 0x2, 0x4, 0x5], 3); @@ -3746,6 +3757,15 @@ mod tests { ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) .has_value(&leaf2_path, &value2) .has_value(&leaf3_path, &value3); + + // Upper trie has no values + ctx.assert_upper_subtrie(&trie) + .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0x1])) + .has_branch(&Nibbles::from_nibbles([0x1]), &[0x2, 0x3]) + .has_no_value(&leaf1_path) + .has_no_value(&leaf2_path) + .has_no_value(&leaf3_path) + .has_no_value(&leaf4_path); } #[test] @@ -4549,4 +4569,128 @@ mod tests { .has_value(&leaf1_path, &value1) .has_value(&leaf2_path, &value2); } + + #[test] + fn test_hoodie_block_1_data() { + // Reveal node at path Nibbles(0x) - root branch node + let root_branch_stack = vec![ + hex!("a0550b6aba4dd4582a2434d2cbdad8d3007d09f622d7a6e6eaa7a49385823c2fa2"), + hex!("a04788a4975a9e1efd29b834fd80fdfe8a57cc1b1c5ace6d30ce5a36a15e0092b3"), + hex!("a093aeccf87da304e6f7d09edc5d7bd3a552808866d2149dd0940507a8f9bfa910"), + hex!("a08b5b423ba68d0dec2eca1f408076f9170678505eb4a5db2abbbd83bb37666949"), + hex!("a08592f62216af4218098a78acad7cf472a727fb55e6c27d3cfdf2774d4518eb83"), + hex!("a0ef02aeee845cb64c11f85edc1a3094227c26445952554b8a9248915d80c746c3"), + hex!("a0df2529ee3a1ce4df5a758cf17e6a86d0fb5ea22ab7071cf60af6412e9b0a428a"), + hex!("a0acaa1092db69cd5a63676685827b3484c4b80dc1d3361f6073bbb9240101e144"), + hex!("a09c3f2bb2a729d71f246a833353ade65667716bb330e0127a3299a42d11200f93"), + hex!("a0ce978470f4c0b1f8069570563a14d2b79d709add2db4bf22dd9b6aed3271c566"), + hex!("a095f783cd1d464a60e3c8adcadc28c6eb9fec7306664df39553be41dccc909606"), + hex!("a0a9083f5fb914b255e1feb5d951a4dfddacf3c8003ef1d1ec6a13bb6ba5b2ac62"), + hex!("a0fec113d537d8577cd361e0cabf5e95ef58f1cc34318292fdecce9fae57c3e094"), + hex!("a08b7465f5fe8b3e3c0d087cb7521310d4065ef2a0ee43bf73f68dee8a5742b3dd"), + hex!("a0c589aa1ae3d5fd87d8640957f7d5184a4ac06f393b453a8e8ed7e8fba0d385c8"), + hex!("a0b516d6f3352f87beab4ed6e7322f191fc7a147686500ef4de7dd290ad784ef51"), + ]; + + let 
root_branch_rlp_stack: Vec<RlpNode> = root_branch_stack + .iter() + .map(|hex_str| RlpNode::from_raw_rlp(&hex_str[..]).unwrap()) + .collect(); + + let root_branch_node = BranchNode::new( + root_branch_rlp_stack, + TrieMask::new(0b1111111111111111), // state_mask: all 16 children present + ); + + let root_branch_masks = TrieMasks { + hash_mask: Some(TrieMask::new(0b1111111111111111)), + tree_mask: Some(TrieMask::new(0b1111111111111111)), + }; + + let mut trie = ParallelSparseTrie::from_root( + TrieNode::Branch(root_branch_node), + root_branch_masks, + true, + ) + .unwrap(); + + // Reveal node at path Nibbles(0x3) - branch node + let branch_0x3_stack = vec![ + hex!("a09da7d9755fe0c558b3c3de9fdcdf9f28ae641f38c9787b05b73ab22ae53af3e2"), + hex!("a0d9990bf0b810d1145ecb2b011fd68c63cc85564e6724166fd4a9520180706e5f"), + hex!("a0f60eb4b12132a40df05d9bbdb88bbde0185a3f097f3c76bf4200c23eda26cf86"), + hex!("a0ca976997ddaf06f18992f6207e4f6a05979d07acead96568058789017cc6d06b"), + hex!("a04d78166b48044fdc28ed22d2fd39c8df6f8aaa04cb71d3a17286856f6893ff83"), + hex!("a021d4f90c34d3f1706e78463b6482bca77a3aa1cd059a3f326c42a1cfd30b9b60"), + hex!("a0fc3b71c33e2e6b77c5e494c1db7fdbb447473f003daf378c7a63ba9bf3f0049d"), + hex!("a0e33ed2be194a3d93d343e85642447c93a9d0cfc47a016c2c23d14c083be32a7c"), + hex!("a07b8e7a21c1178d28074f157b50fca85ee25c12568ff8e9706dcbcdacb77bf854"), + hex!("a0973274526811393ea0bf4811ca9077531db00d06b86237a2ecd683f55ba4bcb0"), + hex!("a03a93d726d7487874e51b52d8d534c63aa2a689df18e3b307c0d6cb0a388b00f3"), + hex!("a06aa67101d011d1c22fe739ef83b04b5214a3e2f8e1a2625d8bfdb116b447e86f"), + hex!("a02dd545b33c62d33a183e127a08a4767fba891d9f3b94fc20a2ca02600d6d1fff"), + hex!("a0fe6db87d00f06d53bff8169fa497571ff5af1addfb715b649b4d79dd3e394b04"), + hex!("a0d9240a9d2d5851d05a97ff3305334dfdb0101e1e321fc279d2bb3cad6afa8fc8"), + hex!("a01b69c6ab5173de8a8ec53a6ebba965713a4cc7feb86cb3e230def37c230ca2b2"), + ]; + + let branch_0x3_rlp_stack: Vec<RlpNode> = branch_0x3_stack + .iter() + .map(|hex_str| RlpNode::from_raw_rlp(&hex_str[..]).unwrap()) + .collect(); + + let branch_0x3_node = BranchNode::new( + branch_0x3_rlp_stack, + TrieMask::new(0b1111111111111111), // state_mask: all 16 children present + ); + + let branch_0x3_masks = TrieMasks { + hash_mask: Some(TrieMask::new(0b0100010000010101)), + tree_mask: Some(TrieMask::new(0b0100000000000000)), + }; + + trie.reveal_node( + Nibbles::from_nibbles([0x3]), + TrieNode::Branch(branch_0x3_node), + branch_0x3_masks, + ) + .unwrap(); + + // Reveal node at path Nibbles(0x37) - leaf node + let leaf_path = Nibbles::from_nibbles([0x3, 0x7]); + let leaf_key = Nibbles::unpack( + &hex!("d65eaa92c6bc4c13a5ec45527f0c18ea8932588728769ec7aecfe6d9f32e42")[..], + ); + let leaf_value = hex!("f8440180a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0f57acd40259872606d76197ef052f3d35588dadf919ee1f0e3cb9b62d3f4b02c").to_vec(); + + let leaf_node = LeafNode::new(leaf_key, leaf_value); + let leaf_masks = TrieMasks::none(); + + trie.reveal_node(leaf_path, TrieNode::Leaf(leaf_node), leaf_masks).unwrap(); + + // Update leaf with its new value + let mut leaf_full_path = leaf_path; + leaf_full_path.extend(&leaf_key); + + let leaf_new_value = vec![ + 248, 68, 1, 128, 160, 224, 163, 152, 169, 122, 160, 155, 102, 53, 41, 0, 47, 28, 205, + 190, 199, 5, 215, 108, 202, 22, 138, 70, 196, 178, 193, 208, 18, 96, 95, 63, 238, 160, + 245, 122, 205, 64, 37, 152, 114, 96, 109, 118, 25, 126, 240, 82, 243, 211, 85, 136, + 218, 223, 145, 158, 225, 240, 227, 203, 155, 98, 211, 244, 176, 44, + ]; + 
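+ // The new value is again an RLP-encoded trie account (0xf8 0x44 opens a 68-byte list): + // nonce 1, zero balance, a 32-byte storage root, and a 32-byte code hash. It keeps the + // revealed leaf's code hash but replaces the empty storage root with a non-empty one, + // so the leaf update must change the computed root.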
trie.update_leaf(leaf_full_path, leaf_new_value.clone(), DefaultBlindedProvider).unwrap(); + + // Sanity checks before calculating the root + assert_eq!( + Some(&leaf_new_value), + trie.lower_subtrie_for_path(&leaf_path).unwrap().inner.values.get(&leaf_full_path) + ); + assert!(trie.upper_subtrie.inner.values.is_empty()); + + // Assert the root hash matches the expected value + let expected_root = + b256!("29b07de8376e9ce7b3a69e9b102199869514d3f42590b5abc6f7d48ec9b8665c"); + assert_eq!(trie.root(), expected_root); + } } From eaf2e50f0f1442aea565f34a6b2386d7f75761d5 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Tue, 8 Jul 2025 14:28:54 +0100 Subject: [PATCH 092/305] test(trie): add sparse trie tests to parallel sparse trie (#17258) Co-authored-by: Brian Picciano --- Cargo.lock | 3 + crates/trie/sparse-parallel/Cargo.toml | 5 +- crates/trie/sparse-parallel/src/lib.rs | 2 + crates/trie/sparse-parallel/src/lower.rs | 2 +- crates/trie/sparse-parallel/src/trie.rs | 1128 ++++++++++++++++++++-- crates/trie/sparse/src/traits.rs | 7 +- crates/trie/sparse/src/trie.rs | 11 +- 7 files changed, 1061 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d38c6923d6..67a29aa26fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10632,6 +10632,7 @@ dependencies = [ "arbitrary", "assert_matches", "itertools 0.14.0", + "pretty_assertions", "proptest", "proptest-arbitrary-interop", "rand 0.8.5", @@ -10639,8 +10640,10 @@ dependencies = [ "rayon", "reth-execution-errors", "reth-primitives-traits", + "reth-provider", "reth-trie", "reth-trie-common", + "reth-trie-db", "reth-trie-sparse", "smallvec", "tracing", diff --git a/crates/trie/sparse-parallel/Cargo.toml b/crates/trie/sparse-parallel/Cargo.toml index 039f6d82a5f..41f9ab9ab1f 100644 --- a/crates/trie/sparse-parallel/Cargo.toml +++ b/crates/trie/sparse-parallel/Cargo.toml @@ -30,14 +30,17 @@ rayon = { workspace = true, optional = true } [dev-dependencies] # reth reth-primitives-traits.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } reth-trie-common = { workspace = true, features = ["test-utils", "arbitrary"] } -reth-trie.workspace = true +reth-trie-db.workspace = true reth-trie-sparse = { workspace = true, features = ["test-utils"] } +reth-trie.workspace = true # misc arbitrary.workspace = true assert_matches.workspace = true itertools.workspace = true +pretty_assertions.workspace = true proptest-arbitrary-interop.workspace = true proptest.workspace = true rand.workspace = true diff --git a/crates/trie/sparse-parallel/src/lib.rs b/crates/trie/sparse-parallel/src/lib.rs index 12f53935bf2..c4b7b10ea51 100644 --- a/crates/trie/sparse-parallel/src/lib.rs +++ b/crates/trie/sparse-parallel/src/lib.rs @@ -2,6 +2,8 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +extern crate alloc; + mod trie; pub use trie::*; diff --git a/crates/trie/sparse-parallel/src/lower.rs b/crates/trie/sparse-parallel/src/lower.rs index 5e49bcb43cd..047e3a15a16 100644 --- a/crates/trie/sparse-parallel/src/lower.rs +++ b/crates/trie/sparse-parallel/src/lower.rs @@ -6,7 +6,7 @@ use reth_trie_common::Nibbles; /// When a [`crate::ParallelSparseTrie`] is initialized/cleared then its `LowerSparseSubtrie`s are /// all blinded, meaning they have no nodes. A blinded `LowerSparseSubtrie` may hold onto a cleared /// [`SparseSubtrie`] in order to re-use allocations. 
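/// In other words, `Blind(None)` holds nothing, while `Blind(Some(_))` retains a cleared /// [`SparseSubtrie`] whose allocations can be reused the next time the slot is revealed.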
-#[derive(Debug, Eq, PartialEq)] +#[derive(Clone, Debug, Eq, PartialEq)] pub(crate) enum LowerSparseSubtrie { Blind(Option<Box<SparseSubtrie>>), Revealed(Box<SparseSubtrie>), diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 3eacdd9a7dc..bcf04c32ea9 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -1,4 +1,5 @@ use crate::LowerSparseSubtrie; +use alloc::borrow::Cow; use alloy_primitives::{ map::{Entry, HashMap}, B256, @@ -33,7 +34,7 @@ pub const NUM_LOWER_SUBTRIES: usize = 16usize.pow(UPPER_TRIE_MAX_DEPTH as u32); /// - Each leaf entry in the `subtries` and `upper_trie` collection must have a corresponding entry /// in `values` collection. If the root node is a leaf, it must also have an entry in `values`. /// - All keys in `values` collection are full leaf paths. -#[derive(PartialEq, Eq, Debug)] +#[derive(Clone, PartialEq, Eq, Debug)] pub struct ParallelSparseTrie { /// This contains the trie nodes for the upper part of the trie. upper_subtrie: Box<SparseSubtrie>, @@ -576,6 +577,10 @@ impl SparseTrieInterface for ParallelSparseTrie { self.subtrie_for_path(full_path).and_then(|subtrie| subtrie.inner.values.get(full_path)) } + fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { + self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed) + } + fn take_updates(&mut self) -> SparseTrieUpdates { self.updates.take().unwrap_or_default() } @@ -2039,6 +2044,8 @@ enum SparseTrieUpdatesAction { #[cfg(test)] mod tests { + use std::collections::{BTreeMap, BTreeSet}; + use super::{ path_subtrie_index_unchecked, LowerSparseSubtrie, ParallelSparseTrie, SparseSubtrie, SparseSubtrieType, @@ -2047,18 +2054,21 @@ mod tests { use crate::trie::ChangedSubtrie; use alloy_primitives::{ + b256, hex, map::{foldhash::fast::RandomState, B256Set, DefaultHashBuilder, HashMap}, - B256, + B256, U256, }; use alloy_rlp::{Decodable, Encodable}; use alloy_trie::{BranchNodeCompact, Nibbles}; use assert_matches::assert_matches; use itertools::Itertools; - use reth_execution_errors::SparseTrieError; + use proptest::{prelude::*, sample::SizeRange}; + use proptest_arbitrary_interop::arb; + use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; use reth_primitives_traits::Account; + use reth_provider::{test_utils::create_test_provider_factory, TrieWriter}; use reth_trie::{ hashed_cursor::{noop::NoopHashedAccountCursor, HashedPostStateAccountCursor}, node_iter::{TrieElement, TrieNodeIter}, - trie_cursor::{noop::NoopAccountTrieCursor, TrieCursor}, + trie_cursor::{noop::NoopAccountTrieCursor, TrieCursor, TrieCursorFactory}, walker::TrieWalker, HashedPostState, }; @@ -2069,11 +2079,21 @@ mod tests { BranchNode, ExtensionNode, HashBuilder, LeafNode, RlpNode, TrieMask, TrieNode, EMPTY_ROOT_HASH, }; + use reth_trie_db::DatabaseTrieCursorFactory; use reth_trie_sparse::{ blinded::{BlindedProvider, DefaultBlindedProvider, RevealedNode}, SparseNode, SparseTrieInterface, TrieMasks, }; + /// Pad nibbles to the length of a B256 hash with zeros on the right. + fn pad_nibbles_right(mut nibbles: Nibbles) -> Nibbles { + nibbles.extend(&Nibbles::from_nibbles_unchecked(vec![ + 0; + B256::len_bytes() * 2 - nibbles.len() + ])); + nibbles + } + /// Mock blinded provider for testing that allows pre-setting nodes at specific paths.
/// /// This provider can be used in tests to simulate blinded nodes that need to be revealed @@ -2176,10 +2196,14 @@ mod tests { (Nibbles::from_nibbles(path), encode_account_value(value_nonce)) } - /// Insert multiple leaves into the trie - fn insert_leaves(&self, trie: &mut ParallelSparseTrie, leaves: &[(Nibbles, Vec<u8>)]) { + /// Update multiple leaves in the trie + fn update_leaves( + &self, + trie: &mut ParallelSparseTrie, + leaves: impl IntoIterator<Item = (Nibbles, Vec<u8>)>, + ) { for (path, value) in leaves { - trie.update_leaf(*path, value.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(path, value, DefaultBlindedProvider).unwrap(); } } @@ -2198,6 +2222,22 @@ mod tests { fn assert_upper_subtrie<'a>(&self, trie: &'a ParallelSparseTrie) -> SubtrieAssertion<'a> { SubtrieAssertion::new(&trie.upper_subtrie) } + + /// Assert the root, trie updates, and nodes against the hash builder output. + fn assert_with_hash_builder( + &self, + trie: &mut ParallelSparseTrie, + hash_builder_root: B256, + hash_builder_updates: TrieUpdates, + hash_builder_proof_nodes: ProofNodes, + ) { + assert_eq!(trie.root(), hash_builder_root); + pretty_assertions::assert_eq!( + BTreeMap::from_iter(trie.updates_ref().updated_nodes.clone()), + BTreeMap::from_iter(hash_builder_updates.account_nodes) + ); + assert_eq_parallel_sparse_trie_proof_nodes(trie, hash_builder_proof_nodes); + } } /// Assertion builder for subtrie structure @@ -2400,74 +2440,35 @@ mod tests { trie } - /// Assert that the parallel sparse trie nodes and the proof nodes from the hash builder are - /// equal. - #[allow(unused)] - fn assert_eq_parallel_sparse_trie_proof_nodes( + fn parallel_sparse_trie_nodes( sparse_trie: &ParallelSparseTrie, - proof_nodes: ProofNodes, - ) { - let proof_nodes = proof_nodes - .into_nodes_sorted() - .into_iter() - .map(|(path, node)| (path, TrieNode::decode(&mut node.as_ref()).unwrap())); - let lower_sparse_nodes = sparse_trie .lower_subtries .iter() - .filter_map(LowerSparseSubtrie::as_revealed_ref) + .filter_map(|subtrie| subtrie.as_revealed_ref()) .flat_map(|subtrie| subtrie.nodes.iter()); let upper_sparse_nodes = sparse_trie.upper_subtrie.nodes.iter(); - let all_sparse_nodes = - lower_sparse_nodes.chain(upper_sparse_nodes).sorted_by_key(|(path, _)| *path); - - for ((proof_node_path, proof_node), (sparse_node_path, sparse_node)) in - proof_nodes.zip(all_sparse_nodes) - { - assert_eq!(&proof_node_path, sparse_node_path); - - let equals = match (&proof_node, &sparse_node) { - // Both nodes are empty - (TrieNode::EmptyRoot, SparseNode::Empty) => true, - // Both nodes are branches and have the same state mask - ( - TrieNode::Branch(BranchNode { state_mask: proof_state_mask, .. }), - SparseNode::Branch { state_mask: sparse_state_mask, .. }, - ) => proof_state_mask == sparse_state_mask, - // Both nodes are extensions and have the same key - ( - TrieNode::Extension(ExtensionNode { key: proof_key, .. }), - SparseNode::Extension { key: sparse_key, .. }, - ) | - // Both nodes are leaves and have the same key - ( - TrieNode::Leaf(LeafNode { key: proof_key, .. }), - SparseNode::Leaf { key: sparse_key, ..
}, - ) => proof_key == sparse_key, - // Empty and hash nodes are specific to the sparse trie, skip them - (_, SparseNode::Empty | SparseNode::Hash(_)) => continue, - _ => false, - }; - assert!( - equals, - "path: {proof_node_path:?}\nproof node: {proof_node:?}\nsparse node: {sparse_node:?}" - ); - } + ) -> impl IntoIterator<Item = (&Nibbles, &SparseNode)> { let lower_sparse_nodes = sparse_trie .lower_subtries .iter() .filter_map(|subtrie| subtrie.as_revealed_ref()) .flat_map(|subtrie| subtrie.nodes.iter()); let upper_sparse_nodes = sparse_trie.upper_subtrie.nodes.iter(); + lower_sparse_nodes.chain(upper_sparse_nodes).sorted_by_key(|(path, _)| *path) } - /// Assert that the sparse subtrie nodes and the proof nodes from the hash builder are equal. - fn assert_eq_sparse_subtrie_proof_nodes(sparse_trie: &SparseSubtrie, proof_nodes: ProofNodes) { + /// Assert that the parallel sparse trie nodes and the proof nodes from the hash builder are + /// equal. + fn assert_eq_parallel_sparse_trie_proof_nodes( + sparse_trie: &ParallelSparseTrie, + proof_nodes: ProofNodes, + ) { let proof_nodes = proof_nodes .into_nodes_sorted() .into_iter() .map(|(path, node)| (path, TrieNode::decode(&mut node.as_ref()).unwrap())); - let sparse_nodes = sparse_trie.nodes.iter().sorted_by_key(|(path, _)| *path); + let all_sparse_nodes = parallel_sparse_trie_nodes(sparse_trie); for ((proof_node_path, proof_node), (sparse_node_path, sparse_node)) in - proof_nodes.zip(sparse_nodes) + proof_nodes.zip(all_sparse_nodes) { assert_eq!(&proof_node_path, sparse_node_path); @@ -3659,7 +3660,9 @@ mod tests { } #[test] - fn sparse_subtrie_empty_update_one() { + fn sparse_trie_empty_update_one() { + let ctx = ParallelSparseTrieTestContext; + let key = Nibbles::unpack(B256::with_last_byte(42)); let value = || Account::default(); let value_encoded = || { @@ -3668,7 +3671,7 @@ mod tests { account_rlp }; - let (_hash_builder_root, _hash_builder_updates, hash_builder_proof_nodes, _, _) = + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = run_hash_builder( [(key, value())], NoopAccountTrieCursor::default(), @@ -3676,15 +3679,967 @@ mod tests { [key], ); - let mut sparse = SparseSubtrie::default(); - sparse.update_leaf(key, value_encoded(), DefaultBlindedProvider, false).unwrap(); - // TODO: enable these and make test pass as we have these implemented - // let sparse_root = sparse.root(); - // let sparse_updates = sparse.take_updates(); + let mut sparse = ParallelSparseTrie::default().with_updates(true); + ctx.update_leaves(&mut sparse, [(key, value_encoded())]); + ctx.assert_with_hash_builder( + &mut sparse, + hash_builder_root, + hash_builder_updates, + hash_builder_proof_nodes, + ); + } - assert_eq!(sparse_root, hash_builder_root); - assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); - assert_eq_sparse_subtrie_proof_nodes(&sparse, hash_builder_proof_nodes); + #[test] + fn sparse_trie_empty_update_multiple_lower_nibbles() { + let ctx = ParallelSparseTrieTestContext; + + let paths = (0..=16).map(|b| Nibbles::unpack(B256::with_last_byte(b))).collect::<Vec<_>>(); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + value().into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; + + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + paths.iter().copied().zip(std::iter::repeat_with(value)), + NoopAccountTrieCursor::default(), + Default::default(), + paths.clone(), + ); + + let mut sparse = ParallelSparseTrie::default().with_updates(true); + ctx.update_leaves( + &mut sparse, + paths.into_iter().zip(std::iter::repeat_with(value_encoded)), + ); + + ctx.assert_with_hash_builder( + &mut sparse, + hash_builder_root, + hash_builder_updates,
hash_builder_proof_nodes, + ); + } + + #[test] + fn sparse_trie_empty_update_multiple_upper_nibbles() { + let paths = (239..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::<Vec<_>>(); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + value().into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; + + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + paths.iter().copied().zip(std::iter::repeat_with(value)), + NoopAccountTrieCursor::default(), + Default::default(), + paths.clone(), + ); + + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::default().with_updates(true); + for path in &paths { + sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); + } + let sparse_root = sparse.root(); + let sparse_updates = sparse.take_updates(); + + assert_eq!(sparse_root, hash_builder_root); + assert_eq!(sparse_updates.updated_nodes, hash_builder_updates.account_nodes); + assert_eq_parallel_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + } + + #[test] + fn sparse_trie_empty_update_multiple() { + let ctx = ParallelSparseTrieTestContext; + + let paths = (0..=255) + .map(|b| { + Nibbles::unpack(if b % 2 == 0 { + B256::repeat_byte(b) + } else { + B256::with_last_byte(b) + }) + }) + .collect::<Vec<_>>(); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + value().into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; + + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + paths.iter().sorted_unstable().copied().zip(std::iter::repeat_with(value)), + NoopAccountTrieCursor::default(), + Default::default(), + paths.clone(), + ); + + let mut sparse = ParallelSparseTrie::default().with_updates(true); + ctx.update_leaves( + &mut sparse, + paths.iter().copied().zip(std::iter::repeat_with(value_encoded)), + ); + ctx.assert_with_hash_builder( + &mut sparse, + hash_builder_root, + hash_builder_updates, + hash_builder_proof_nodes, + ); + } + + #[test] + fn sparse_trie_empty_update_repeated() { + let ctx = ParallelSparseTrieTestContext; + + let paths = (0..=255).map(|b| Nibbles::unpack(B256::repeat_byte(b))).collect::<Vec<_>>(); + let old_value = Account { nonce: 1, ..Default::default() }; + let old_value_encoded = { + let mut account_rlp = Vec::new(); + old_value.into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; + let new_value = Account { nonce: 2, ..Default::default() }; + let new_value_encoded = { + let mut account_rlp = Vec::new(); + new_value.into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; + + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + paths.iter().copied().zip(std::iter::repeat_with(|| old_value)), + NoopAccountTrieCursor::default(), + Default::default(), + paths.clone(), + ); + + let mut sparse = ParallelSparseTrie::default().with_updates(true); + ctx.update_leaves( + &mut sparse, + paths.iter().copied().zip(std::iter::repeat(old_value_encoded)), + ); + ctx.assert_with_hash_builder( + &mut sparse, + hash_builder_root, + hash_builder_updates, + hash_builder_proof_nodes, + ); + + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + paths.iter().copied().zip(std::iter::repeat(new_value)), + NoopAccountTrieCursor::default(), + Default::default(), +
paths.clone(), + ); + + ctx.update_leaves( + &mut sparse, + paths.iter().copied().zip(std::iter::repeat(new_value_encoded)), + ); + ctx.assert_with_hash_builder( + &mut sparse, + hash_builder_root, + hash_builder_updates, + hash_builder_proof_nodes, + ); + } + + #[test] + fn sparse_trie_remove_leaf() { + let ctx = ParallelSparseTrieTestContext; + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::default(); + + let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); + + ctx.update_leaves( + &mut sparse, + [ + (Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone()), + (Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), value.clone()), + (Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), value.clone()), + (Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), value.clone()), + (Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), value.clone()), + (Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), value), + ], + ); + + // Extension (Key = 5) + // └── Branch (Mask = 1011) + // ├── 0 -> Extension (Key = 23) + // │ └── Branch (Mask = 0101) + // │ ├── 1 -> Leaf (Key = 1, Path = 50231) + // │ └── 3 -> Leaf (Key = 3, Path = 50233) + // ├── 2 -> Leaf (Key = 013, Path = 52013) + // └── 3 -> Branch (Mask = 0101) + // ├── 1 -> Leaf (Key = 3102, Path = 53102) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + parallel_sparse_trie_nodes(&sparse) + .into_iter() + .map(|(k, v)| (*k, v.clone())) + .collect::<BTreeMap<_, _>>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1101.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_ext(Nibbles::from_nibbles([0x2, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]), + SparseNode::new_branch(0b1010.into()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::default()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), + SparseNode::new_leaf(Nibbles::default()) + ), + ( + Nibbles::from_nibbles([0x5, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x1, 0x3])) + ), + (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); + + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x2, 0x0, 0x1, 0x3]), &provider).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Extension (Key = 23) + // │ └── Branch (Mask = 0101) + // │ ├── 1 -> Leaf (Key = 0231, Path = 50231) + // │ └── 3 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Branch (Mask = 0101) + // ├── 1 -> Leaf (Key = 3102, Path = 53102) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + parallel_sparse_trie_nodes(&sparse) + .into_iter() + .map(|(k, v)| (*k, v.clone())) + .collect::<BTreeMap<_, _>>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]),
SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_ext(Nibbles::from_nibbles([0x2, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3]), + SparseNode::new_branch(0b1010.into()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::default()) + ), + ( + Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), + SparseNode::new_leaf(Nibbles::default()) + ), + (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); + + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), &provider).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Branch (Mask = 0101) + // ├── 1 -> Leaf (Key = 3102, Path = 53102) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + parallel_sparse_trie_nodes(&sparse) + .into_iter() + .map(|(k, v)| (*k, v.clone())) + .collect::<BTreeMap<_, _>>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3])) + ), + (Nibbles::from_nibbles([0x5, 0x3]), SparseNode::new_branch(0b1010.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x1]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0, 0x2])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); + + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x1, 0x0, 0x2]), &provider).unwrap(); + + // Extension (Key = 5) + // └── Branch (Mask = 1001) + // ├── 0 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Branch (Mask = 1010) + // ├── 0 -> Leaf (Key = 3302, Path = 53302) + // └── 2 -> Leaf (Key = 3320, Path = 53320) + pretty_assertions::assert_eq!( + parallel_sparse_trie_nodes(&sparse) + .into_iter() + .map(|(k, v)| (*k, v.clone())) + .collect::<BTreeMap<_, _>>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3]), + SparseNode::new_ext(Nibbles::from_nibbles([0x3])) + ), + (Nibbles::from_nibbles([0x5, 0x3, 0x3]), SparseNode::new_branch(0b0101.into())), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x0])) + ) + ]) + ); + + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x2, 0x0]), &provider).unwrap(); + + // Extension (Key = 5) + // └── Branch
(Mask = 1001) + // ├── 0 -> Leaf (Key = 0233, Path = 50233) + // └── 3 -> Leaf (Key = 3302, Path = 53302) + pretty_assertions::assert_eq!( + parallel_sparse_trie_nodes(&sparse) + .into_iter() + .map(|(k, v)| (*k, v.clone())) + .collect::<BTreeMap<_, _>>(), + BTreeMap::from_iter([ + (Nibbles::default(), SparseNode::new_ext(Nibbles::from_nibbles([0x5]))), + (Nibbles::from_nibbles([0x5]), SparseNode::new_branch(0b1001.into())), + ( + Nibbles::from_nibbles([0x5, 0x0]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x2, 0x3, 0x3])) + ), + ( + Nibbles::from_nibbles([0x5, 0x3]), + SparseNode::new_leaf(Nibbles::from_nibbles([0x3, 0x0, 0x2])) + ), + ]) + ); + + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x3]), &provider).unwrap(); + + // Leaf (Key = 53302) + pretty_assertions::assert_eq!( + parallel_sparse_trie_nodes(&sparse) + .into_iter() + .map(|(k, v)| (*k, v.clone())) + .collect::<BTreeMap<_, _>>(), + BTreeMap::from_iter([( + Nibbles::default(), + SparseNode::new_leaf(Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2])) + ),]) + ); + + sparse.remove_leaf(&Nibbles::from_nibbles([0x5, 0x3, 0x3, 0x0, 0x2]), &provider).unwrap(); + + // Empty + pretty_assertions::assert_eq!( + parallel_sparse_trie_nodes(&sparse) + .into_iter() + .map(|(k, v)| (*k, v.clone())) + .collect::<BTreeMap<_, _>>(), + BTreeMap::from_iter([(Nibbles::default(), SparseNode::Empty)]) + ); + } + + #[test] + fn sparse_trie_remove_leaf_blinded() { + let leaf = LeafNode::new( + Nibbles::default(), + alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec(), + ); + let branch = TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::repeat_byte(1)), + RlpNode::from_raw_rlp(&alloy_rlp::encode(leaf.clone())).unwrap(), + ], + TrieMask::new(0b11), + )); + + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::from_root( + branch.clone(), + TrieMasks { hash_mask: Some(TrieMask::new(0b01)), tree_mask: None }, + false, + ) + .unwrap(); + + // Reveal a branch node and one of its children + // + // Branch (Mask = 11) + // ├── 0 -> Hash (Path = 0) + // └── 1 -> Leaf (Path = 1) + sparse + .reveal_node( + Nibbles::default(), + branch, + TrieMasks { hash_mask: None, tree_mask: Some(TrieMask::new(0b01)) }, + ) + .unwrap(); + sparse + .reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf), TrieMasks::none()) + .unwrap(); + + // Removing a blinded leaf should result in an error + assert_matches!( + sparse.remove_leaf(&Nibbles::from_nibbles([0x0]), &provider).map_err(|e| e.into_kind()), + Err(SparseTrieErrorKind::BlindedNode { path, hash }) if path == Nibbles::from_nibbles([0x0]) && hash == B256::repeat_byte(1) + ); + } + + #[test] + fn sparse_trie_remove_leaf_non_existent() { + let leaf = LeafNode::new( + Nibbles::default(), + alloy_rlp::encode_fixed_size(&U256::from(1)).to_vec(), + ); + let branch = TrieNode::Branch(BranchNode::new( + vec![ + RlpNode::word_rlp(&B256::repeat_byte(1)), + RlpNode::from_raw_rlp(&alloy_rlp::encode(leaf.clone())).unwrap(), + ], + TrieMask::new(0b11), + )); + + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::from_root( + branch.clone(), + TrieMasks { hash_mask: Some(TrieMask::new(0b01)), tree_mask: None }, + false, + ) + .unwrap(); + + // Reveal a branch node and one of its children + // + // Branch (Mask = 11) + // ├── 0 -> Hash (Path = 0) + // └── 1 -> Leaf (Path = 1) + sparse + .reveal_node( + Nibbles::default(), + branch, + TrieMasks { hash_mask: None, tree_mask: Some(TrieMask::new(0b01)) }, + ) + .unwrap(); + sparse + .reveal_node(Nibbles::from_nibbles([0x1]),
TrieNode::Leaf(leaf), TrieMasks::none()) + .unwrap(); + + // Removing a non-existent leaf should be a noop + let sparse_old = sparse.clone(); + assert_matches!(sparse.remove_leaf(&Nibbles::from_nibbles([0x2]), &provider), Ok(())); + assert_eq!(sparse, sparse_old); + } + + #[test] + fn sparse_trie_fuzz() { + // Having only the first 3 nibbles set, we narrow down the range of keys + // to 4096 different hashes. It allows us to generate collisions more likely + // to test the sparse trie updates. + const KEY_NIBBLES_LEN: usize = 3; + + fn test(updates: Vec<(BTreeMap<Nibbles, Account>, BTreeSet<Nibbles>)>) { + { + let mut state = BTreeMap::default(); + let default_provider = DefaultBlindedProvider; + let provider_factory = create_test_provider_factory(); + let mut sparse = ParallelSparseTrie::default().with_updates(true); + + for (update, keys_to_delete) in updates { + // Insert state updates into the sparse trie and calculate the root + for (key, account) in update.clone() { + let account = account.into_trie_account(EMPTY_ROOT_HASH); + let mut account_rlp = Vec::new(); + account.encode(&mut account_rlp); + sparse.update_leaf(key, account_rlp, &default_provider).unwrap(); + } + // We need to clone the sparse trie, so that all updated branch nodes are + // preserved, and not only those that were changed after the last call to + // `root()`. + let mut updated_sparse = sparse.clone(); + let sparse_root = updated_sparse.root(); + let sparse_updates = updated_sparse.take_updates(); + + // Insert state updates into the hash builder and calculate the root + state.extend(update); + let provider = provider_factory.provider().unwrap(); + let trie_cursor = DatabaseTrieCursorFactory::new(provider.tx_ref()); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + state.clone(), + trie_cursor.account_trie_cursor().unwrap(), + Default::default(), + state.keys().copied().collect::<Vec<_>>(), + ); + + // Write trie updates to the database + let provider_rw = provider_factory.provider_rw().unwrap(); + provider_rw.write_trie_updates(&hash_builder_updates).unwrap(); + provider_rw.commit().unwrap(); + + // Assert that the sparse trie root matches the hash builder root + assert_eq!(sparse_root, hash_builder_root); + // Assert that the sparse trie updates match the hash builder updates + pretty_assertions::assert_eq!( + BTreeMap::from_iter(sparse_updates.updated_nodes), + BTreeMap::from_iter(hash_builder_updates.account_nodes) + ); + // Assert that the sparse trie nodes match the hash builder proof nodes + assert_eq_parallel_sparse_trie_proof_nodes( + &updated_sparse, + hash_builder_proof_nodes, + ); + + // Delete some keys from both the hash builder and the sparse trie and check + // that the sparse trie root still matches the hash builder root + for key in &keys_to_delete { + state.remove(key).unwrap(); + sparse.remove_leaf(key, &default_provider).unwrap(); + } + + // We need to clone the sparse trie, so that all updated branch nodes are + // preserved, and not only those that were changed after the last call to + // `root()`.
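+ // (Asserting on the clone keeps `sparse` itself intact for the remaining + // iterations of this update/delete loop.)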
+ let mut updated_sparse = sparse.clone(); + let sparse_root = updated_sparse.root(); + let sparse_updates = updated_sparse.take_updates(); + + let provider = provider_factory.provider().unwrap(); + let trie_cursor = DatabaseTrieCursorFactory::new(provider.tx_ref()); + let (hash_builder_root, hash_builder_updates, hash_builder_proof_nodes, _, _) = + run_hash_builder( + state.clone(), + trie_cursor.account_trie_cursor().unwrap(), + keys_to_delete + .iter() + .map(|nibbles| B256::from_slice(&nibbles.pack())) + .collect(), + state.keys().copied().collect::>(), + ); + + // Write trie updates to the database + let provider_rw = provider_factory.provider_rw().unwrap(); + provider_rw.write_trie_updates(&hash_builder_updates).unwrap(); + provider_rw.commit().unwrap(); + + // Assert that the sparse trie root matches the hash builder root + assert_eq!(sparse_root, hash_builder_root); + // Assert that the sparse trie updates match the hash builder updates + pretty_assertions::assert_eq!( + BTreeMap::from_iter(sparse_updates.updated_nodes), + BTreeMap::from_iter(hash_builder_updates.account_nodes) + ); + // Assert that the sparse trie nodes match the hash builder proof nodes + assert_eq_parallel_sparse_trie_proof_nodes( + &updated_sparse, + hash_builder_proof_nodes, + ); + } + } + } + + fn transform_updates( + updates: Vec>, + mut rng: impl rand::Rng, + ) -> Vec<(BTreeMap, BTreeSet)> { + let mut keys = BTreeSet::new(); + updates + .into_iter() + .map(|update| { + keys.extend(update.keys().copied()); + + let keys_to_delete_len = update.len() / 2; + let keys_to_delete = (0..keys_to_delete_len) + .map(|_| { + let key = + *rand::seq::IteratorRandom::choose(keys.iter(), &mut rng).unwrap(); + keys.take(&key).unwrap() + }) + .collect(); + + (update, keys_to_delete) + }) + .collect::>() + } + + proptest!(ProptestConfig::with_cases(10), |( + updates in proptest::collection::vec( + proptest::collection::btree_map( + any_with::(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles_right), + arb::(), + 1..50, + ), + 1..50, + ).prop_perturb(transform_updates) + )| { + test(updates) + }); + } + + /// We have three leaves that share the same prefix: 0x00, 0x01 and 0x02. Hash builder trie has + /// only nodes 0x00 and 0x01, and we have proofs for them. Node B is new and inserted in the + /// sparse trie first. + /// + /// 1. Reveal the hash builder proof to leaf 0x00 in the sparse trie. + /// 2. Insert leaf 0x01 into the sparse trie. + /// 3. Reveal the hash builder proof to leaf 0x02 in the sparse trie. + /// + /// The hash builder proof to the leaf 0x02 didn't have the leaf 0x01 at the corresponding + /// nibble of the branch node, so we need to adjust the branch node instead of fully + /// replacing it. 
+ #[test] + fn sparse_trie_reveal_node_1() { + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x02])); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + value().into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key3(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [Nibbles::default()], + ); + + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::from_root( + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + TrieMasks { + hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(), + tree_mask: branch_node_tree_masks.get(&Nibbles::default()).copied(), + }, + false, + ) + .unwrap(); + + // Generate the proof for the first key and reveal it in the sparse trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key3(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [key1()], + ); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + sparse + .reveal_node( + path, + TrieNode::decode(&mut &node[..]).unwrap(), + TrieMasks { hash_mask, tree_mask }, + ) + .unwrap(); + } + + // Check that the branch node exists with only two nibbles set + assert_eq!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b101.into())) + ); + + // Insert the leaf for the second key + sparse.update_leaf(key2(), value_encoded(), &provider).unwrap(); + + // Check that the branch node was updated and another nibble was set + assert_eq!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b111.into())) + ); + + // Generate the proof for the third key and reveal it in the sparse trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key3(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [key3()], + ); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + sparse + .reveal_node( + path, + TrieNode::decode(&mut &node[..]).unwrap(), + TrieMasks { hash_mask, tree_mask }, + ) + .unwrap(); + } + + // Check that nothing changed in the branch node + assert_eq!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b111.into())) + ); + + // Generate the nodes for the full trie with all three key using the hash builder, and + // compare them to the sparse trie + let (_, _, hash_builder_proof_nodes, _, _) = run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [key1(), key2(), key3()], + ); + + assert_eq_parallel_sparse_trie_proof_nodes(&sparse, hash_builder_proof_nodes); + } + + /// We have three leaves: 0x0000, 0x0101, and 
0x0102. Hash builder trie has all nodes, and we + /// have proofs for them. + /// + /// 1. Reveal the hash builder proof to leaf 0x00 in the sparse trie. + /// 2. Remove leaf 0x00 from the sparse trie (that will remove the branch node and create an + /// extension node with the key 0x0000). + /// 3. Reveal the hash builder proof to leaf 0x0101 in the sparse trie. + /// + /// The hash builder proof to the leaf 0x0101 had a branch node in the path, but we turned it + /// into an extension node, so it should ignore this node. + #[test] + fn sparse_trie_reveal_node_2() { + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x00])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x01])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x02])); + let value = || Account::default(); + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [Nibbles::default()], + ); + + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::from_root( + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + TrieMasks { + hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(), + tree_mask: branch_node_tree_masks.get(&Nibbles::default()).copied(), + }, + false, + ) + .unwrap(); + + // Generate the proof for the children of the root branch node and reveal it in the sparse + // trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [key1(), Nibbles::from_nibbles_unchecked([0x01])], + ); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + sparse + .reveal_node( + path, + TrieNode::decode(&mut &node[..]).unwrap(), + TrieMasks { hash_mask, tree_mask }, + ) + .unwrap(); + } + + // Check that the branch node exists + assert_eq!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_branch(0b11.into())) + ); + + // Remove the leaf for the first key + sparse.remove_leaf(&key1(), &provider).unwrap(); + + // Check that the branch node was turned into an extension node + assert_eq!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(&SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x01]))) + ); + + // Generate the proof for the third key and reveal it in the sparse trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value()), (key3(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [key2()], + ); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + sparse + .reveal_node( + path, + TrieNode::decode(&mut &node[..]).unwrap(), + TrieMasks { hash_mask, tree_mask }, + ) + .unwrap(); + } + + // Check that nothing changed in the extension node + assert_eq!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + 
Some(&SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x01]))) + ); + } + + /// We have two leaves that share the same prefix: 0x0001 and 0x0002, and a leaf with a + /// different prefix: 0x0100. Hash builder trie has only the first two leaves, and we have + /// proofs for them. + /// + /// 1. Insert the leaf 0x0100 into the sparse trie, and check that the root extension node was + /// turned into a branch node. + /// 2. Reveal the leaf 0x0001 in the sparse trie, and check that the root branch node wasn't + /// overwritten with the extension node from the proof. + #[test] + fn sparse_trie_reveal_node_3() { + let key1 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x01])); + let key2 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x00, 0x02])); + let key3 = || pad_nibbles_right(Nibbles::from_nibbles_unchecked([0x01, 0x00])); + let value = || Account::default(); + let value_encoded = || { + let mut account_rlp = Vec::new(); + value().into_trie_account(EMPTY_ROOT_HASH).encode(&mut account_rlp); + account_rlp + }; + + // Generate the proof for the root node and initialize the sparse trie with it + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [Nibbles::default()], + ); + + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::from_root( + TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), + TrieMasks { + hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(), + tree_mask: branch_node_tree_masks.get(&Nibbles::default()).copied(), + }, + false, + ) + .unwrap(); + + // Check that the root extension node exists + assert_matches!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Extension { key, hash: None, store_in_db_trie: None }) if *key == Nibbles::from_nibbles([0x00]) + ); + + // Insert the leaf with a different prefix + sparse.update_leaf(key3(), value_encoded(), &provider).unwrap(); + + // Check that the extension node was turned into a branch node + assert_matches!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch { state_mask, hash: None, store_in_db_trie: None }) if *state_mask == TrieMask::new(0b11) + ); + + // Generate the proof for the first key and reveal it in the sparse trie + let (_, _, hash_builder_proof_nodes, branch_node_hash_masks, branch_node_tree_masks) = + run_hash_builder( + [(key1(), value()), (key2(), value())], + NoopAccountTrieCursor::default(), + Default::default(), + [key1()], + ); + for (path, node) in hash_builder_proof_nodes.nodes_sorted() { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + sparse + .reveal_node( + path, + TrieNode::decode(&mut &node[..]).unwrap(), + TrieMasks { hash_mask, tree_mask }, + ) + .unwrap(); + } + + // Check that the branch node wasn't overwritten by the extension node in the proof + assert_matches!( + sparse.upper_subtrie.nodes.get(&Nibbles::default()), + Some(SparseNode::Branch { state_mask, hash: None, store_in_db_trie: None }) if *state_mask == TrieMask::new(0b11) + ); } #[test] @@ -3843,7 +4798,7 @@ mod tests { ]); // Insert all leaves - ctx.insert_leaves(&mut trie, &leaves); + ctx.update_leaves(&mut trie, leaves.clone()); // Verify the upper subtrie has an extension node at the root with key 0x12 ctx.assert_upper_subtrie(&trie) @@ -3902,7 
+4857,7 @@ mod tests { let leaves = ctx.create_test_leaves(&[&[0x1, 0x2, 0x3, 0x4], &[0x1, 0x2, 0x3, 0x5]]); // Insert all leaves - ctx.insert_leaves(&mut trie, &leaves); + ctx.update_leaves(&mut trie, leaves.clone()); // Verify the upper subtrie has an extension node at the root with key 0x123 ctx.assert_upper_subtrie(&trie) @@ -3933,7 +4888,7 @@ mod tests { let leaves = ctx.create_test_leaves(&[&[0x1, 0x2, 0x3, 0x4], &[0x1, 0x2, 0x4, 0x5]]); // Insert all leaves - ctx.insert_leaves(&mut trie, &leaves); + ctx.update_leaves(&mut trie, leaves.clone()); // Verify the upper subtrie has an extension node at the root with key 0x12 ctx.assert_upper_subtrie(&trie) @@ -3970,10 +4925,15 @@ mod tests { let (leaf3_path, value3) = ctx.create_test_leaf([0x2], 3); let (leaf4_path, value4) = ctx.create_test_leaf([0x3], 4); - trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap(); - trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); - trie.update_leaf(leaf3_path, value3.clone(), DefaultBlindedProvider).unwrap(); - trie.update_leaf(leaf4_path, value4.clone(), DefaultBlindedProvider).unwrap(); + ctx.update_leaves( + &mut trie, + [ + (leaf1_path, value1.clone()), + (leaf2_path, value2.clone()), + (leaf3_path, value3.clone()), + (leaf4_path, value4.clone()), + ], + ); // Verify upper trie has a branch at root with 4 children ctx.assert_upper_subtrie(&trie) @@ -4010,8 +4970,7 @@ mod tests { let (leaf1_path, value1) = ctx.create_test_leaf([0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x0], 1); let (leaf2_path, value2) = ctx.create_test_leaf([0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1], 2); - trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap(); - trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); + ctx.update_leaves(&mut trie, [(leaf1_path, value1.clone()), (leaf2_path, value2.clone())]); // Verify upper trie has extension with the full common prefix ctx.assert_upper_subtrie(&trie).has_extension( @@ -4063,9 +5022,7 @@ mod tests { } // Insert all leaves - for (path, value) in &leaves { - trie.update_leaf(*path, value.clone(), DefaultBlindedProvider).unwrap(); - } + ctx.update_leaves(&mut trie, leaves.iter().cloned()); // Verify upper trie structure ctx.assert_upper_subtrie(&trie) @@ -4115,9 +5072,7 @@ mod tests { ]; // Insert all leaves - for (path, value) in &leaves { - trie.update_leaf(*path, value.clone(), DefaultBlindedProvider).unwrap(); - } + ctx.update_leaves(&mut trie, leaves.iter().cloned()); // Verify upper trie has extension then branch ctx.assert_upper_subtrie(&trie) @@ -4165,17 +5120,16 @@ mod tests { // First two leaves share prefix 0xFF0 let (leaf1_path, value1) = ctx.create_test_leaf([0xF, 0xF, 0x0, 0x1], 1); let (leaf2_path, value2) = ctx.create_test_leaf([0xF, 0xF, 0x0, 0x2], 2); + let (leaf3_path, value3) = ctx.create_test_leaf([0xF, 0x0, 0x0, 0x3], 3); - trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap(); - trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); + ctx.update_leaves(&mut trie, [(leaf1_path, value1.clone()), (leaf2_path, value2.clone())]); // Verify initial extension structure ctx.assert_upper_subtrie(&trie) .has_extension(&Nibbles::default(), &Nibbles::from_nibbles([0xF, 0xF, 0x0])); // Add leaf that splits the extension - let (leaf3_path, value3) = ctx.create_test_leaf([0xF, 0x0, 0x0, 0x3], 3); - trie.update_leaf(leaf3_path, value3.clone(), DefaultBlindedProvider).unwrap(); + ctx.update_leaves(&mut trie, [(leaf3_path, value3.clone())]); // Verify 
transformed structure
         ctx.assert_upper_subtrie(&trie)
diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs
index 6d615bb8131..c40adac3df4 100644
--- a/crates/trie/sparse/src/traits.rs
+++ b/crates/trie/sparse/src/traits.rs
@@ -2,7 +2,7 @@
 
 use core::fmt::Debug;
 
-use alloc::vec::Vec;
+use alloc::{borrow::Cow, vec::Vec};
 use alloy_primitives::{
     map::{HashMap, HashSet},
     B256,
@@ -201,6 +201,11 @@ pub trait SparseTrieInterface: Default + Debug + Send + Sync {
         expected_value: Option<&Vec<u8>>,
     ) -> Result<LeafLookup, LeafLookupError>;
 
+    /// Returns a reference to the current sparse trie updates.
+    ///
+    /// If no updates have been made/recorded, returns an empty update set.
+    fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates>;
+
     /// Consumes and returns the currently accumulated trie updates.
     ///
     /// This is useful when you want to apply the updates to an external database
diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs
index 5bb8c7aef84..41cabda20b8 100644
--- a/crates/trie/sparse/src/trie.rs
+++ b/crates/trie/sparse/src/trie.rs
@@ -902,6 +902,10 @@ impl SparseTrieInterface for RevealedSparseTrie {
         self.values.get(full_path)
     }
 
+    fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> {
+        self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed)
+    }
+
     fn take_updates(&mut self) -> SparseTrieUpdates {
         self.updates.take().unwrap_or_default()
     }
@@ -1056,13 +1060,6 @@ impl SparseTrieInterface for RevealedSparseTrie {
 }
 
 impl RevealedSparseTrie {
-    /// Returns a reference to the current sparse trie updates.
-    ///
-    /// If no updates have been made/recorded, returns an empty update set.
-    pub fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> {
-        self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed)
-    }
-
     /// Returns an immutable reference to all nodes in the sparse trie.
     pub const fn nodes_ref(&self) -> &HashMap<Nibbles, SparseNode> {
         &self.nodes

From 34b1d3d5cfeaf5fd7cf7bfbc52626c2787e19b51 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com>
Date: Tue, 8 Jul 2025 14:51:31 +0100
Subject: [PATCH 093/305] ci: add https:// to image URLs in release.yml
 (#17280)

---
 .github/workflows/release.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 3fdcaf6d9c3..9acc024baf1 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -20,8 +20,8 @@ env:
   OP_IMAGE_NAME: ${{ github.repository_owner }}/op-reth
   REPRODUCIBLE_IMAGE_NAME: ${{ github.repository_owner }}/reth-reproducible
   CARGO_TERM_COLOR: always
-  DOCKER_IMAGE_NAME_URL: ghcr.io/${{ github.repository_owner }}/reth
-  DOCKER_OP_IMAGE_NAME_URL: ghcr.io/${{ github.repository_owner }}/op-reth
+  DOCKER_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/reth
+  DOCKER_OP_IMAGE_NAME_URL: https://ghcr.io/${{ github.repository_owner }}/op-reth
 
 jobs:
   dry-run:

From 34f1a606b71d027ecc10fcf7de52bad2f2da9bc8 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Tue, 8 Jul 2025 12:23:57 -0400
Subject: [PATCH 094/305] chore(trie): move from_root out of
 SparseTrieInterface (#17266)

---
 crates/trie/sparse-parallel/src/trie.rs | 26 ++++++++++++++++---
 crates/trie/sparse/src/traits.rs        | 16 ------------
 crates/trie/sparse/src/trie.rs          | 33 ++++++++++++++++++++++---
 3 files changed, 51 insertions(+), 24 deletions(-)

diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs
index bcf04c32ea9..069757e0520 100644
--- a/crates/trie/sparse-parallel/src/trie.rs
+++ b/crates/trie/sparse-parallel/src/trie.rs
@@ -62,10 +62,6 @@ impl Default for ParallelSparseTrie {
 }
 
 impl SparseTrieInterface for ParallelSparseTrie {
-    fn from_root(root: TrieNode, masks: TrieMasks, retain_updates: bool) -> SparseTrieResult<Self> {
-        Self::default().with_root(root, masks, retain_updates)
-    }
-
     fn with_root(
         mut self,
         root: TrieNode,
@@ -616,6 +612,28 @@ impl ParallelSparseTrie {
         self.updates.is_some()
     }
 
+    /// Creates a new revealed sparse trie from the given root node.
+    ///
+    /// This function initializes the internal structures and then reveals the root.
+    /// It is a convenient method to create a trie when you already have the root node available.
+    ///
+    /// # Arguments
+    ///
+    /// * `root` - The root node of the trie
+    /// * `masks` - Trie masks for root branch node
+    /// * `retain_updates` - Whether to track updates
+    ///
+    /// # Returns
+    ///
+    /// Self if successful, or an error if revealing fails.
+    pub fn from_root(
+        root: TrieNode,
+        masks: TrieMasks,
+        retain_updates: bool,
+    ) -> SparseTrieResult<Self> {
+        Self::default().with_root(root, masks, retain_updates)
+    }
+
     /// Returns a reference to the lower `SparseSubtrie` for the given path, or None if the
     /// path belongs to the upper trie, or if the lower subtrie for the path doesn't exist or is
     /// blinded.
diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs
index c40adac3df4..d935e25814e 100644
--- a/crates/trie/sparse/src/traits.rs
+++ b/crates/trie/sparse/src/traits.rs
@@ -19,22 +19,6 @@ use crate::blinded::BlindedProvider;
 /// while providing a unified interface for the core trie operations needed by the
 /// [`crate::SparseTrie`] enum.
 pub trait SparseTrieInterface: Default + Debug + Send + Sync {
-    /// Creates a new revealed sparse trie from the given root node.
-    ///
-    /// This function initializes the internal structures and then reveals the root.
-    /// It is a convenient method to create a trie when you already have the root node available.
-    ///
-    /// # Arguments
-    ///
-    /// * `root` - The root node of the trie
-    /// * `masks` - Trie masks for root branch node
-    /// * `retain_updates` - Whether to track updates
-    ///
-    /// # Returns
-    ///
-    /// Self if successful, or an error if revealing fails.
-    fn from_root(root: TrieNode, masks: TrieMasks, retain_updates: bool) -> SparseTrieResult<Self>;
-
     /// Configures the trie to have the given root node revealed.
     ///
     /// # Arguments
diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs
index 41cabda20b8..77a4376c45b 100644
--- a/crates/trie/sparse/src/trie.rs
+++ b/crates/trie/sparse/src/trie.rs
@@ -377,10 +377,6 @@ impl Default for RevealedSparseTrie {
 }
 
 impl SparseTrieInterface for RevealedSparseTrie {
-    fn from_root(root: TrieNode, masks: TrieMasks, retain_updates: bool) -> SparseTrieResult<Self> {
-        Self::default().with_root(root, masks, retain_updates)
-    }
-
     fn with_root(
         mut self,
         root: TrieNode,
@@ -1056,6 +1056,35 @@ impl SparseTrieInterface for RevealedSparseTrie {
 }
 
 impl RevealedSparseTrie {
+    /// Creates a new revealed sparse trie from the given root node.
+    ///
+    /// This function initializes the internal structures and then reveals the root.
+    /// It is a convenient method to create a trie when you already have the root node available.
+    ///
+    /// # Arguments
+    ///
+    /// * `root` - The root node of the trie
+    /// * `masks` - Trie masks for root branch node
+    /// * `retain_updates` - Whether to track updates
+    ///
+    /// # Returns
+    ///
+    /// Self if successful, or an error if revealing fails.
+    pub fn from_root(
+        root: TrieNode,
+        masks: TrieMasks,
+        retain_updates: bool,
+    ) -> SparseTrieResult<Self> {
+        Self::default().with_root(root, masks, retain_updates)
+    }
+
+    /// Returns a reference to the current sparse trie updates.
+    ///
+    /// If no updates have been made/recorded, returns an empty update set.
+    pub fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> {
+        self.updates.as_ref().map_or(Cow::Owned(SparseTrieUpdates::default()), Cow::Borrowed)
+    }
+
     /// Returns an immutable reference to all nodes in the sparse trie.
     pub const fn nodes_ref(&self) -> &HashMap<Nibbles, SparseNode> {
         &self.nodes

From 038ddd6614326882236e28f964be2cf83c56a770 Mon Sep 17 00:00:00 2001
From: Roman Krasiuk
Date: Tue, 8 Jul 2025 18:58:40 +0200
Subject: [PATCH 095/305] perf: remove block cloning from `is_descendant`
 check (#17286)

---
 crates/engine/tree/src/tree/state.rs | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/crates/engine/tree/src/tree/state.rs b/crates/engine/tree/src/tree/state.rs
index 7bc443db935..380e100b475 100644
--- a/crates/engine/tree/src/tree/state.rs
+++ b/crates/engine/tree/src/tree/state.rs
@@ -362,13 +362,15 @@ impl TreeState {
         }
 
         // iterate through parents of the second until we reach the number
-        let Some(mut current_block) = self.block_by_hash(second.parent_hash()) else {
+        let Some(mut current_block) = self.blocks_by_hash.get(&second.parent_hash()) else {
             // If we can't find its parent in the tree, we can't continue, so return false
             return false
         };
 
-        while current_block.number() > first.number + 1 {
-            let Some(block) = self.block_by_hash(current_block.header().parent_hash()) else {
+        while current_block.recovered_block().number() > first.number + 1 {
+            let Some(block) =
+                self.blocks_by_hash.get(&current_block.recovered_block().parent_hash())
+            else {
                 // If we can't find its parent in the tree, we can't continue, so return false
                 return false
             };
@@ -377,7 +379,7 @@ impl TreeState {
         }
 
         // Now the block numbers should be equal, so we compare hashes.
-        current_block.parent_hash() == first.hash
+        current_block.recovered_block().parent_hash() == first.hash
     }
 
     /// Updates the canonical head to the given block.

From 3ba16128affa7467d41d1489abe1eb8cade262ae Mon Sep 17 00:00:00 2001
From: Federico Gimenez
Date: Tue, 8 Jul 2025 19:23:14 +0200
Subject: [PATCH 096/305] feat(test): add rpc e2e tests (#17284)

---
 .github/assets/check_wasm.sh | 1 +
 Cargo.lock | 21 +
 Cargo.toml | 2 +
 crates/e2e-test-utils/Cargo.toml | 1 +
 crates/e2e-test-utils/src/setup_import.rs | 22 +-
 crates/e2e-test-utils/src/test_rlp_utils.rs | 4 +-
 crates/e2e-test-utils/src/testsuite/mod.rs | 11 +
 crates/e2e-test-utils/src/testsuite/setup.rs | 7 +
 crates/rpc/rpc-e2e-tests/Cargo.toml | 39 ++
 crates/rpc/rpc-e2e-tests/README.md | 170 ++++++
 crates/rpc/rpc-e2e-tests/src/lib.rs | 12 +
 crates/rpc/rpc-e2e-tests/src/rpc_compat.rs | 514 ++++++++++++++++++
 .../testdata/rpc-compat/chain.rlp | Bin 0 -> 54610 bytes
 .../rpc-compat/eth_getLogs/contract-addr.io | 3 +
 .../rpc-compat/eth_getLogs/no-topics.io | 3 +
 .../eth_getLogs/topic-exact-match.io | 3 +
 .../rpc-compat/eth_getLogs/topic-wildcard.io | 3 +
 .../testdata/rpc-compat/forkenv.json | 27 +
 .../testdata/rpc-compat/genesis.json | 141 +++++
 .../testdata/rpc-compat/headfcu.json | 13 +
 crates/rpc/rpc-e2e-tests/tests/rpc_compat.rs | 79 +++
 21 files changed, 1067 insertions(+), 9 deletions(-)
 create mode 100644 crates/rpc/rpc-e2e-tests/Cargo.toml
 create mode 100644 crates/rpc/rpc-e2e-tests/README.md
 create mode 100644 crates/rpc/rpc-e2e-tests/src/lib.rs
 create mode 100644 crates/rpc/rpc-e2e-tests/src/rpc_compat.rs
 create mode 100644 crates/rpc/rpc-e2e-tests/testdata/rpc-compat/chain.rlp
 create mode 100644 crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/contract-addr.io
 create mode 100644 crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/no-topics.io
 create mode 100644 crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-exact-match.io
 create mode 100644 crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-wildcard.io
 create mode 100644 
crates/rpc/rpc-e2e-tests/testdata/rpc-compat/forkenv.json create mode 100644 crates/rpc/rpc-e2e-tests/testdata/rpc-compat/genesis.json create mode 100644 crates/rpc/rpc-e2e-tests/testdata/rpc-compat/headfcu.json create mode 100644 crates/rpc/rpc-e2e-tests/tests/rpc_compat.rs diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 481bed3c0a3..2d0eade3d74 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -48,6 +48,7 @@ exclude_crates=( reth-rpc-api reth-rpc-api-testing-util reth-rpc-builder + reth-rpc-e2e-tests reth-rpc-engine-api reth-rpc-eth-api reth-rpc-eth-types diff --git a/Cargo.lock b/Cargo.lock index 67a29aa26fe..c5412e83533 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7786,6 +7786,7 @@ version = "1.5.1" dependencies = [ "alloy-consensus", "alloy-eips", + "alloy-genesis", "alloy-network", "alloy-primitives", "alloy-provider", @@ -9996,6 +9997,26 @@ dependencies = [ "thiserror 2.0.12", ] +[[package]] +name = "reth-rpc-e2e-tests" +version = "1.5.1" +dependencies = [ + "alloy-genesis", + "alloy-rpc-types-engine", + "eyre", + "futures-util", + "jsonrpsee", + "reth-chainspec", + "reth-e2e-test-utils", + "reth-node-api", + "reth-node-ethereum", + "reth-rpc-api", + "reth-tracing", + "serde_json", + "tokio", + "tracing", +] + [[package]] name = "reth-rpc-engine-api" version = "1.5.1" diff --git a/Cargo.toml b/Cargo.toml index 0e387529ca1..63a6c3a458b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -105,6 +105,7 @@ members = [ "crates/rpc/rpc-layer", "crates/rpc/rpc-server-types/", "crates/rpc/rpc-testing-util/", + "crates/rpc/rpc-e2e-tests/", "crates/rpc/rpc-convert/", "crates/rpc/rpc/", "crates/stages/api/", @@ -420,6 +421,7 @@ reth-rpc = { path = "crates/rpc/rpc" } reth-rpc-api = { path = "crates/rpc/rpc-api" } reth-rpc-api-testing-util = { path = "crates/rpc/rpc-testing-util" } reth-rpc-builder = { path = "crates/rpc/rpc-builder" } +reth-rpc-e2e-tests = { path = "crates/rpc/rpc-e2e-tests" } reth-rpc-engine-api = { path = "crates/rpc/rpc-engine-api" } reth-rpc-eth-api = { path = "crates/rpc/rpc-eth-api" } reth-rpc-eth-types = { path = "crates/rpc/rpc-eth-types", default-features = false } diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 997cb5a3570..ae3ae3cc281 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -63,6 +63,7 @@ alloy-rpc-types-engine.workspace = true alloy-network.workspace = true alloy-consensus = { workspace = true, features = ["kzg"] } alloy-provider = { workspace = true, features = ["reqwest"] } +alloy-genesis.workspace = true futures-util.workspace = true eyre.workspace = true diff --git a/crates/e2e-test-utils/src/setup_import.rs b/crates/e2e-test-utils/src/setup_import.rs index b19bc48c9c4..cde8136ff83 100644 --- a/crates/e2e-test-utils/src/setup_import.rs +++ b/crates/e2e-test-utils/src/setup_import.rs @@ -53,6 +53,9 @@ impl std::fmt::Debug for ChainImportResult { /// Note: This function is currently specific to `EthereumNode` because the import process /// uses Ethereum-specific consensus and block format. It can be made generic in the future /// by abstracting the import process. +/// It uses `NoopConsensus` during import to bypass validation checks like gas limit constraints, +/// which allows importing test chains that may not strictly conform to mainnet consensus rules. The +/// nodes themselves still run with proper consensus when started. 
pub async fn setup_engine_with_chain_import( num_nodes: usize, chain_spec: Arc, @@ -128,12 +131,14 @@ pub async fn setup_engine_with_chain_import( reth_db_common::init::init_genesis(&provider_factory)?; // Import the chain data + // Use no_state to skip state validation for test chains let import_config = ImportConfig::default(); let config = Config::default(); // Create EVM and consensus for Ethereum let evm_config = reth_node_ethereum::EthEvmConfig::new(chain_spec.clone()); - let consensus = reth_ethereum_consensus::EthBeaconConsensus::new(chain_spec.clone()); + // Use NoopConsensus to skip gas limit validation for test imports + let consensus = reth_consensus::noop::NoopConsensus::arc(); let result = import_blocks_from_file( rlp_path, @@ -141,7 +146,7 @@ pub async fn setup_engine_with_chain_import( provider_factory.clone(), &config, evm_config, - Arc::new(consensus), + consensus, ) .await?; @@ -248,7 +253,8 @@ pub fn load_forkchoice_state(path: &Path) -> eyre::Result std::io::Resu /// Create FCU JSON for the tip of the chain pub fn create_fcu_json(tip: &SealedBlock) -> serde_json::Value { serde_json::json!({ - "forkchoiceState": { + "params": [{ "headBlockHash": format!("0x{:x}", tip.hash()), "safeBlockHash": format!("0x{:x}", tip.hash()), "finalizedBlockHash": format!("0x{:x}", tip.hash()), - } + }] }) } diff --git a/crates/e2e-test-utils/src/testsuite/mod.rs b/crates/e2e-test-utils/src/testsuite/mod.rs index f151fdf6dc1..e2e737f2a38 100644 --- a/crates/e2e-test-utils/src/testsuite/mod.rs +++ b/crates/e2e-test-utils/src/testsuite/mod.rs @@ -293,6 +293,17 @@ where self } + /// Set the test setup with chain import from RLP file + pub fn with_setup_and_import( + mut self, + mut setup: Setup, + rlp_path: impl Into, + ) -> Self { + setup.import_rlp_path = Some(rlp_path.into()); + self.setup = Some(setup); + self + } + /// Add an action to the test pub fn with_action(mut self, action: A) -> Self where diff --git a/crates/e2e-test-utils/src/testsuite/setup.rs b/crates/e2e-test-utils/src/testsuite/setup.rs index 2b8ee948f93..2b4968c1fdb 100644 --- a/crates/e2e-test-utils/src/testsuite/setup.rs +++ b/crates/e2e-test-utils/src/testsuite/setup.rs @@ -46,6 +46,8 @@ pub struct Setup { /// Holds the import result to keep nodes alive when using imported chain /// This is stored as an option to avoid lifetime issues with `tokio::spawn` import_result_holder: Option, + /// Path to RLP file to import during setup + pub import_rlp_path: Option, } impl Default for Setup { @@ -61,6 +63,7 @@ impl Default for Setup { is_dev: true, _phantom: Default::default(), import_result_holder: None, + import_rlp_path: None, } } } @@ -174,6 +177,10 @@ where <::Payload as PayloadTypes>::PayloadAttributes, >, { + // If import_rlp_path is set, use apply_with_import instead + if let Some(rlp_path) = self.import_rlp_path.take() { + return self.apply_with_import::(env, &rlp_path).await; + } let chain_spec = self.chain_spec.clone().ok_or_else(|| eyre!("Chain specification is required"))?; diff --git a/crates/rpc/rpc-e2e-tests/Cargo.toml b/crates/rpc/rpc-e2e-tests/Cargo.toml new file mode 100644 index 00000000000..2484655d902 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "reth-rpc-e2e-tests" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "RPC end-to-end tests including execution-apis compatibility testing" + +[lints] +workspace = true + +[dependencies] 
+# reth +reth-e2e-test-utils.workspace = true +reth-rpc-api = { workspace = true, features = ["client"] } + +# ethereum +alloy-rpc-types-engine.workspace = true + +# async +tokio.workspace = true +futures-util.workspace = true + +# misc +eyre.workspace = true +serde_json.workspace = true +tracing.workspace = true +jsonrpsee.workspace = true + +# required for the Action trait +reth-node-api.workspace = true + +[dev-dependencies] +reth-tracing.workspace = true +reth-chainspec.workspace = true +reth-node-ethereum.workspace = true +alloy-genesis.workspace = true diff --git a/crates/rpc/rpc-e2e-tests/README.md b/crates/rpc/rpc-e2e-tests/README.md new file mode 100644 index 00000000000..44e9806f05d --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/README.md @@ -0,0 +1,170 @@ +# Reth RPC E2E Tests + +This crate contains end-to-end tests for Reth's RPC implementation, including compatibility testing against the official execution-apis test suite. + +## Overview + +The RPC compatibility testing framework enables: +1. Importing pre-built blockchain data from RLP files +2. Initializing nodes with specific forkchoice states +3. Running standardized RPC test cases from the execution-apis repository +4. Comparing responses against expected results + +## Architecture + +### Key Components + +1. **`RunRpcCompatTests` Action**: Executes RPC test cases from .io files +2. **`InitializeFromExecutionApis` Action**: Applies forkchoice state from JSON files with automatic retry for syncing nodes +3. **Test Data Format**: Uses execution-apis .io file format for test cases + +### Test Data Structure + +Expected directory structure: +``` +test_data_path/ +├── chain.rlp # Pre-built blockchain data +├── headfcu.json # Initial forkchoice state +├── genesis.json # Genesis configuration (optional) +└── eth_getLogs/ # Test cases for eth_getLogs + ├── contract-addr.io + ├── no-topics.io + ├── topic-exact-match.io + └── topic-wildcard.io +``` + +### .io File Format + +Test files use a simple request-response format: +``` +// Optional comment describing the test +// speconly: marks test as specification-only +>> {"jsonrpc":"2.0","id":1,"method":"eth_getLogs","params":[...]} +<< {"jsonrpc":"2.0","id":1,"result":[...]} +``` + +## Usage + +### Basic Example + +```rust +use alloy_genesis::Genesis; +use reth_chainspec::ChainSpec; +use reth_e2e_test_utils::testsuite::{ + actions::{MakeCanonical, UpdateBlockInfo}, + setup::{NetworkSetup, Setup}, + TestBuilder, +}; +use reth_rpc_e2e_tests::rpc_compat::{InitializeFromExecutionApis, RunRpcCompatTests}; + +#[tokio::test] +async fn test_eth_get_logs_compat() -> Result<()> { + let test_data_path = "../execution-apis/tests"; + let chain_rlp_path = PathBuf::from(&test_data_path).join("chain.rlp"); + let fcu_json_path = PathBuf::from(&test_data_path).join("headfcu.json"); + let genesis_path = PathBuf::from(&test_data_path).join("genesis.json"); + + // Parse genesis.json to get chain spec with all hardfork configuration + let genesis_json = std::fs::read_to_string(&genesis_path)?; + let genesis: Genesis = serde_json::from_str(&genesis_json)?; + let chain_spec: ChainSpec = genesis.into(); + let chain_spec = Arc::new(chain_spec); + + let setup = Setup::::default() + .with_chain_spec(chain_spec) + .with_network(NetworkSetup::single_node()); + + let test = TestBuilder::new() + .with_setup_and_import(setup, chain_rlp_path) + .with_action(UpdateBlockInfo::default()) + .with_action( + InitializeFromExecutionApis::new() + .with_fcu_json(fcu_json_path.to_string_lossy()), + ) + 
.with_action(MakeCanonical::new()) + .with_action(RunRpcCompatTests::new( + vec!["eth_getLogs".to_string()], + test_data_path.to_string_lossy(), + )); + + test.run::().await?; + Ok(()) +} +``` + +### Running Tests + +1. Clone the execution-apis repository: + ```bash + git clone https://github.com/ethereum/execution-apis.git + ``` + +2. Set the test data path: + ```bash + export EXECUTION_APIS_TEST_PATH=../execution-apis/tests + ``` + +3. Run the test: + ```bash + cargo test --test rpc_compat test_eth_get_logs_compat -- --nocapture + ``` + +### Custom Test Data + +You can create custom test cases following the same format: + +1. Create a directory structure matching the execution-apis format +2. Write .io files with request-response pairs +3. Use the same testing framework with your custom path + +### Test Multiple RPC Methods + +```rust +let methods_to_test = vec![ + "eth_blockNumber".to_string(), + "eth_call".to_string(), + "eth_getLogs".to_string(), + "eth_getTransactionReceipt".to_string(), +]; + +RunRpcCompatTests::new(methods_to_test, test_data_path) + .with_fail_fast(true) // Stop on first failure +``` + +## Implementation Details + +### JSON-RPC Request Handling + +The framework handles various parameter formats: +- Empty parameters: `[]` +- Array parameters: `[param1, param2, ...]` +- Object parameters: Wrapped in array `[{...}]` + +### Response Comparison + +- **Numbers**: Compared with floating-point tolerance +- **Arrays**: Element-by-element comparison +- **Objects**: Key-by-key comparison (extra fields in actual response are allowed) +- **Errors**: Only presence is checked, not exact message + +### Error Handling + +- Parse errors are reported with context +- RPC errors are captured and compared +- Test failures include detailed diffs + +## Benefits + +1. **Standardization**: Uses official execution-apis test format +2. **Flexibility**: Works with custom test data +3. **Integration**: Seamlessly integrates with e2e test framework +4. **Extensibility**: Easy to add new RPC methods +5. **Debugging**: Detailed error reporting with fail-fast option + +## Future Enhancements + +- Support for batch requests +- WebSocket testing +- Performance benchmarking +- Automatic test discovery +- Parallel test execution \ No newline at end of file diff --git a/crates/rpc/rpc-e2e-tests/src/lib.rs b/crates/rpc/rpc-e2e-tests/src/lib.rs new file mode 100644 index 00000000000..c8c6dfe280e --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/src/lib.rs @@ -0,0 +1,12 @@ +//! RPC end-to-end tests including execution-apis compatibility testing. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +/// RPC compatibility test actions for the e2e test framework +pub mod rpc_compat; diff --git a/crates/rpc/rpc-e2e-tests/src/rpc_compat.rs b/crates/rpc/rpc-e2e-tests/src/rpc_compat.rs new file mode 100644 index 00000000000..436ace0eeb0 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/src/rpc_compat.rs @@ -0,0 +1,514 @@ +//! RPC compatibility test actions for testing RPC methods against execution-apis test data. 
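+//!
+//! Test files use the execution-apis `.io` format, which [`RunRpcCompatTests`] parses:
+//! optional `//` comment lines (a `speconly:` comment marks the test as
+//! specification-only), a `>>` line holding the JSON-RPC request, and a `<<` line
+//! holding the expected response. An illustrative example (the request/response
+//! values here are made up, not taken from a real test file):
+//!
+//! ```text
+//! // speconly: this test is specification-only
+//! >> {"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}
+//! << {"jsonrpc":"2.0","id":1,"result":"0x3"}
+//! ```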
+
+use eyre::{eyre, Result};
+use futures_util::future::BoxFuture;
+use jsonrpsee::core::client::ClientT;
+use reth_e2e_test_utils::testsuite::{actions::Action, BlockInfo, Environment};
+use reth_node_api::EngineTypes;
+use serde_json::Value;
+use std::path::Path;
+use tracing::{debug, info};
+
+/// Test case from execution-apis .io file format
+#[derive(Debug, Clone)]
+pub struct RpcTestCase {
+    /// The test name (filename without .io extension)
+    pub name: String,
+    /// Request to send (as JSON value)
+    pub request: Value,
+    /// Expected response (as JSON value)
+    pub expected_response: Value,
+    /// Whether this test is spec-only
+    pub spec_only: bool,
+}
+
+/// Action that runs RPC compatibility tests from execution-apis test data
+#[derive(Debug)]
+pub struct RunRpcCompatTests {
+    /// RPC methods to test (e.g., ["`eth_getLogs`"])
+    pub methods: Vec<String>,
+    /// Path to the execution-apis tests directory
+    pub test_data_path: String,
+    /// Whether to stop on first failure
+    pub fail_fast: bool,
+}
+
+impl RunRpcCompatTests {
+    /// Create a new RPC compatibility test runner
+    pub fn new(methods: Vec<String>, test_data_path: impl Into<String>) -> Self {
+        Self { methods, test_data_path: test_data_path.into(), fail_fast: false }
+    }
+
+    /// Set whether to stop on first failure
+    pub const fn with_fail_fast(mut self, fail_fast: bool) -> Self {
+        self.fail_fast = fail_fast;
+        self
+    }
+
+    /// Parse a .io test file
+    fn parse_io_file(content: &str) -> Result<RpcTestCase> {
+        let mut lines = content.lines();
+        let mut spec_only = false;
+        let mut request_line = None;
+        let mut response_line = None;
+
+        // Skip comments and look for spec_only marker
+        for line in lines.by_ref() {
+            let line = line.trim();
+            if line.starts_with("//") {
+                if line.contains("speconly:") {
+                    spec_only = true;
+                }
+            } else if let Some(stripped) = line.strip_prefix(">>") {
+                request_line = Some(stripped.trim());
+                break;
+            }
+        }
+
+        // Look for response
+        for line in lines {
+            let line = line.trim();
+            if let Some(stripped) = line.strip_prefix("<<") {
+                response_line = Some(stripped.trim());
+                break;
+            }
+        }
+
+        let request_str =
+            request_line.ok_or_else(|| eyre!("No request found in test file (>> marker)"))?;
+        let response_str =
+            response_line.ok_or_else(|| eyre!("No response found in test file (<< marker)"))?;
+
+        // Parse request
+        let request: Value = serde_json::from_str(request_str)
+            .map_err(|e| eyre!("Failed to parse request: {}", e))?;
+
+        // Parse response
+        let expected_response: Value = serde_json::from_str(response_str)
+            .map_err(|e| eyre!("Failed to parse response: {}", e))?;
+
+        Ok(RpcTestCase { name: String::new(), request, expected_response, spec_only })
+    }
+
+    /// Compare JSON values with special handling for numbers and errors
+    /// Uses iterative approach to avoid stack overflow with deeply nested structures
+    fn compare_json_values(actual: &Value, expected: &Value, path: &str) -> Result<()> {
+        // Stack to hold work items: (actual, expected, path)
+        let mut work_stack = vec![(actual, expected, path.to_string())];
+
+        while let Some((actual, expected, current_path)) = work_stack.pop() {
+            match (actual, expected) {
+                // Number comparison: handle different representations
+                (Value::Number(a), Value::Number(b)) => {
+                    let a_f64 = a.as_f64().ok_or_else(|| eyre!("Invalid number"))?;
+                    let b_f64 = b.as_f64().ok_or_else(|| eyre!("Invalid number"))?;
+                    // Use a reasonable epsilon for floating point comparison
+                    const EPSILON: f64 = 1e-10;
+                    if (a_f64 - b_f64).abs() > EPSILON {
+                        return Err(eyre!("Number mismatch at {}: {} != {}", 
current_path, a, b)); + } + } + // Array comparison + (Value::Array(a), Value::Array(b)) => { + if a.len() != b.len() { + return Err(eyre!( + "Array length mismatch at {}: {} != {}", + current_path, + a.len(), + b.len() + )); + } + // Add array elements to work stack in reverse order + // so they are processed in correct order + for (i, (av, bv)) in a.iter().zip(b.iter()).enumerate().rev() { + work_stack.push((av, bv, format!("{current_path}[{i}]"))); + } + } + // Object comparison + (Value::Object(a), Value::Object(b)) => { + // Check all keys in expected are present in actual + for (key, expected_val) in b { + if let Some(actual_val) = a.get(key) { + work_stack.push(( + actual_val, + expected_val, + format!("{current_path}.{key}"), + )); + } else { + return Err(eyre!("Missing key at {}.{}", current_path, key)); + } + } + } + // Direct value comparison + (a, b) => { + if a != b { + return Err(eyre!("Value mismatch at {}: {:?} != {:?}", current_path, a, b)); + } + } + } + } + Ok(()) + } + + /// Execute a single test case + async fn execute_test_case( + &self, + test_case: &RpcTestCase, + env: &Environment, + ) -> Result<()> { + let node_client = &env.node_clients[env.active_node_idx]; + + // Extract method and params from request + let method = test_case + .request + .get("method") + .and_then(|v| v.as_str()) + .ok_or_else(|| eyre!("Request missing method field"))?; + + let params = test_case.request.get("params").cloned().unwrap_or(Value::Array(vec![])); + + // Make the RPC request using jsonrpsee + // We need to handle the case where the RPC might return an error + use jsonrpsee::core::params::ArrayParams; + + let response_result: Result = match params { + Value::Array(ref arr) => { + // Use ArrayParams for array parameters + let mut array_params = ArrayParams::new(); + for param in arr { + array_params + .insert(param.clone()) + .map_err(|e| eyre!("Failed to insert param: {}", e))?; + } + node_client.rpc.request(method, array_params).await + } + _ => { + // For non-array params, wrap in an array + let mut array_params = ArrayParams::new(); + array_params.insert(params).map_err(|e| eyre!("Failed to insert param: {}", e))?; + node_client.rpc.request(method, array_params).await + } + }; + + // Build actual response object to match execution-apis format + let actual_response = match response_result { + Ok(response) => { + serde_json::json!({ + "jsonrpc": "2.0", + "id": test_case.request.get("id").cloned().unwrap_or(Value::Null), + "result": response + }) + } + Err(err) => { + // RPC error - build error response + serde_json::json!({ + "jsonrpc": "2.0", + "id": test_case.request.get("id").cloned().unwrap_or(Value::Null), + "error": { + "code": -32000, // Generic error code + "message": err.to_string() + } + }) + } + }; + + // Compare responses + let expected_result = test_case.expected_response.get("result"); + let expected_error = test_case.expected_response.get("error"); + let actual_result = actual_response.get("result"); + let actual_error = actual_response.get("error"); + + match (expected_result, expected_error) { + (Some(expected), None) => { + // Expected success response + if let Some(actual) = actual_result { + Self::compare_json_values(actual, expected, "result")?; + } else if let Some(error) = actual_error { + return Err(eyre!("Expected success response but got error: {}", error)); + } else { + return Err(eyre!("Expected success response but got neither result nor error")); + } + } + (None, Some(_)) => { + // Expected error response - just check that we got an error + if 
actual_error.is_none() { + return Err(eyre!("Expected error response but got success")); + } + debug!("Both responses are errors (expected behavior)"); + } + _ => { + return Err(eyre!("Invalid expected response format")); + } + } + + Ok(()) + } +} + +impl Action for RunRpcCompatTests +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + let mut total_tests = 0; + let mut passed_tests = 0; + + for method in &self.methods { + info!("Running RPC compatibility tests for {}", method); + + let method_dir = Path::new(&self.test_data_path).join(method); + if !method_dir.exists() { + return Err(eyre!("Test directory does not exist: {}", method_dir.display())); + } + + // Read all .io files in the method directory + let entries = std::fs::read_dir(&method_dir) + .map_err(|e| eyre!("Failed to read directory: {}", e))?; + + for entry in entries { + let entry = entry?; + let path = entry.path(); + + if path.extension().and_then(|s| s.to_str()) == Some("io") { + let test_name = path + .file_stem() + .and_then(|s| s.to_str()) + .unwrap_or("unknown") + .to_string(); + + let content = std::fs::read_to_string(&path) + .map_err(|e| eyre!("Failed to read test file: {}", e))?; + + match Self::parse_io_file(&content) { + Ok(mut test_case) => { + test_case.name = test_name.clone(); + total_tests += 1; + + match self.execute_test_case(&test_case, env).await { + Ok(_) => { + info!("✓ {}/{}: PASS", method, test_name); + passed_tests += 1; + } + Err(e) => { + info!("✗ {}/{}: FAIL - {}", method, test_name, e); + + if self.fail_fast { + return Err(eyre!("Test failed (fail-fast enabled)")); + } + } + } + } + Err(e) => { + info!("✗ {}/{}: PARSE ERROR - {}", method, test_name, e); + if self.fail_fast { + return Err(e); + } + } + } + } + } + } + + info!("RPC compatibility test results: {}/{} passed", passed_tests, total_tests); + + if passed_tests < total_tests { + return Err(eyre!("Some tests failed: {}/{} passed", passed_tests, total_tests)); + } + + Ok(()) + }) + } +} + +/// Action to initialize the chain from execution-apis test data +#[derive(Debug)] +pub struct InitializeFromExecutionApis { + /// Path to the base.rlp file (if different from default) + pub chain_rlp_path: Option, + /// Path to the headfcu.json file (if different from default) + pub fcu_json_path: Option, +} + +impl Default for InitializeFromExecutionApis { + fn default() -> Self { + Self::new() + } +} + +impl InitializeFromExecutionApis { + /// Create with default paths (assumes execution-apis/tests structure) + pub const fn new() -> Self { + Self { chain_rlp_path: None, fcu_json_path: None } + } + + /// Set custom chain RLP path + pub fn with_chain_rlp(mut self, path: impl Into) -> Self { + self.chain_rlp_path = Some(path.into()); + self + } + + /// Set custom FCU JSON path + pub fn with_fcu_json(mut self, path: impl Into) -> Self { + self.fcu_json_path = Some(path.into()); + self + } +} + +impl Action for InitializeFromExecutionApis +where + Engine: EngineTypes, +{ + fn execute<'a>(&'a mut self, env: &'a mut Environment) -> BoxFuture<'a, Result<()>> { + Box::pin(async move { + // Load forkchoice state + let fcu_path = self + .fcu_json_path + .as_ref() + .map(Path::new) + .ok_or_else(|| eyre!("FCU JSON path is required"))?; + + let fcu_state = reth_e2e_test_utils::setup_import::load_forkchoice_state(fcu_path)?; + + info!( + "Applying forkchoice state - head: {}, safe: {}, finalized: {}", + fcu_state.head_block_hash, + fcu_state.safe_block_hash, + 
fcu_state.finalized_block_hash + ); + + // Apply forkchoice update to each node + for (idx, client) in env.node_clients.iter().enumerate() { + debug!("Applying forkchoice update to node {}", idx); + + // Wait for the node to finish syncing imported blocks + let mut retries = 0; + const MAX_RETRIES: u32 = 10; + const RETRY_DELAY_MS: u64 = 500; + + loop { + let response = + reth_rpc_api::clients::EngineApiClient::::fork_choice_updated_v3( + &client.engine.http_client(), + fcu_state, + None, + ) + .await + .map_err(|e| eyre!("Failed to update forkchoice on node {}: {}", idx, e))?; + + match response.payload_status.status { + alloy_rpc_types_engine::PayloadStatusEnum::Valid => { + debug!("Forkchoice update successful on node {}", idx); + break; + } + alloy_rpc_types_engine::PayloadStatusEnum::Syncing => { + if retries >= MAX_RETRIES { + return Err(eyre!( + "Node {} still syncing after {} retries", + idx, + MAX_RETRIES + )); + } + debug!("Node {} is syncing, retrying in {}ms...", idx, RETRY_DELAY_MS); + tokio::time::sleep(std::time::Duration::from_millis(RETRY_DELAY_MS)) + .await; + retries += 1; + } + _ => { + return Err(eyre!( + "Invalid forkchoice state on node {}: {:?}", + idx, + response.payload_status + )); + } + } + } + } + + // Update environment state + env.active_node_state_mut()?.current_block_info = Some(BlockInfo { + hash: fcu_state.head_block_hash, + number: 0, // Will be updated when we fetch the actual block + timestamp: 0, + }); + + info!("Successfully initialized chain from execution-apis test data"); + Ok(()) + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn test_compare_json_values_deeply_nested() { + // Test that the iterative comparison handles deeply nested structures + // without stack overflow + let mut nested = json!({"value": 0}); + let mut expected = json!({"value": 0}); + + // Create a deeply nested structure + for i in 1..1000 { + nested = json!({"level": i, "nested": nested}); + expected = json!({"level": i, "nested": expected}); + } + + // Should not panic with stack overflow + RunRpcCompatTests::compare_json_values(&nested, &expected, "root").unwrap(); + } + + #[test] + fn test_compare_json_values_arrays() { + // Test array comparison + let actual = json!([1, 2, 3, 4, 5]); + let expected = json!([1, 2, 3, 4, 5]); + + RunRpcCompatTests::compare_json_values(&actual, &expected, "root").unwrap(); + + // Test array length mismatch + let actual = json!([1, 2, 3]); + let expected = json!([1, 2, 3, 4, 5]); + + let result = RunRpcCompatTests::compare_json_values(&actual, &expected, "root"); + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("Array length mismatch")); + } + + #[test] + fn test_compare_json_values_objects() { + // Test object comparison + let actual = json!({"a": 1, "b": 2, "c": 3}); + let expected = json!({"a": 1, "b": 2, "c": 3}); + + RunRpcCompatTests::compare_json_values(&actual, &expected, "root").unwrap(); + + // Test missing key + let actual = json!({"a": 1, "b": 2}); + let expected = json!({"a": 1, "b": 2, "c": 3}); + + let result = RunRpcCompatTests::compare_json_values(&actual, &expected, "root"); + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("Missing key")); + } + + #[test] + fn test_compare_json_values_numbers() { + // Test number comparison with floating point + let actual = json!({"value": 1.00000000001}); + let expected = json!({"value": 1.0}); + + // Should be equal within epsilon (1e-10) + 
RunRpcCompatTests::compare_json_values(&actual, &expected, "root").unwrap(); + + // Test significant difference + let actual = json!({"value": 1.1}); + let expected = json!({"value": 1.0}); + + let result = RunRpcCompatTests::compare_json_values(&actual, &expected, "root"); + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("Number mismatch")); + } +} diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/chain.rlp b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/chain.rlp new file mode 100644 index 0000000000000000000000000000000000000000..ae681adf9f042b3efdc83e3029d29ea3c00f5be0 GIT binary patch literal 54610 zcmeFa2|QHa8$Ui{U$gJhSV9WfWgTmFSz7F5%}%x?##)w&LQ=Rz)=-wDB)d|SE&CcG zMIsF?{&!}KrF`RqwG&ga^bgNA2U_4}t1O$y5M?#ixKQ|_Pu+-v{(0e^Jq*uRqwkEs(%f0&#z8nS&NlGLo zB`r!TuEy{%aaSGMh49An3QV`vaxxpg2(K9if?o1`8K7zGjdo$jNm(e@I8xcA$c>x4i<{fQAmOSwmbcESc4@T02zsZhC-zb5s3RT zmgJJS2upHH$N@{pyd^{_Sla1i?;SYOcUjz8%H$?w@+UxP0%I5 zI!I|mg4PGV?&qvl#MEA)EK4E)Tl?hf(_IS@(enFfV;}iaG4X)^+7C7+N z?)n{cocc@8dR?QEymZ@U7xyYv(PRQ$Bw2SlI@%Nkvx(i|?Thr-2%MHHB&5Ax zpUuGv3$a1VS9j}015{KP`EE8XnzNwpa9YX4_tiQG6=*iCG6t+ zVd;Th0lQ&r#Y0JHNqiLaY+_Aig=3x7=20%*I@x;4(z_(iC1(%ZV3gkj`8(wP7}p$H)UOf!R$gyKR~lL0kfslv+#_Fs91aAL#` zF;FmG((t!+IpM_$q@=_lB-#_dGk*|08bpvZG)EexbCCbh0ar;Ck4fgE4#06fF%?hB zGtJI;%z?6nQRZ$U(Ce|+q|!xj$U}Coyz@s|5JE%riQ?a9+n8j=pF8}zxY@|*A)%3r zQ3Ea%!2&G5WJ#v$JK1@S#H+abDC5v{S@R@CoC6#nn6=;w_eljT?_6-Aw$zSm7_F!M zSOq(>*J|n1$^08p4A9GGL~zFEtCsK&qZkBlrq1~=>yT+4Vujr-uhT~i{fOU8yI;O8 zwtqQ$!6?BU0E&xFmCH$pmf8k-uk=qFA_xnYmlKJ<$;kvRU78Wh-0{2ih1(!ldPNX9{2VF*a+D z)yFrORs&LMv&WfCNbP7IuyOg|Bk5L{^2A))<(iVUcN`=NcRT|+C>*np5n!o?5wST8 zCBVV;yPiM?!1x_X1w&<9yVvUSUvI=k*}_*pV1e-@bYEa?V*zw?V9X4s@(Hks$>VPU-c7&fy-$58WiOnuXYb4GdK;IHX`~17 zZ-E0Vz|(TWV{b*Phxpv{6zJ8AWcG5#0Hx2$-zuT>+L*o6-fvt6$>vr>`t9l22+buNq9q)B;6Jq6ZMj-D zD1jrByBJuWAI@9}RhhU{g>*Gh8)mucoL>b!E?D~7x`cgOga6U-4Z9tJEzaSDkFB+X z)d^Rax2v_YEo?uq;c{;&p2KRF@C{J7x<7xONaCrzzE%Y?xQeSferA}m*qdVJP)VEb<%de}+_aD&=TJQ>__;hD$TzxC&4S($`vy)Q! zu*$Bx^N?tRDN8xvT??mCsAO%j%m+Yz*u9-5=Rv~_L+__z22S7VxPbaJ<-LYa&r-i! 
zjAT0v^zz>9A>L2h-0F5Xpf6q9% z)W!tcSosS%b`@>^eHZY5XNtmAJPfYllnU_v7OrX{ok@Si6E06s*j>AtFNfj@EI_=7 zP)HQv@o)VS@uz?*Ih=>DNrubi&g9g8=I-#>-*F4Dx^VkMis9l3Sd5^|r!TVgZLh-B zkCDN8;otAxAL=4uLt2obd4jD*rmjWhm#`Z5cV6r)!A&vBSprZ5D}W$hR16CpTMSZZ zi!N>{+c{T0qCIdJ{_zd3j(@H>(HnqdkI|19-Ik*K*-eh^*21=+xvjxAuC(0*{u|oY zDGGx3CsY*zwmZdjRKOGL(+~G+CtUNsmA96mqHwT^Qh*1q!7c)}%4*SVKV%@8MPO9$A6l?gG~GpK zNm$iSC2Ed12nd7#)v{qas4AF14O&TwZGxc{SO$wgEd_jRIN2=xTc`?#lRJKsF**<9 zvO7Jzd`z8rU{4CkXv-AN@WIQso%0C4@W}=pVn_v)K8+GT#r^n@0`LfyVViR%6XkjQ zD(va}9Duy0cf70zX-e@=jFAO+mfm2JMJKd;F-l87@K}ZK<>uO{onju^{=P>a?fTN` z2M`ZMU+i}OqAX>~Msj}n+4JiNH_O4`IQaL=Dc2rhqh&RG|q=g=}P1GY#Ts&*275%VV zXX5nn^g`DrJMS312&n_o%2HCu%bkG0VHbQcedG23ab1%YV-Lr0MsH2I$O9N zLk!EnaMBS)d-A2j+!*c(eB}~nKKn42ry&D}ghlH8;gB;ux$tj|^6>ySW%V$xwV<;{ z^h%uE5Y2Gs{@wd0YJ6H}P3Vp&(e0hxn)cucP2C%VUea)_eQ=2>_w4~870R!>eLI4^ z-b`>(Esi(=ff@z*pY8}bxa|B~(=0HX)~Geqrl=%sHn}%TA;^UMJ;0KX=$Vd#ymp57 zpveWlL(K%e>i5Wkl#W6KRGCiau)Clfbako1c(LmlUBb~hHtqu};5uG)+trp;&qJWA z2vo7=mT1*a<`^v|U%d~sQMEyaVPya{Ea;$^k+DvN8hLdt>niLa5S*=am{wH3aEu6c zfk`L_C0Gk5Wq!*+k0XPr|BNS`WgLGf^@tJxL~S~xDWL6Qymd2e(5#Q)!POJQ5|E}> z^%JlI{Q~0508v7VU--Lov&|gGarED;JRM!>e=K&k@m2SoCI1@e6$+#u&2M4E>d0vT ztuVETeqKqgG7YoJfysgv0(|u*RZgCRds#(RcwauT1Wp(ZP%&P3_P*vDR|#!Eg?p{z zm?uv-1l=TP7;b*&8xih-pP}|nZDT6`Z0>U zg!(z)>MQLY9}|>hSz(>|*)D!c{8`zxCkLc;OM8d7wO+s}R@u;hXbW6j1OHfTOLn$3 z_@^!KABX(!Ygpd`5i&oqAz@&<6F!GESi*Nef$dF4w_yNRSbkCbJS_^)Erz~0 zoqeo1MmuOI@hDK{1f!MH6Jy9Y$v%H|C*15fTk$pNLMonUqeIooRs9oa(6%&xoAR$;+w!ROhwWPn7{&qUl0XyW6rxpCtpq%tRqA2`+i*>1nC) z98tUG#3PjepPty|p+=^r{foy-Woz0TYK(HL?mW1e>uxP=LN6D12fY-2z)0-5fhA&t zyuAoVn}FE0F~K%g{z8uZ#j#tUE86;pschW?YAFz|#h9wU1-6#RlXSl#4}k*F>Pu3( zyHMm|4Sy2b9*DU;oQ<3kjw5!_KK+6Tn$o4ZV_$ym%-8Fe8q9_OX^$N?wf^(5sq%XI z&-mNo#$yptE3TJDXl_wR%PSrZLfX-zIfTVwet`UowuE}m1b@rxr;Opo5dajiHqP#6 z9t+)M;)b%yu%y>y*ExD}a5Y|$A61Uxp0bUr)NTZbTFYvl@-=v+hMki-{5tbepGV;- zuVd|xlUO7hw6i8#k--dqxs%+@d`Nw5*g5sQ&sQbdifL6w0@v*1yF;e8Je4G+AI3%l zG8qrG_vI*dWDQOorv5T_{2SlU_a)6;mbV^lIE7R~1}ku+MFklhIgow938iC*7E7Aunpo?gIq^PvE_CB8~9odPnnQQpUC5MpK#C z`CF4054`N7Sa|&^1Hk8NINq2~_h7<&SA*_gH$vEZPMFp#+qOM8CwgGz*-j8@F*FcG z6G8rn*a)veU=>tA;NS<7{uz`}?QobPn6Kg+0wdtDV~*UIgX3Tl3^nGeJywS*LH$qZ zC8&YIzzj740$py%FcX7s(EKdoA+Wl&XG5s*QA>enE$}?_TTokp^ke)pQmIoJPcwJE zFE#$&uCC>B-)y0PWZ?-;@j&;*=nIsOcqIU}_FKMn)Pg7SpHI&pGu;~yJCa90hgV&V zOBDMZN4?V=Y0b1PsifS+QHqBrgD0pumdH1lcEqZNZ{Hc&>XSG7q#jIV_R#@`QJtA2 zBXsdq*98|wb|)0MYlY%73y(h@d7a>VRWEBCd6M~8JBja!WA_)LUbAo5NdVxuL3(Ql z_*DTG-;!@_$j~>Phs$x2%c~+Ut40zoj%w5acrMNR%=l!eET1&m4vC!52=R}4Z0@kc z?4c7yQ%8oZMB1_Z;^F4nP9n^3FQDG2)xLRA=LP)m>@>^%?>%WLMRTw3-b{;)j4wBA%2CSB;M-vX|85*Id_mB}3DraO8{ElKQmVk{{K=);vjB ztBqga{Zv?^&38YC_#zqRM{2LQsbdpt9|{Sd}H0?sixC=Gxki-=`9^qnGEb zol$dj`_Y`>0IBmK+Q6A<<|i8RycvhTN(SAKrLXOJWQM!@YqX(8Lp9C8Or!zmyl9dE z!>;QM<(dvHtoj&ejaaq8maA3J{b8%(Rt~uu`=UT%HO7RIt(qfNOKhNtt!^&iunlzt zTE@fp?7IHPI<^IMQwdBkaPH6_zykCD2!wboC^h~q=LLgO*1w_>X@G|6d(oQy1^mUr zs2DCK{MX1x|7T4{c)E?uOiv35e`5rcYCq9%Uw4E=nZw>f9M!7zd=VL>RQx8Ab#y!h z#~ZbfemnmRKN_jt_5<`bHK}9wpZ6vyysp|BY(spH z)byG8Nl&_U$ey-!t572u{Dm7x8BN*^d(g5gBwpSO*?x^SX9<~h|WlKUf$g7Yj z8p3t)C=DD39L4}8V$SoYVxJJd8ox>4b;vl?D)qAVHTmx4+K;ZMrWi`#+bjjd<_m$w z<$nS+2n3F{{sGKjo4fe;H4H4T*~hL)|Az$-Y~dtpL8;Aefh`!6?)nv#ARuH{ZpySa z-b5w#M3+zmeXnCS5r|QE(pyd_yp$?-qS{Q2i6zI*%|3S&5Q`e;))i@oF6AkXXO|9C z(H%L=C6kuQK&7#8`GdghBGL|qrV>(bOo>XK5>8uZCz7e$cV$byM=i4!lmb9D1;Fok z8Si^~`Z#<~tENj|+q`X8qh}TWjXdwt0e=pF?eRnLo}Qh>`eUzi$q)PBNyt-lKCG`Y zvi&Y2BD7HHi411{tDOV@@Tr2zpI;DCII5JPZTa>|?v4s*cDbyo*Qb1(K}C`g0I)uA zJY|nwfmeH-oi-LP1Tvm`BQW~0WSn}E0w-HIhL+R$3K_whe2tNeO3Qi zy7#4%*)u{ONc8DauS{4&bg 
zhdbze6H53;fkM4xnYUBNrZc+zlSG=4XYPjGpz~Zf%*E(QL`d&e@ePDp40LV;HpJ@b zqiEoUgu+l5st!kGTO>G&uwouM!ZC+}eZ#PZXq-l@*YL0+CXU_oja}4GXnQo(MOngQ zAsY^WtsjdA$I7U7JoM>cP6?~WbcC+0iLlj1&=On<2-3Bn)b+QZ77R)`{)AKlKvEvH z*?sjyvU!|I^V{e7rfEWNoW3PBWvlGna9V~2cner&O&xuEb5Z)-gKq|9-yd?&^

h zlxsSS>>wq}oH28(pvGtBM2ncgOJUMhu=Ttw!; zK`z6S)pAPh1pVH6dlC=z7UuDHoV;27%`Ej@rAw8`5b`9~uXd8kue+&>1!#IV>?CP` z-}5-naUyt}DPc>6hj+?@kc&H>Fk9l@*R5x@lUc6Z4IHqFI+-Y)HE6CP7$?(}TK}2t!{;Qis7r7RaCN(IpD0|+QDNgCU zueErhjul{YKTzAL=~r?!`C~xAwIw|Ehf*fX8bOq0kR2JP_H~g1niA(Jy$CFl6eIee z>}EG8rBjdK2cFuuf0)Z8HYn$c_c~7km-FhHM|lo>b#bE#(63>bDyG1g;o-qn?NMz| ztO>>tg6d;6Q6jJs3@dD1Zg9?x2Jj8BDiH$Rg6ji=Gt33Pt)pDia&Z+jP&FAGyI6-Y_B~#cSzxJ;-0}) zdJ!P~Q;}*>wrb$YIZ2jy*}D&bz;kDGbak@KJ3o%j+FxZGXjCdPskn0fMrh5Grr@S{ z4Wu>iHuwPm-><+d^Bwg&4S_D^H{*XvP{2xfak z`2EC`#DVS3f1N3zzLtUnLgmW&)o21A%{X7=O_DyXaQ2-w(<7#RW_Z?NUpVYUY=F$; zy|a4Lg^XcK27@#x`&X0C@aK zt~6u7vQ+BXtGV4I(%O&2@tr;bHQA@TU(-Y{#(e_tVyy)A`<0cIg<9ICH1h2CY7_X0 z(%06hUoOaO_v&my+VP>e#MWQ8TD!q5Cr-R@;lZZ{+`|V4k9mA@7j-WWlOp!@9PJL7 zlgpgV0ldnZ4suzxbUE*h6i0g1hZMstWna*%jzyPc1ev5Q=OTmm{^eG}oHesUejbzEC}ToN-2mTugEN5WH(w?k z8_0M9H=sHmy60XIy9n=V_nN|cZvjJxl8D_XutlKG7LLk(N{ChD1j9KLNyI~d*~Wyx zR8SxYL-zsNZw6Zg3}v%{VPZ!Dk3k)VL1P4F7GUv1f}!f@&>01`>j|X0u+`cR&rhh< zjW7~8cho>|tbXJCV4yM-2MXD-7M2Fb{1(`Pap}H4;gxAXtk9si@q6c-@oU^X*Pr(3 znZaupzFevPM8nyVr-fJ40vt?Qc$(CgQWkb62 zv=c<{C1Gz&c||YYcWk+lBXvO0<&5RRbJexDG!4Laq9T9%-at{EB(QBIv_UctZ~ttx z&Is)X1v`Dnjd8Q{{Fdf&g<4P3}Dxzf$fl*PI&$zD-X)PSCA1y-Rm|_`W=@45{OV zguEITxq@)dT1)V5?$%UkKh4CUFol6{Zl_L1D;%|Wgez%%#(9cxLcg8vY|Zj6I565A zc50%7a;|%&(VrJ@yoNb4p}m#m{_MWgM5@~RS`0wC_QAxq;9NkdgpXD5i3W|Y^~`L9 zRSE~g!}+GXdOsb|dxqXQQk|j7MBJm{+9QGf$9FgWD`v- zIeMy#hBH(~Y9|(S-x()Qz^9CgH2KM!Sv_wJf652=Y@Sk@sob`~*|9K6*qf=PbX1CW zY*JPHiUzH~b&DI-NI#)J0T2K<-chc#vpcN+(mqwO1CP&U`2vc^E`{=&l2Dhyx4_pQ4&w&cj;cyoJsJ(36=}iE|LB+D@Bz@*KjtMON?@)xd)}v$MJjbKS!OBD zqVzY?<@-l^x`TtZhTG)Zx-IOH`B{;SW=ASIwFuAeP%wTW;cQ24y+UKID9Z!nmDEsh zW)CHMNz=`(e9bLvxK(9*;@oH4uFFgZPG)Ho1M=nk@J%ns_{CuxaQGKi8=FA5 zg;TCaroeB(Ef|@K{EAIHK-7p&>y=#G#3q(Yj@aHEZLgkjCt$5SV3%vBdD~@dSQiN| zTT+^>-uVI`_#S$jP_U6#kf2ja`y54c;K9DCq+9Tgui{oUZA5R1kanVIHnGcOP_(45 zA!1IvG5wiqtOlSACm{~5}uEckJwGK7p%$cEO%Zn^c20{VN_JBcB z)XiYCp3&ILPvjVNBd0s>gjT@v@Ef z+im&G|02_kYx@5w83nc&K7$AbegfMAz;=^e2U|>J3JI>NlwOch2zu2$6!LELFy5(a zdqC%qsXB1;PS0&p^*@YEWpHsl=9AJBUGMeD*_yg`pS^d>@C1RgOWfyOJTeD5hJcb$ zo{KxPJ}>)DM4EowH_(xD%uu_B+ut*}qcz^sCQ}1&=yUy$L^uqsn!HvMG^V ze8f`jyo{23KSyx;Ckgl$J7P+P1VPI-1qjkwVq0P<`emVVjKRKlM(4{_zB+N^-FF@c z-~XE3RLCmTrU7jh0bDqns%~FXeEKA(EWmbua8Ub|9O`R&5MB#A3d0@yXTiu+@>hF_ zspw%vmAQ(B4SNarn59MLHe)fL+sjMdLYB!F%pW^FQ2}TkELsj5KwOFAr~n<$H?&QjGXpW6#Xyn|Y@novMhRK9fWaQL#YU9oYUZCjfL7CTT1%P*X_wn-V%pvYQKURS z-cl?@Z3`mLo%{LC<@QXwms?XMEx7His7vb9MaRF<3JjM}4c~L6CWTznt7GKZ>1U@} z2sq~JC(BAL4&y~%ogWk`-MKr%su<2gkY1LRKT(-P@fKiv?!4kX)weudA8*=Rvd>Q3 zp0OFHeph!y9kaFD;X$z8h%HfVC`|Z)^=xbgTjAhmLxORw^#f{u94HKB`9ptjln#czuY{mpjZ6)# zgG+wPd%?(53S8b5jKBRRyR}>G3?D8;eJ=>M$Q-u$CX_=kbM_9uFI;z4r$%^Z-qUyb z{s7OR@zA;znT&+@tUHhK4|iqPIUEQYdHx{wffLmWgMM42pENjyUEm8B|K0n{)<(WM zYk|!F$e0(p(?MIJrHjQxEx%%P#~l|ab>-evs*(}_Usll882@s#c-gm}4~T3S(_~XN_qk!+r_d>+P{tqb^Q<{c~sRty(Tq)UKe$#zb+uGLK3*}tGAz8v91pY%C zBx(`-Ds3BhZfmfWi~VZE|H3v@G=kY65y2&wetd(G&~LrM$O(nFK`ULxO1uJ)wx#?- zu_AYr3ELv1CisY{oD%am%j0b6EPK3zq=Y%+0&Kre z^82mvcJSuO!(ET<{Vv1eGGA$DQW2_0)V1tYEvpfxyTNS)ISYh*loo#N?h^88;!r=m zUINafdcT_6wDzgjg-<&Zl)B7-8R9Wkc9r4S#v?PK!KUACm?zSHV^#9Wwn+##C#SJ6 z+-5N#e(N6}j0ApeaRi#b>kg7}+Wwm24C3{^C%q3P`l!Ii&-w_s>0`ztyj z?;VrYFknxxflj0W9=M6A%uMfi_SPLOAk-7_nj~0Z8sQCpEOL!@uxP*v2XKDDwz$;D zp!HG1Y4Onzg}9E&l$1q9@pqa*84W%*cTuFB9GXseiM`*ST~cu8Y``p;Ys|`|=yZg| zhR{@HF#eSsA7fzMo=T$^-vi}Y<&!>ls+Am1atpr8;BzPeVDTS2n0=A1ywWE__e^~5 zqIf%B(Y*F9Be^!b?l$UKT4b>NUv4HQ4NcD)K0JFKCZzHf@--v4IMo=XuV)U*I6pdgHkaG8JpV3wb*lwEVum($**tCRQ;vpQiFY4t*2i94GJIUkiOpp`HY^R06PhDI3${l?YFh8=`bUXS40Icxz6xJZN&ZSXNyu 
zUqf32_KYwkx{CTG=MR){v(*-a6Z4BvBFfe8;-dNE=7T`$iGC~RChfXCNH`IwK@s2w z4t`+Z2LXQ2`w<)tJ|uzt$X&r!OhPrlR%P@-tIvjhJ_!WI3Pr6$ad05CtFdXr`QRtN zg|=X93jZ@|Sx$a|mdWOLb?VoM^lYp6xrk%^MYJy-yIIK%Hdu9s?F07pan;X~?qt16 zXa3TW{!!?M?B1f=3+{p*$NZ@!4JmdatrfPRmHtH@>8|g&f5{^D}-d=W}^73%_Lq{ z_swWN>&B<108mCOwEG26uzMF1|GC|cC*n>wPWdz*7?YoFQWtfQxfBV|>NcgZAM4%0 z4MB<`$E9RWKGl%9lNOrU_n0g0S^OLo(oX5$+~?e-nR_mxy7_aldh`;{!X?iMr;~c7 z4Ve~U^zb6mB>zhQPeF<;*CDfJzt{bX0#o1Z6f?YUK_ilQxP@Zg6m^F2U}IB!sgx?M z<%IF!gU{YX-O{-G$p$y%yvwwlxesR;k9pt#u;1R0z{Tt}8{rpX($-`isVemr9h{|- zUU5?<*W@wnkgd7W+iVdwAeY;e#r9Jnvl-{jtR1yiO$#_5kS+kT%hpc}fE~=wv+YMo z?Q2sCMi_4EzOD8*;L~;y%Cnd8%=D}}=Lgi=?lnJ>yKkJt@jf@ToxcmaSDMmZr<~ht z`*jO0?du*ydGFTFfc4CE>I2%r!9@?c0t_rbFG7F?nAR;YbyQr5J$mcDF?Oq-w&BOx zC_N0@h$?{%6*x*pXa6*14Ag9#4rUr)j=ukefI;@7IzcD21|^}qmu@{aZTl_n1!Gg? zzoHWWa6n&Eap*dI%hh6b0UZndMwxySN9iduoOw9UmqXXTz6BI`ETs_AW}Q#MUO&Ax zJZLa>DnTu#e=r!znl4s5E!l+hQ~47B0f5*9D~_8K0s_w|jn7xl?TG)%m= zs-7n~Ha!8bpKjN?5~0NXo_wfa=6)vxQPe2z)JZO@MR_}i6TV*!X|1{qfTRJtJSO8F ziYpIPOtll7keri9(0}FYFJmnAauV0kN?g_tP~kjeK5!P1QWZJmljuhf|8a40AaCsQ z`AjQjExr6Ot5vr2AKD->i{YR1Z9~s(4c0cue;(w&y#*DHU^YlZaNEyp>9<~egpN&L zKJkhpHYTjn`*g&zYky%{zvjKca(jH)2+wn;k6#Dhi_zl0lOqHoN z>wUHeMB1>6y^8be4wJ3%R$ciF6{82EL(t>7W5xFZk`x?UGkMdyc>>rIL&*+Jn zz%Pz8K>&g@Jq&eSDH+H z=gNi#ozVo|gD_ ztTg2@$y;Xziz_}FPQU!a*mPD&+izi_h#^*{rQ;UlWTMPx!S-S{5-5eBj;WHj^bKIp zLiW8H;aNp?3fG(66thA8KyY> z0ef3htVM}A_@RNy_!-6aMsNqSm};b*HhMFmuMZpw%IVQJ z@Vj%L1)1LR@;Djm+GdgltVGm~-J{VZ^WlAeNZP)o7*cxUb{faGhrKFHWx{?{Ux3py zwE^6@3sI)YpS;gdzBg)`uTdYTilGy{nn>>cfvUR-{BbAhKK{k#Ima65lu??yyVd)8 zTdt3PBPPL}_>lOF(pNhte|g^@dT->J!v4z&P5KrR5+71mbi6DkC@%?U&|Qq|C9|+F zq80=$@10|`v3ww{;8f!ps?gVaiM)=#|Amq0=d|&uzzgObz>ct#QACo%``F-^>_qSE zO72#*MLI_%dj?|~&iJ=s{#frtV3#ylt}cF@U7!y#9jWJO#%?2N- zViOD{pdDqkUvvW=N{u!`fC3y3m9M9Df5hkzU=a(0K$J1n-^k(Up%K^)!9La!4g?=9 zqr8`K)qCH01b_G)@5SM`-vLFoo+a}fNHISrr}L|ozT<#SK-6C0rzqNlG(j3|`B#pR zion_g|3LtMwXAb+B#G~VwW#mB?_E8b#Mog6NPG5`j;E;gjW_QV_WN+=}hJi(YOHI zPF+UJN8DK3nPD^?d>TFJPsYFM9ME0#Bb=gDe0>&z^gHw?00;mn_bz+UNAD3BmR}k> zUEqep%dsN(GQu-6D*SHm5KLVX&|gk`PW!^hd&pi0Nt1qyo0)mXtw+N4-Nw|zv|SGe ztdQ1v+X7&fL=O(`E&@f<16OxC+vYk{zEs+MgC_uAzIEQJ;|||-Knam;@6Yt)izcIo z$+;6D5qJ6?OykW_c~OzJQnT{T)qwY}Dp43Q*KzR<+lv;Nh`rAbEWgDM^zo*uvcbc7 zP*d0b&WZ@a2wYt$?H{GvyXUL?Oxwy8g2t7%wnTuerIERT(_63>(L|kk^ov|c-5rQX*;Q_h z(`G68#X;G?Y4K7@Mv-cB@DbKvt2X_>4!;|m&BGn(DX)r3WG&iruo&?V>k$F!SNG9X zN8i#Aj?rQYtUs<=!dA<_Eq)zOVK8ZR&;G?wKTHl~iNXjb3`RhQ$by%D2P2@x$dmfN z0)|-5OWPb#F3iaXw4y9HkMt$GiEA$|lFl|VT$`Ft!yzyO0 z{T9xH(dIinr7nU5QSZMpO5I&bF>e>X53T=%v@<~a?JwUY3;qx@==OVLDMeH;FT=Zs z+TA^a(JCdM-=x~|D>Jd`WTbcl9)c!#MElMehQ=raiS%UJ4o^*}bF|J7;tp|p=D@E~ zYyv>TU!AA)FlY3jM34Ai1E4EuL#Tsns+@F%L57g+4%M+UwBOS>Si;I*eai6!3AUx9>=rm$RDm{V>0W-paqU zOP(ej$5Lf*GPf2)72y;Ww;++x?Ed6)gwPT^oJym{F#{Q(-aBZ zv$v<>yx-bn?dd1MNl#mjWBOF0lXZCU1#qb#PRXK9=uU{jE(RzVN^MLSLWY7x z5b{tE4*p6N5(bBy+h~N6*WmrfhP688fg%FUw%}`8RKJ*nL0uelPpIH-GYW60t&+rz zTyeDv7+7vFT-E-o0{!@~iWvjoc#jD~pm@K9;4e1up~Glg-TLeOARo%=9Zz~pvHP|Q z@G8p1B+^jLQu}kSB$%YV z>UU+~A%l;AF#6|T#RGt+Qm1nxPk$2Bh@tE|l_fE8pin~Zkt3;NVS@r#B=|0snxhQ zS#yx-F%EQk9I(JQQ+4$S%nF@4@sJR&KJa^<(T)nA{Hl_Hg2d$KR<2u9vfUg@{k41P zmKZr6d*$y+=<5=@qTUqAbu|z^xL_(6A_f${e;_?tF=eFeJ7aJoW<*Q+es$kvNfP_S zakq+_rh6=ayIPKFUQ=>?V=>oSt$fR;Uyx;V?kKYEPI&n0%t&9KAl9Qdt$vJipxha; zN&l+~>KHiMV9ixA<}gfw4)IpSKPrAr$*^6*(86zdHwf=lEMa(o0q{`h8+ZtG$8UIa zm~?UzWC@4oeh&`v6*kNXiD;YT|rbFhg z^F8>tU-GC9@l%VlF}EN9MK+FE{qv@$uQSNUBD-!Zyu2VDp4>F{^L8 z+rGVZ=4{L`BNlkrdcPZr?8J^}vfZ^zvxv@j(o2H<^|jP(I;i=5&+dS>ua3Fl+=E6- zL4(ip7swS1eS351)1gHOKrN1sb@vUNiF4)hir0nf2Bzbc=#3NiQ2Mpu(8O2Q@PJQ; 
zg_&`SXZsJZ2-3&g9GKL0dw2i4?Uw|}frEC(9^X5(ghL9x@900Z|6_gNpUQ2y-nIr? zqQyTA_ur{U1#p=C9|1ze&umZ(*zUZ~VGWk>)KPm!gbKfqc~GDRsaJQ<2>aYnLO)kH z*@eL(-u_8sW5pxyKV*Y?r&uE>38-Ze4M!r%?sx9EE`NPekdU}*va!^wd{om0aE0vh z8&saTV0<~7W~P9T^IU2AXs7bxyFIhzId~~9SwQrHONb`Z?vAqSZf6NcCW5Jn_LB#O z)b@yLnAg+!8!`U88sOMdLzRe)m2G7>+5)|IU_?|MhRXh3g8jfs;SLUC{* zteCw6mvk@$%J@5bhbVv5(Ip(^`)*LAGZyYl!$9pZ6uxeK#~H*|oFS50Hy2`cXGJ4! zr7yyccgBox#0$UY!ZQREG8o$L0;(PCUHF3EwQoUoWVOaVyI&13eQOtY3hx=9Xm1E` z_1&~tnL*Jb{^GBtGkm#1sIGYF$zKC3mG1T9w2ohI9HJH&s~;>SOPPwz;xQDCV5RLS zV1$f6M*!4wK|(vZxd&giP|oGVxA}jWZWii#OUnCzl;)hgia-?LoKs?(_&(rV%h5rj zeeb4?BLbBIDxO7O5!Tpgd8X*F>wPzz^=oO4IW^S9q#d(VnvmTg)Bn)`pK0Mmp%{Kl z5flMl%p8Y{;9oAkdpWK^@2J(GW2M(1WSTf1@kwAl#}`+=UHzPPMpkckvOr+;ZOFd2 z59wdJs21LUKhsVc%8UHm^OdxP+M8GI&A3@l)gNL>dkv9dnoZNJ^@^_WrNTQEeG36( z5uQnR`#XFq3102Y249$b^&nHFDl)nZ=h+axJt?1I-qW5~IvGdWw#?f3FRDP`0sO%; zx%}lPB_p5q&DM1@6>%^4+kUJ@e2rkF7fb9#VF@1UogiTPfk2{E2voM#uuAVx1QnA& z1?nJ_!V>(z(eWKh0|P&+$8n)W%%cK~xG~3B>m1b5Vq0KX0oxN;j|IC&p!DF=KycDO zr}b%V)rg#XSEatZnz8DJR(ic5d=>!%~H5`~B= zG>!ZB=nMtiv5WZ9h-de{U3V#O}w`4N6-@X($qz!jJ3!nWQlwLM|R#oR| zYNP8>H=)e306aY-7SDJrjrmT`Yw&Pv#g^x7*lvoeCwD+D0$dGO@>SzI9 zTDLo>4vZX&X-i_Izqf0|ckpp1rVbR=R!h zqwKiC+eXEhl$+0{v_=n%!jx`Y2R5}p$7+N@VSQ$EuuQ}|IqkJa zf&dUOBR~R-m;}5(G!B&GFWCM7P&*?vGYSC|Ao1_6D^D%=r7<-8#19Jt;Zh5t@{2Daf29 z1%F|D!~G5A2fZhx(U)OrzKiu2fu~VyH4`7m7r979*vDsB`|+;1WkKSO6jF`mJjqK{ zsswDMKGxiPa)DjBTX?7dN_75wT4D2!S&|Tnj|+g{zAfra&WW_ z%BR7Ebu1XV24?F%ht+^3e1p6}gl&*l#n_)!f2+aF8|Bk%8)D0SzvI)qc-i<%INY1U zA)7EzOYn=`dXib}W{hkP|6lLc6`~gQ5(m;>JvF^OCS^b#KI*G;a^{>wfNs}+*-ADlio11dqsMbD``W(4|n`I+5TG1@~@d-{)*QuY;%y7mEiY<4Ape6?mst!(g~W8}5`uYy#*Up1C-W^eL{ z5-~q^{a{`s_-F7amvG4cQv_f;Z`d|++}2XI#o>7x=*aIlE$)yoi+*o#AFnKp-c@dC&RcyzpnIRBzJr#k|)az7&3{Bk{PH z=8J+;-P8cCZx?3|`%2{ToLkzTX!=id7Z!CLBU*?dIF>J=Zn_H>ToN$Uzbh)kB0j3V zp75|D0=)rXb^#bxi7r=n4D4>Ob_CH{k88r}ag-1>jI_F`fbDCYV3^;g0m3%zK5PB2 z*}&H%jIcT!6tebhHBi6F2)pHhdT{@M@51aK=%)?hzq5aE66uyLL5DZpz_W*jf$IAx znBAq$byrt(RKZ9xfJCb=S9pHl&I@!*^i&dI*DMrPxeItpGqv-XM+{|EUa7OQ+41IVP5VOqa`MIQM2+zn3tZgC9pgs)LkAu$jehhGVO{QZ=OAXT#P|s zMFS59?)#nm!IaYc_!xa%UZMP)En$TxAn@j$vbbFM+q#`C65V%Zml~fWYe{m++H_^|0d!eix2*LZ z>d+|ZzNPOvPW?h7RVi9byCGkrXW@a_m3Le7#MiC_;kJpPM@F&&1jD;(fj3h9Ck7C^ zcYYM%7;?#E#05;x%~W-U8s+|2}}XUan0zykhj{3 zk~2<~H&+FwCI?+qPo)#EY_A@SQGC=t`Y?UVtk%d6vzvJg@cfzkPK_4T1ox(sdf)9E}X^xi-3?h3$OpRQ!4Kw3O=7nrYC+Jm|^r9(2UVHTk)ygRetlt-bHRn zZPjJ)T{mvHwH&^9`oV|&2$7D?=z&q7vh`i7wE*mMUbkBDZk2D{wJ$hD5c?u7$fn#s zQ^y!X8&a1(lrts32-H+U*K=|C0+6?qylGTrQH zom%(fg;G*{iD=RbzuLI8VjI9ug> zU+4<^;7L0)dh~^O=1zy+IJ#QLt5dV#7yNqw;W{D1ntR?O%+~BWT+_>@=Ut$I3EdvT zH)i9i$y7{Vt`fNa&~CkHwQW(^mY;2Fu#FdQG3I}r3Ki{Pc59Hp!G9)jzwvr*)Yeh9 zgr|W5_Ttf5_s(FmH+dr}4kj5-J5PGV*eaK@#k~94U7!?S>HmiWE;S2D3H>f%-`Ur0 z1tom;z1zE;V(jxRor3g?Jl_+SnE>1%?qa1N!3%7>CSArZr-?4?`&8%>7Vx@hN}H2K zIMo-JNqS*tDPCdgG*w|W=s8O%`_23wK~BQ(hfF#q0-BFY+bjvhzgqxsj2Wi*Ghp9# zpdWyJ8?@fs9cE&ib!#5!NBqUElBC90Jy)?;@2^qzp6_nTPhDpUh4F_AZ{wQL z&)j{)JHs;Z_}1xrL9JBvwom|CjjIuBQp^_{7y8gi3Eg?X z)-STXa$d8{g3x4tRQUCG)PqN#?wuD=5YG^J>Tuyk_$D&!jyW~d#kAGP)U~Mm5?15> z&WoKTxG6?CO8_Lp2%v_Rp)lbqix963yP@ALW??Oo%U|9%k{gP1-23`+5Ah(9GP-*7M>rLcNVbLen$WY?FZY%=*%$Z(I<3;5?x2-1L#D~-W;6kQf=!+_lV zboX%DyEO#;eDa2&WWrxNLOaC)1nPs?IvtJs> zh$mWum3FA7s5Fv(cd@$7NfhH=NIT z-U1e>(*v~b98`VPmv~((59TNY2?KCVWroMXUMKE)7T>^t3CjQ1F<^u1A=X?DExZ@ijP(uj;j{=2lh#qJeyKVyZ2>uTT zm8jncAV7%-+rhdKx*27%W!f-r6qsBO0*ZqzY=oUD6mj%rF6j0s2JS>ONH-F3%f z^~HbBWABVYDvuRe8Ii){k&&6mR#IkGl#!LkLu6#96uI_DsLUiQJA1E|nLSF6hJN>X zJVZa$w|f2l`5oMI&gXOPJ?9?x^S-{hfbhBk@jYiIkr(Vb0GC(fWPF0a-8 z8>fi3o3do`mGRifvldtpX|a&LqOGOYeji6Cy5)Cuv%5HfojdNH>xvq8nGt1al6~*O 
z$I~WU|B9L{vpfYlR&a8W2<_sCJI$ZY5M)l)KdgAN8#}tUK~RH|Pr~=MmM72KI-@3@ zkXSJ7N_wS>N;X(-7e&0wTA=`>3;2u>`yhPhvO{(a2j5&Oxhp&->qL}CM@IDqLZBJWA@&P$j_eRxpYGA%APBJ<=} zFOPub4r_ch?_u`sJwICmGsVj<4+gnak}K0P9g`=oq2joez=Mptd-^CDsWBa3*zhnh zrI+LL;JwzbC#K4n?G$Yzv^3>~O(N947Si&61Xk;(nuy0$YN=GMUpAkB3zE#J+7N1t z8|8`Ld|`0BSmh50!kVpaYazJNe*{+fLpj*m9&5U_^j~d%APB!Y#=2o6*Dd{5g)t*M zNOqj(hk5$U=ud2#{!Xpjk23_$zMxe87q{!12i&brL*A(WPxOeU*A+60zDeDW1YVmn+a^yP0s&bLw=_JIJo2J(`mjmD_=2FR zuch1`DbSDEkFG9B$Zbh*PTfI9ufMHFFtGO_K9>v-SS^CdQnp?ai0}VmkaTScxS@Ppv`MuN)*GV#B>*+pk$Ui5RBWO38w?DS!_~9-xJZnA zvjCIFqrmPB!6QI|8Eim;Tg+162=M1;VF#u)ux+$3Tx0McNRG9e2X=bE@=tCaw4||p zKTZ$0LVQ7SKL%sB|4Mr~1aMG&@i1tn!_3oOiSo$_wFdoZa*;VUL23dA`;mYksc}GZ znTUEIo9N8b%0eOSCB|FKd+{l&0P$yt$ly7zV(}34H8*T6|1v!QuEg|A9wBt<$<-p| z-6@AZd!2}$;bIGhxrX|j11t;*^}T`Pp&#vUJ`xbj82)%Y>rjZdOml6Cvb24^jkxN3 z(|m<2+QS{!j6a?phzGQ9=xMjBpW|o>E)*cTF#9Zvaew@?rQo-U`4m-0qxh@=L8>wY zm!#0RvZz}XO#%y=p3FM)c2Y%=k(?*Kf#Ka2w1vl7?Z0Hg@c@cdnB7Ob?#2ZdE;i~T zW*s4U@==udBvO+c7s5``-%1BugtLj$(51mFU)=9UPI`^LdHN)%A+mzj?_o=-K)J>O z_-45X#>gGmT&5#*7-lMS@hhX#Vn>lP@-#6zPt!g8m@WOU5gqtTBQH=~9roKBy?5_P zuXK^6(C9G2g5^(XsyXoZ^w$I9QSrdJs(_^TxjVn~iV~YePD%!=X^-;j_GNxLPlvBl zTW8_`5bUI8-SwPKeniFeg8*vgbhR`6(omq6NTw8N?uaF|wJyZmZun0)9B zw(}ercOBl|0jTp>bB$~pYan=P4sY7u<|%?-Zv7G>jQV`zO;GsVTyW9-tJsP$h_d~J z`|tLMD>`b*XYy*NMi^KWE77jH1cK10+OFZI~W*qt)GsHMkgMA zO>SH={k|xbwu{x5_w96&g~yh;0k7YkZ=!@_$#~S2p?{`s^hK5l@?uj)o!Y6=AMo8y zI$0M|-lWkYFXPM|z#l^&Mgn=t{j+JnDyQ94%=A{Q{ zmsH+p6WQmh@~aKzSC}EeAKL z3@pUs9DyI9`u0rpQf=mLRn-?t%Z^fSLDbO0pvHCm%z>;uV`n&2-W(kAJ-n>z zQ5|{|riMT?tmHh{->mqTsIjwye}LsS3rjIKX-cPtT*~Y@0+pDlTyWoJ^|U$ zGW83>GgZP3PdEs1vw8)Npxe$jDFDY&NG zWawLthQg|0YlXHj0whL;Z3rfY;ZTDBM;fq-ZJGs^MPh!z4JJS~*9+!h>zo6L(P9H~ zKW?n3;4li}@h7N3+2u&R0@W$%hoXDdpG^1mhWLW=9lwB(4+tKw+`Bo@e>N!?yz-Sa zt=ZJxw*uZBGrkO-b4YvGh;L-2>Ki;Sp2$21PzrhV<<4tTmlFP&PJYeV*esf%7e|~9^0~Bqyac-!! zbNe|;fxabop%pV+^_lg26p zJkC|QCY_bq zuAA1-G;{}<{HxN|x;8%{a3mU7f$jn1f~HYXdNCTP!oz0*cD&`7afbFjPpQ!d;u^W` zAKQQFFQG+}J}dqUDdPD|7-bdH-1)n6p(!l+?+=*>DsoU>)A6AJ&Ml~$evGx?%opg+ zIJq+kMcuxGtYR-J<(@p34)LR?bf7m^sBrRQto`FFn((E#!%wr1H{?(B!AU6HY1Ih{ z7Bbfe3C{l3nEr;nf(azf{MI7`0VbumRuC&hfLkOE5)h!}en$f^i3udx4q754 zP!PXAffkCrzzcw)^cc}2Iu_OsGJ5xTA3X^EGR5dLuiW%*$7Fb1<|3d97wNU!;V zg`2>EYP~=FkOu(BjL{}2%~DOa`8P#1hYtt~g?y4VajR^49Uf@qcu=JbFlA-St7%Ur zs;3ny)}2rkuL?_FitrT+ffP7|D?cR5KwEnItyK>t3H;U9JT5C_GY(lau2dikQX+$z zS{P%vV&c*egZ9G;R`{$}902F8)i_&!2V~F7gH~=2L&t1aX@k2iJzZ+%%|+0k4Ua-! 
z@%G1^=ud~Lb-YZQ5WCMQZQ^F}K+P0lE^RpVW-FmFi` z-v=l@$QDo+V@?|6?<|}k)30~(t~!2@yJ47VWzoFSEO52FuNO(Ut)FMQLhuA*qS^?4 zbxS$C*tc*v-`P;lo6IV8F>z|(pVuEYp!`b{)xCjOkx85oQ*BU^W`XQiJ8>7T z>N1(N(|K=ryR^b+84w%RNHihQq^20gDw2n0K0-$$ALm!Pt7w0FZ@4?GEFWl9RI8rJ zM2w#D)5*klGrL?TF2%1p^}IS~UPWV1jQ3;EL}5R_k}+=ge-B6mwk#BL<$n*b@5!~) zX1n@P_lrF4?*EGt7}XCq``?tts*&6{pW%oi3KI4wCQ25o_4>D?aVyYhw6*tjU$F2S z%x#l`PXmY{o85c4g6+l|Jxd=MZ6}hAxefqe@59~>!6++x4{KX;{kE6>`r>eannnit(?{8Rp2Vm>{ z1r@nh9g6h7;*9+w<3FvD6)Sx#D5Ge%KiMmYmGI-CL@7o)!ES?lukL$hiuDt)-l&=j1Xu>RGhioxrkD89#EK zgvYmqy@@i+13%NKY-RU>8(q5n^ieqkI5XcitmF)wf$bGiJZH<@j?c$HaT#xQ>Q=HN zLw{C05}>!y)L*ss$lD$Gwo#bI%^$9aneFkjc}lb1_UKPPV`c0rKe^`>Wghwg~q zS%-fnc8;n-f!uw0&wy@fJUY5+2rGG^;wfatH}1+oW5GwjO?V{2*PN z9Rfd>; zl)fC~7ljz`68kK5$c2YL^|S}+^Aiy~HYIR9y0_qG=+XOF3snM2)&3H^v}U=~8fTJF zH~I^_3U~R+Nm9GSJM5b>@o-7{CVM~bAs~3>ahi|loOKo@ZTBaS;L&{6PsM>f^0TsY zK&mGjY!&D!oq?n#&!6;DEWTAIZ7Sz&eM6+n$&s~R0Gl<`heBYu z{CbR~3NQptx)C8U3}TaXExB3n19!pO;1ysB7ZB@-9oQ5MM_e|<>*_7>CaSYpjzvf$ zMuNcoVbb3VLowO4fINT$uc;&y}rAA=YvN6m&ydt(-qE>{+4BU%|?XAt2a|G$B)1|0a@ z^Y@=eYisLA*z{>BQzS4XdoN3Sxz(+l)i~T#2xWLVri(6@2WahM?vz|^Vnj#?s*0UZ zf?Y5p|DfSMXi2JU{4o3Q(r%Dih2Yxp$J8nwxMovHJ-17t)R;M4zqIv$mTRGZj`tO% zj~DRo1PUJSkOE9NZYc~yuRzHjC?31N5>-Po9^9DXS}jx&kjr+Yjgc6n9T388myZBx zN9+pp7;x!|eSWV}^DcDVzMC)z4?H-};E@{&lKdx@OTwma_*>cmeq|O9 z3HTfr=(o#r3bUFmoWJMfp0R8$5pQLPKDXC|XxU}?YUv78v*;T>`pbrY9}4jW4}bcn zgaJSJzkgGe-U^ijPdn}BMGi3%o)4^ zAO@=Um1vHc$IKj3lo~Uo_+X$D_~o?GtFMi{PdnugT0-K{rH(Xe#|$!1XT=#z3xjND znmxhgg2wdtU)kAf)k|gj0-0F}v#ZUNhT%I{| zr;021*jrZ?9(L`?a_Bx)n#vIcF1zU+{_F0A2FHu*mYDWC&1~^t7Ka!Kx(p)H-Vp=u zdWaSAdDt}Cgc5|+nnNx0?QWCq16)Jr^9l4B;OFL^_`y&k&(sYa@pmlNpXR@Mh~bhW zZ3M_bBd|699Jn`zmtDBjqw}Ep>esm+$hvnl8n_;GBwiYWzAT#VQ)N9F6AKW_H2Djk z?ySkYm@1oBe$*}#kHXC;H=}xh#jiv-_EREAEC_~Ipwhs8o*JE*E}v{a`R?SoAwaHnuCoe1$Gv7u46zwBiHNEjY2i~2CpN+8_H(!(^ zPKE1SF3CC0Rrm1QeG0nB`Aj*V)MfsF+G<0zi=VaK z3N&`BFB9nt9@^!n&;-89UZ;J)J$HZFKSNVMg=b_$;Zg0&K?!%VnGPO|)u9~>>cwwh zFRuI?D|k-_0(f{yC>&`ylkKShvAA{JCT5H)&Mo&y*F~lDZ}@xgmFNK#0ZCd`(&6{7 zJO()>WF79BU+1`&&~d7CZt#lMZYzADt-eeY_S7)O={tU!`%q|+RdYhd(IdxfJ&oVL zP{2N4dnvb9@?(S0C+H!CCxn%RBQNVBc{p8j-zKa)R;9^ykOM@hdgiMc;)Jde?ID+C z;C^%^vXQj#Xb|2LA?Y&ug_6XrzRXR??|QR&(L|w)pHAqXdvk>PYFLN)J43xh6}rq4 zL;EX{x2IimZ;)y}rDkfs#P|jRR8!vXs+TNtDRMW>M=yTXRb&67R*~oYV_K1(%uaRLy zRcCgdGNx8jJAQKaqEC1nJ*pro{GuF#& z#A}DZZb7_Shp=@uY(0UEk>EBAfdGeR*FRQ(5pQV(49BaG7$*q=7R7{(5(qG}rHW%1 z^;$sk;YgS%IP+8VCn!fGnOq5Yr_Db8xLZWK>j9nyF@McY(SBu5%FhliRaW%=_hAk@ z*GQO?orr<|mF3Ih1~j zk8;Sn0mI=g9_8!BlP%%11-6tNxmH<^$qWip(yyI8act-dgE#2(#o*fU7f#>7FeOdR zeq*s&RN;a+l%C>y9-u+7kNB1GEyh=M zP7Te*3xA0>7Wp@vr8M^gEq^^>E;rAMkYI_+-)DdM|fa zQC;T)9;-{C`JO&n$`n7VC7#T#r~bM!&jNkfI~LceH1^vLo+14&*g0RcHZ#R_CIC=R zUS+J~p|xK)rpqbvRKQN8yK}cR9X>*;HG{GzdFc$GtXxoCqAFCCf{xieV)%d^|G7Rp zLf+wHUX9g2tAwjqQ4SQHwcb14anQ8}K$Ygf-XuLYr%07=glxOhE9#o6if0eSF49^g zUd+%kNXc2quiX+gARMymK;iqZZ>uQ!Tl^)=<#v6vqVV#lrij0k)ad){I=$-4aLzT8 zpv0^acQ#0n0!{5zI7*{Hosf>t6?B|UnvV>1&EquHS&uMEcHjW{qLQX)0+A?}9|GQY z_?6GKL{ZaAI$^M{HxJNVpsL@>`0wF5?5Fd6))J9UW;vDd|9l&qP-G$7m-a~0-*@iY zUS^TTZmasM&v=GUyWVPOlY%Bjv)>1E6D znItnn;Dq1ehtEa*owEVNvm=V3_O*ru3Yxqcx1J9U9+(s=+sgRk*RE@jqW)wRAMmR6 zxd%%{f1`d#AjmfAZK}mYFZRUCoVd(x)r{e5+uI?4I7~vSGr*)Ap&oWzKLYfidH Tm?6|W0s?E?aeD_-dHMeUAp1P3 literal 0 HcmV?d00001 diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/contract-addr.io b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/contract-addr.io new file mode 100644 index 00000000000..674a7eb4f81 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/contract-addr.io @@ -0,0 +1,3 @@ +// queries for 
logs from a specific contract across a range of blocks +>> {"jsonrpc":"2.0","id":1,"method":"eth_getLogs","params":[{"address":["0x7dcd17433742f4c0ca53122ab541d0ba67fc27df"],"fromBlock":"0x1","toBlock":"0x4","topics":null}]} +<< {"jsonrpc":"2.0","id":1,"result":[{"address":"0x7dcd17433742f4c0ca53122ab541d0ba67fc27df","topics":["0x00000000000000000000000000000000000000000000000000000000656d6974","0xf4da19d6c17928e683661a52829cf391d3dc26d581152b81ce595a1207944f09"],"data":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x2","transactionHash":"0x5bc704d4eb4ce7fe319705d2f888516961426a177f2799c9f934b5df7466dd33","transactionIndex":"0x2","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0xa","removed":false},{"address":"0x7dcd17433742f4c0ca53122ab541d0ba67fc27df","topics":["0x00000000000000000000000000000000000000000000000000000000656d6974","0x4238ace0bf7e66fd40fea01bdf43f4f30423f48432efd0da3af5fcb17a977fd4"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","blockNumber":"0x4","transactionHash":"0xf047c5133c96c405a79d01038b4ccf8208c03e296dd9f6bea083727c9513f805","transactionIndex":"0x0","blockHash":"0x94540b21748e45497c41518ed68b2a0c16d728e917b665ae50d51f6895242e53","logIndex":"0x0","removed":false}]} diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/no-topics.io b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/no-topics.io new file mode 100644 index 00000000000..89ec5bcd058 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/no-topics.io @@ -0,0 +1,3 @@ +// queries for all logs across a range of blocks +>> {"jsonrpc":"2.0","id":1,"method":"eth_getLogs","params":[{"address":null,"fromBlock":"0x1","toBlock":"0x3","topics":null}]} +<< 
{"jsonrpc":"2.0","id":1,"result":[{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0xabbb5caa7dda850e60932de0934eb1f9d0f59695050f761dc64e443e5030a569"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x0","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0xd9d16d34ffb15ba3a3d852f0d403e2ce1d691fb54de27ac87cd2f993f3ec330f"],"data":"0x0000000000000000000000000000000000000000000000000000000000000002","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x1","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0x679795a0195a1b76cdebb7c51d74e058aee92919b8c3389af86ef24535e8a28c"],"data":"0x0000000000000000000000000000000000000000000000000000000000000003","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x2","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0xc3a24b0501bd2c13a7e57f2db4369ec4c223447539fc0724a9d55ac4a06ebd4d"],"data":"0x0000000000000000000000000000000000000000000000000000000000000004","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x3","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0x91da3fd0782e51c6b3986e9e672fd566868e71f3dbc2d6c2cd6fbb3e361af2a7"],"data":"0x0000000000000000000000000000000000000000000000000000000000000005","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x4","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0x89832631fb3c3307a103ba2c84ab569c64d6182a18893dcd163f0f1c2090733a"],"data":"0x0000000000000000000000000000000000000000000000000000000000000006","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x5","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0x8819ef417987f8ae7a81f42cdfb18815282fe989326fbff903d13cf0e03ace29"],"data":"0x0000000000000000000000000000000000000000000000000000000000000007","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x6","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0xb7c774451310d1be4108bc180d1b52823cb0ee0274a6c0081bcaf94f115fb96d"],"data":"0x0000000000000000000000000000000000000000000000000000000000000008","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b6
5f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x7","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0x6add646517a5b0f6793cd5891b7937d28a5b2981a5d88ebc7cd776088fea9041"],"data":"0x0000000000000000000000000000000000000000000000000000000000000009","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x8","removed":false},{"address":"0x882e7e5d12617c267a72948e716f231fa79e6d51","topics":["0x6cde3cea4b3a3fb2488b2808bae7556f4a405e50f65e1794383bc026131b13c3"],"data":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":"0x2","transactionHash":"0x25d8b4a27c4578e5de6441f98881cf050ab2d9f28ceb28559ece0b65f555e9d8","transactionIndex":"0x0","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0x9","removed":false},{"address":"0x7dcd17433742f4c0ca53122ab541d0ba67fc27df","topics":["0x00000000000000000000000000000000000000000000000000000000656d6974","0xf4da19d6c17928e683661a52829cf391d3dc26d581152b81ce595a1207944f09"],"data":"0x0000000000000000000000000000000000000000000000000000000000000000","blockNumber":"0x2","transactionHash":"0x5bc704d4eb4ce7fe319705d2f888516961426a177f2799c9f934b5df7466dd33","transactionIndex":"0x2","blockHash":"0x28a64e8d846382eb270941251be3a3e1547809e7eb70939c3530faa8f4599570","logIndex":"0xa","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x101e368776582e57ab3d116ffe2517c0a585cd5b23174b01e275c2d8329c3d83"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x0","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x7dfe757ecd65cbd7922a9c0161e935dd7fdbcc0e999689c7d31633896b1fc60b"],"data":"0x0000000000000000000000000000000000000000000000000000000000000002","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x1","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x88601476d11616a71c5be67555bd1dff4b1cbf21533d2669b768b61518cfe1c3"],"data":"0x0000000000000000000000000000000000000000000000000000000000000003","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x2","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0xcbc4e5fb02c3d1de23a9f1e014b4d2ee5aeaea9505df5e855c9210bf472495af"],"data":"0x0000000000000000000000000000000000000000000000000000000000000004","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x3","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x2e174c10e159ea99b867ce3205125c24a42d128804e4070ed6fcc
8cc98166aa0"],"data":"0x0000000000000000000000000000000000000000000000000000000000000005","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x4","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0xa9bc9a3a348c357ba16b37005d7e6b3236198c0e939f4af8c5f19b8deeb8ebc0"],"data":"0x0000000000000000000000000000000000000000000000000000000000000006","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x5","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x75f96ab15d697e93042dc45b5c896c4b27e89bb6eaf39475c5c371cb2513f7d2"],"data":"0x0000000000000000000000000000000000000000000000000000000000000007","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x6","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x3be6fd20d5acfde5b873b48692cd31f4d3c7e8ee8a813af4696af8859e5ca6c6"],"data":"0x0000000000000000000000000000000000000000000000000000000000000008","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x7","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0x625b35f5e76f098dd7c3a05b10e2e5e78a4a01228d60c3b143426cdf36d26455"],"data":"0x0000000000000000000000000000000000000000000000000000000000000009","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x8","removed":false},{"address":"0xa788ca96a910bac854f95b794776c1ad847dcdd5","topics":["0xc575c31fea594a6eb97c8e9d3f9caee4c16218c6ef37e923234c0fe9014a61e7"],"data":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":"0x3","transactionHash":"0xfecc4d4439d77962b8c27e7e652294717fcd75379cab400bbefb2975a960344c","transactionIndex":"0x1","blockHash":"0x30adf30837c967524cbcf881c024c194eee010b3750feef2e45a674979b2cd36","logIndex":"0x9","removed":false}]} diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-exact-match.io b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-exact-match.io new file mode 100644 index 00000000000..4795cc4116b --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-exact-match.io @@ -0,0 +1,3 @@ +// queries for logs with two topics, with both topics set explictly +>> {"jsonrpc":"2.0","id":1,"method":"eth_getLogs","params":[{"address":null,"fromBlock":"0x3","toBlock":"0x6","topics":[["0x00000000000000000000000000000000000000000000000000000000656d6974"],["0x4238ace0bf7e66fd40fea01bdf43f4f30423f48432efd0da3af5fcb17a977fd4"]]}]} +<< 
{"jsonrpc":"2.0","id":1,"result":[{"address":"0x7dcd17433742f4c0ca53122ab541d0ba67fc27df","topics":["0x00000000000000000000000000000000000000000000000000000000656d6974","0x4238ace0bf7e66fd40fea01bdf43f4f30423f48432efd0da3af5fcb17a977fd4"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","blockNumber":"0x4","transactionHash":"0xf047c5133c96c405a79d01038b4ccf8208c03e296dd9f6bea083727c9513f805","transactionIndex":"0x0","blockHash":"0x94540b21748e45497c41518ed68b2a0c16d728e917b665ae50d51f6895242e53","logIndex":"0x0","removed":false}]} diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-wildcard.io b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-wildcard.io new file mode 100644 index 00000000000..9a798698c25 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-wildcard.io @@ -0,0 +1,3 @@ +// queries for logs with two topics, performing a wildcard match in topic position zero +>> {"jsonrpc":"2.0","id":1,"method":"eth_getLogs","params":[{"address":null,"fromBlock":"0x3","toBlock":"0x6","topics":[[],["0x4238ace0bf7e66fd40fea01bdf43f4f30423f48432efd0da3af5fcb17a977fd4"]]}]} +<< {"jsonrpc":"2.0","id":1,"result":[{"address":"0x7dcd17433742f4c0ca53122ab541d0ba67fc27df","topics":["0x00000000000000000000000000000000000000000000000000000000656d6974","0x4238ace0bf7e66fd40fea01bdf43f4f30423f48432efd0da3af5fcb17a977fd4"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","blockNumber":"0x4","transactionHash":"0xf047c5133c96c405a79d01038b4ccf8208c03e296dd9f6bea083727c9513f805","transactionIndex":"0x0","blockHash":"0x94540b21748e45497c41518ed68b2a0c16d728e917b665ae50d51f6895242e53","logIndex":"0x0","removed":false}]} diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/forkenv.json b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/forkenv.json new file mode 100644 index 00000000000..3da23534337 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/forkenv.json @@ -0,0 +1,27 @@ +{ + "HIVE_CANCUN_BLOB_BASE_FEE_UPDATE_FRACTION": "3338477", + "HIVE_CANCUN_BLOB_MAX": "6", + "HIVE_CANCUN_BLOB_TARGET": "3", + "HIVE_CANCUN_TIMESTAMP": "420", + "HIVE_CHAIN_ID": "3503995874084926", + "HIVE_FORK_ARROW_GLACIER": "30", + "HIVE_FORK_BERLIN": "24", + "HIVE_FORK_BYZANTIUM": "9", + "HIVE_FORK_CONSTANTINOPLE": "12", + "HIVE_FORK_GRAY_GLACIER": "33", + "HIVE_FORK_HOMESTEAD": "0", + "HIVE_FORK_ISTANBUL": "18", + "HIVE_FORK_LONDON": "27", + "HIVE_FORK_MUIR_GLACIER": "21", + "HIVE_FORK_PETERSBURG": "15", + "HIVE_FORK_SPURIOUS": "6", + "HIVE_FORK_TANGERINE": "3", + "HIVE_MERGE_BLOCK_ID": "36", + "HIVE_NETWORK_ID": "3503995874084926", + "HIVE_PRAGUE_BLOB_BASE_FEE_UPDATE_FRACTION": "5007716", + "HIVE_PRAGUE_BLOB_MAX": "9", + "HIVE_PRAGUE_BLOB_TARGET": "6", + "HIVE_PRAGUE_TIMESTAMP": "450", + "HIVE_SHANGHAI_TIMESTAMP": "390", + "HIVE_TERMINAL_TOTAL_DIFFICULTY": "4732736" +} \ No newline at end of file diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/genesis.json b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/genesis.json new file mode 100644 index 00000000000..0c29edcb252 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/genesis.json @@ -0,0 +1,141 @@ +{ + "config": { + "chainId": 3503995874084926, + "homesteadBlock": 0, + "eip150Block": 3, + "eip155Block": 6, + "eip158Block": 6, + "byzantiumBlock": 9, + "constantinopleBlock": 12, + "petersburgBlock": 15, + "istanbulBlock": 18, + "muirGlacierBlock": 21, + "berlinBlock": 24, + "londonBlock": 27, + "arrowGlacierBlock": 30, + 
"grayGlacierBlock": 33, + "mergeNetsplitBlock": 36, + "shanghaiTime": 390, + "cancunTime": 420, + "pragueTime": 450, + "terminalTotalDifficulty": 4732736, + "depositContractAddress": "0x0000000000000000000000000000000000000000", + "ethash": {}, + "blobSchedule": { + "cancun": { + "target": 3, + "max": 6, + "baseFeeUpdateFraction": 3338477 + }, + "prague": { + "target": 6, + "max": 9, + "baseFeeUpdateFraction": 5007716 + } + } + }, + "nonce": "0x0", + "timestamp": "0x0", + "extraData": "0x68697665636861696e", + "gasLimit": "0x23f3e20", + "difficulty": "0x20000", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": { + "00000961ef480eb55e80d19ad83579a64c007002": { + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe1460cb5760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff146101f457600182026001905f5b5f82111560685781019083028483029004916001019190604d565b909390049250505036603814608857366101f457346101f4575f5260205ff35b34106101f457600154600101600155600354806003026004013381556001015f35815560010160203590553360601b5f5260385f601437604c5fa0600101600355005b6003546002548082038060101160df575060105b5f5b8181146101835782810160030260040181604c02815460601b8152601401816001015481526020019060020154807fffffffffffffffffffffffffffffffff00000000000000000000000000000000168252906010019060401c908160381c81600701538160301c81600601538160281c81600501538160201c81600401538160181c81600301538160101c81600201538160081c81600101535360010160e1565b910180921461019557906002556101a0565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff14156101cd57505f5b6001546002828201116101e25750505f6101e8565b01600290035b5f555f600155604c025ff35b5f5ffd", + "balance": "0x1" + }, + "0000bbddc7ce488642fb579f8b00f3a590007251": { + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe1460d35760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1461019a57600182026001905f5b5f82111560685781019083028483029004916001019190604d565b9093900492505050366060146088573661019a573461019a575f5260205ff35b341061019a57600154600101600155600354806004026004013381556001015f358155600101602035815560010160403590553360601b5f5260605f60143760745fa0600101600355005b6003546002548082038060021160e7575060025b5f5b8181146101295782810160040260040181607402815460601b815260140181600101548152602001816002015481526020019060030154905260010160e9565b910180921461013b5790600255610146565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff141561017357505f5b6001546001828201116101885750505f61018e565b01600190035b5f555f6001556074025ff35b5f5ffd", + "balance": "0x1" + }, + "0000f90827f1c53a10cb7a02335b175320002935": { + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604657602036036042575f35600143038111604257611fff81430311604257611fff9006545f5260205ff35b5f5ffd5b5f35611fff60014303065500", + "balance": "0x1" + }, + "000f3df6d732807ef1319fb7b8bb8522d0beac02": { + "code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500", + "balance": "0x2a" + }, + "0c2c51a0990aee1d73c1228de158688341557508": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "14e46043e63d0e3cdcf2530519f4cfaf35058cb2": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "16c57edf7fa9d9525378b0b81bf8a3ced0620c1c": { + "balance": 
"0xc097ce7bc90715b34b9f1000000000" + }, + "1f4924b14f34e24159387c0a4cdbaa32f3ddb0cf": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "1f5bde34b4afc686f136c7a3cb6ec376f7357759": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "2d389075be5be9f2246ad654ce152cf05990b209": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "3ae75c08b4c907eb63a8960c45b86e1e9ab6123c": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "4340ee1b812acb40a1eb561c019c327b243b92df": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "4a0f1452281bcec5bd90c3dce6162a5995bfe9df": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "4dde844b71bcdf95512fb4dc94e84fb67b512ed8": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "5f552da00dfb4d3749d9e62dcee3c918855a86a0": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "654aa64f5fbefb84c270ec74211b81ca8c44a72e": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "717f8aa2b982bee0e29f573d31df288663e1ce16": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "7435ed30a8b4aeb0877cef0c6e8cffe834eb865f": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "7dcd17433742f4c0ca53122ab541d0ba67fc27df": { + "code": "0x3680600080376000206000548082558060010160005560005263656d697460206000a2", + "balance": "0x0" + }, + "83c7e323d189f18725ac510004fdc2941f8c4a78": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "84e75c28348fb86acea1a93a39426d7d60f4cc46": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "8bebc8ba651aee624937e7d897853ac30c95a067": { + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + "balance": "0x1", + "nonce": "0x1" + }, + "c7b99a164efd027a93f147376cc7da7c67c6bbe0": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "d803681e487e6ac18053afc5a6cd813c86ec3e4d": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "e7d13f7aa2a838d24c59b40186a0aca1e21cffcc": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + }, + "eda8645ba6948855e3b3cd596bbb07596d59c603": { + "balance": "0xc097ce7bc90715b34b9f1000000000" + } + }, + "number": "0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "baseFeePerGas": null, + "excessBlobGas": null, + "blobGasUsed": null +} \ No newline at end of file diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/headfcu.json b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/headfcu.json new file mode 100644 index 00000000000..cc39610b4f1 --- /dev/null +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/headfcu.json @@ -0,0 +1,13 @@ +{ + "jsonrpc": "2.0", + "id": "fcu45", + "method": "engine_forkchoiceUpdatedV3", + "params": [ + { + "headBlockHash": "0xaf51811799f22260e5b4e1f95504dae760505f102dcb2e9ca7d897d8a40124a1", + "safeBlockHash": "0xaf51811799f22260e5b4e1f95504dae760505f102dcb2e9ca7d897d8a40124a1", + "finalizedBlockHash": "0xaf51811799f22260e5b4e1f95504dae760505f102dcb2e9ca7d897d8a40124a1" + }, + null + ] +} \ No newline at end of file diff --git a/crates/rpc/rpc-e2e-tests/tests/rpc_compat.rs b/crates/rpc/rpc-e2e-tests/tests/rpc_compat.rs new file mode 100644 index 00000000000..994cd714405 --- /dev/null +++ 
b/crates/rpc/rpc-e2e-tests/tests/rpc_compat.rs
@@ -0,0 +1,79 @@
+//! RPC compatibility tests using execution-apis test data
+
+use alloy_genesis::Genesis;
+use eyre::Result;
+use reth_chainspec::ChainSpec;
+use reth_e2e_test_utils::testsuite::{
+    actions::{MakeCanonical, UpdateBlockInfo},
+    setup::{NetworkSetup, Setup},
+    TestBuilder,
+};
+use reth_node_ethereum::{EthEngineTypes, EthereumNode};
+use reth_rpc_e2e_tests::rpc_compat::{InitializeFromExecutionApis, RunRpcCompatTests};
+use std::{path::PathBuf, sync::Arc};
+use tracing::info;
+
+/// Test `eth_getLogs` RPC method compatibility with execution-apis test data
+///
+/// This test:
+/// 1. Initializes a node with chain data from testdata (chain.rlp)
+/// 2. Applies the forkchoice state from headfcu.json
+/// 3. Runs all `eth_getLogs` test cases from the execution-apis test suite
+#[tokio::test(flavor = "multi_thread")]
+async fn test_eth_get_logs_compat() -> Result<()> {
+    reth_tracing::init_test_tracing();
+
+    // Use local test data
+    let test_data_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/rpc-compat");
+
+    assert!(test_data_path.exists(), "Test data path does not exist: {}", test_data_path.display());
+
+    info!("Using test data from: {}", test_data_path.display());
+
+    // Paths to test files
+    let chain_rlp_path = test_data_path.join("chain.rlp");
+    let fcu_json_path = test_data_path.join("headfcu.json");
+    let genesis_path = test_data_path.join("genesis.json");
+
+    // Verify required files exist
+    if !chain_rlp_path.exists() {
+        return Err(eyre::eyre!("chain.rlp not found at {}", chain_rlp_path.display()));
+    }
+    if !fcu_json_path.exists() {
+        return Err(eyre::eyre!("headfcu.json not found at {}", fcu_json_path.display()));
+    }
+    if !genesis_path.exists() {
+        return Err(eyre::eyre!("genesis.json not found at {}", genesis_path.display()));
+    }
+
+    // Load genesis from test data
+    let genesis_json = std::fs::read_to_string(&genesis_path)?;
+
+    // Parse the Genesis struct from JSON and convert it to ChainSpec
+    // This properly handles all the hardfork configuration from the config section
+    let genesis: Genesis = serde_json::from_str(&genesis_json)?;
+    let chain_spec: ChainSpec = genesis.into();
+    let chain_spec = Arc::new(chain_spec);
+
+    // Create test setup with imported chain
+    let setup = Setup::<EthEngineTypes>::default()
+        .with_chain_spec(chain_spec)
+        .with_network(NetworkSetup::single_node());
+
+    // Build and run the test
+    let test = TestBuilder::new()
+        .with_setup_and_import(setup, chain_rlp_path)
+        .with_action(UpdateBlockInfo::default())
+        .with_action(
+            InitializeFromExecutionApis::new().with_fcu_json(fcu_json_path.to_string_lossy()),
+        )
+        .with_action(MakeCanonical::new())
+        .with_action(RunRpcCompatTests::new(
+            vec!["eth_getLogs".to_string()],
+            test_data_path.to_string_lossy(),
+        ));
+
+    test.run::<EthereumNode>().await?;
+
+    Ok(())
+}

From 15c6562636f0fa13d1ac14ddb0c767029fdb72d3 Mon Sep 17 00:00:00 2001
From: Dan Cline <6798349+Rjected@users.noreply.github.com>
Date: Tue, 8 Jul 2025 14:44:27 -0400
Subject: [PATCH 097/305] chore(trie): remove Default bound from SparseTrieInterface (#17268)

---
 crates/trie/sparse/src/state.rs  | 4 ++--
 crates/trie/sparse/src/traits.rs | 2 +-
 crates/trie/sparse/src/trie.rs   | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs
index 9dacb9800bd..2fce6a99acf 100644
--- a/crates/trie/sparse/src/state.rs
+++ b/crates/trie/sparse/src/state.rs
@@ -73,8 +73,8 @@ impl<A, S> SparseStateTrie<A, S> {
 
 impl<A, S> SparseStateTrie<A, S>
 where
-    A: SparseTrieInterface,
-    S: SparseTrieInterface,
+    A: SparseTrieInterface + Default,
+    S: SparseTrieInterface + Default,
 {
     /// Create new [`SparseStateTrie`]
     pub fn new() -> Self {
diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs
index d935e25814e..e2b22f2daf9 100644
--- a/crates/trie/sparse/src/traits.rs
+++ b/crates/trie/sparse/src/traits.rs
@@ -18,7 +18,7 @@ use crate::blinded::BlindedProvider;
 /// This trait abstracts over different sparse trie implementations (serial vs parallel)
 /// while providing a unified interface for the core trie operations needed by the
 /// [`crate::SparseTrie`] enum.
-pub trait SparseTrieInterface: Default + Debug + Send + Sync {
+pub trait SparseTrieInterface: Sized + Debug + Send + Sync {
     /// Configures the trie to have the given root node revealed.
    ///
     /// # Arguments
diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs
index 77a4376c45b..e174ecda387 100644
--- a/crates/trie/sparse/src/trie.rs
+++ b/crates/trie/sparse/src/trie.rs
@@ -65,7 +65,7 @@ impl<T> Default for SparseTrie<T> {
     }
 }
 
-impl<T: SparseTrieInterface> SparseTrie<T> {
+impl<T: SparseTrieInterface + Default> SparseTrie<T> {
     /// Creates a new blind sparse trie.
     ///
     /// # Examples

From cb42ac94b552fc798563ea9c383870dc3db99f4c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roman=20Hodul=C3=A1k?=
Date: Wed, 9 Jul 2025 11:09:10 +0200
Subject: [PATCH 098/305] refactor(examples): Use `TransactionEnvelope` macro from `alloy` for `CustomPooledTransaction` in the `custom-node` example (#17302)

---
 examples/custom-node/src/pool.rs | 82 ++++++++++++++++++++++++++++----
 1 file changed, 74 insertions(+), 8 deletions(-)

diff --git a/examples/custom-node/src/pool.rs b/examples/custom-node/src/pool.rs
index 09f0b667c79..c24e4d38e75 100644
--- a/examples/custom-node/src/pool.rs
+++ b/examples/custom-node/src/pool.rs
@@ -1,15 +1,29 @@
 use crate::primitives::{CustomTransaction, CustomTransactionEnvelope};
-use alloy_consensus::error::ValueError;
-use op_alloy_consensus::OpPooledTransaction;
-use reth_ethereum::primitives::Extended;
+use alloy_consensus::{
+    crypto::RecoveryError, error::ValueError, transaction::SignerRecoverable, TransactionEnvelope,
+};
+use alloy_primitives::{Address, Sealed, B256};
+use op_alloy_consensus::{OpPooledTransaction, OpTransaction, TxDeposit};
+use reth_ethereum::primitives::{
+    serde_bincode_compat::RlpBincode, InMemorySize, SignedTransaction,
+};
 
-pub type CustomPooledTransaction = Extended<OpPooledTransaction, CustomTransactionEnvelope>;
+#[derive(Clone, Debug, TransactionEnvelope)]
+#[envelope(tx_type_name = CustomPooledTxType)]
+pub enum CustomPooledTransaction {
+    /// A regular Optimism transaction as defined by [`OpPooledTransaction`].
+    #[envelope(flatten)]
+    Op(OpPooledTransaction),
+    /// A [`CustomTransactionEnvelope`] tagged with type 42.
+    #[envelope(ty = 42)]
+    Payment(CustomTransactionEnvelope),
+}
 
 impl From<CustomPooledTransaction> for CustomTransaction {
     fn from(tx: CustomPooledTransaction) -> Self {
         match tx {
-            CustomPooledTransaction::BuiltIn(tx) => Self::Op(tx.into()),
-            CustomPooledTransaction::Other(tx) => Self::Payment(tx),
+            CustomPooledTransaction::Op(tx) => Self::Op(tx.into()),
+            CustomPooledTransaction::Payment(tx) => Self::Payment(tx),
         }
     }
 }
@@ -19,10 +33,62 @@ impl TryFrom<CustomTransaction> for CustomPooledTransaction {
 
     fn try_from(tx: CustomTransaction) -> Result<Self, Self::Error> {
         match tx {
-            CustomTransaction::Op(op) => Ok(Self::BuiltIn(
+            CustomTransaction::Op(op) => Ok(Self::Op(
                 OpPooledTransaction::try_from(op).map_err(|op| op.map(CustomTransaction::Op))?,
             )),
-            CustomTransaction::Payment(payment) => Ok(Self::Other(payment)),
+            CustomTransaction::Payment(payment) => Ok(Self::Payment(payment)),
+        }
+    }
+}
+
+impl RlpBincode for CustomPooledTransaction {}
+
+impl OpTransaction for CustomPooledTransaction {
+    fn is_deposit(&self) -> bool {
+        match self {
+            CustomPooledTransaction::Op(_) => false,
+            CustomPooledTransaction::Payment(payment) => payment.is_deposit(),
+        }
+    }
+
+    fn as_deposit(&self) -> Option<&Sealed<TxDeposit>> {
+        match self {
+            CustomPooledTransaction::Op(_) => None,
+            CustomPooledTransaction::Payment(payment) => payment.as_deposit(),
+        }
+    }
+}
+
+impl SignerRecoverable for CustomPooledTransaction {
+    fn recover_signer(&self) -> Result<Address, RecoveryError> {
+        match self {
+            CustomPooledTransaction::Op(tx) => SignerRecoverable::recover_signer(tx),
+            CustomPooledTransaction::Payment(tx) => SignerRecoverable::recover_signer(tx),
+        }
+    }
+
+    fn recover_signer_unchecked(&self) -> Result<Address, RecoveryError> {
+        match self {
+            CustomPooledTransaction::Op(tx) => SignerRecoverable::recover_signer_unchecked(tx),
+            CustomPooledTransaction::Payment(tx) => SignerRecoverable::recover_signer_unchecked(tx),
+        }
+    }
+}
+
+impl SignedTransaction for CustomPooledTransaction {
+    fn tx_hash(&self) -> &B256 {
+        match self {
+            CustomPooledTransaction::Op(tx) => SignedTransaction::tx_hash(tx),
+            CustomPooledTransaction::Payment(tx) => SignedTransaction::tx_hash(tx),
+        }
+    }
+}
+
+impl InMemorySize for CustomPooledTransaction {
+    fn size(&self) -> usize {
+        match self {
+            CustomPooledTransaction::Op(tx) => InMemorySize::size(tx),
+            CustomPooledTransaction::Payment(tx) => InMemorySize::size(tx),
         }
     }
 }

From 818712124b1e1e67fba149150185bf1ea98a3c08 Mon Sep 17 00:00:00 2001
From: Starkey
Date: Wed, 9 Jul 2025 15:40:22 +0630
Subject: [PATCH 099/305] docs: myrpc_ext.rs: fix namespace inconsistency in myrpcExt comments (#17300)

---
 examples/rpc-db/src/myrpc_ext.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/rpc-db/src/myrpc_ext.rs b/examples/rpc-db/src/myrpc_ext.rs
index d183ae818bd..68681ad587e 100644
--- a/examples/rpc-db/src/myrpc_ext.rs
+++ b/examples/rpc-db/src/myrpc_ext.rs
@@ -4,7 +4,7 @@ use reth_ethereum::{provider::BlockReaderIdExt, rpc::eth::EthResult, Block};
 // Rpc related imports
 use jsonrpsee::proc_macros::rpc;
 
-/// trait interface for a custom rpc namespace: `MyRpc`
+/// trait interface for a custom rpc namespace: `myrpcExt`
 ///
 /// This defines an additional namespace where all methods are configured as trait functions.
 #[rpc(server, namespace = "myrpcExt")]
@@ -14,7 +14,7 @@ pub trait MyRpcExtApi {
     fn custom_method(&self) -> EthResult<Option<Block>>;
 }
 
-/// The type that implements `myRpc` rpc namespace trait
+/// The type that implements `myrpcExt` rpc namespace trait
 pub struct MyRpcExt<Provider> {
     pub provider: Provider,
 }

From e238fc4823b4194c31e04d184627441d107b258c Mon Sep 17 00:00:00 2001
From: stevencartavia <112043913+stevencartavia@users.noreply.github.com>
Date: Wed, 9 Jul 2025 03:14:39 -0600
Subject: [PATCH 100/305] feat: add --prune.receipts.premerge setting (#17295)

---
 crates/node/builder/src/launch/common.rs |  1 +
 crates/node/core/src/args/pruning.rs     | 29 ++++++++++++++++--------
 docs/vocs/docs/pages/cli/reth/node.mdx   |  3 +++
 3 files changed, 23 insertions(+), 10 deletions(-)

diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs
index 3001256b96f..50ad3599095 100644
--- a/crates/node/builder/src/launch/common.rs
+++ b/crates/node/builder/src/launch/common.rs
@@ -1254,6 +1254,7 @@ mod tests {
             transaction_lookup_distance: None,
             transaction_lookup_before: None,
             receipts_full: false,
+            receipts_pre_merge: false,
             receipts_distance: None,
             receipts_before: None,
             account_history_full: false,
diff --git a/crates/node/core/src/args/pruning.rs b/crates/node/core/src/args/pruning.rs
index 3f493a900a9..d6b8170440a 100644
--- a/crates/node/core/src/args/pruning.rs
+++ b/crates/node/core/src/args/pruning.rs
@@ -48,19 +48,22 @@ pub struct PruningArgs {
 
     // Receipts
     /// Prunes all receipt data.
-    #[arg(long = "prune.receipts.full", conflicts_with_all = &["receipts_distance", "receipts_before"])]
+    #[arg(long = "prune.receipts.full", conflicts_with_all = &["receipts_pre_merge", "receipts_distance", "receipts_before"])]
     pub receipts_full: bool,
+    /// Prune receipts before the merge block.
+    #[arg(long = "prune.receipts.pre-merge", conflicts_with_all = &["receipts_full", "receipts_distance", "receipts_before"])]
+    pub receipts_pre_merge: bool,
     /// Prune receipts before the `head-N` block number. In other words, keep last N + 1 blocks.
-    #[arg(long = "prune.receipts.distance", value_name = "BLOCKS", conflicts_with_all = &["receipts_full", "receipts_before"])]
+    #[arg(long = "prune.receipts.distance", value_name = "BLOCKS", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_before"])]
     pub receipts_distance: Option<u64>,
     /// Prune receipts before the specified block number. The specified block number is not pruned.
-    #[arg(long = "prune.receipts.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["receipts_full", "receipts_distance"])]
+    #[arg(long = "prune.receipts.before", value_name = "BLOCK_NUMBER", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance"])]
     pub receipts_before: Option<BlockNumber>,
 
     // Receipts Log Filter
     /// Configure receipts log filter. Format:
     /// <`address`>:<`prune_mode`>[,<`address`>:<`prune_mode`>...] Where <`prune_mode`> can be
    /// 'full', 'distance:<`blocks`>', or 'before:<`block_number`>'
-    #[arg(long = "prune.receiptslogfilter", value_name = "FILTER_CONFIG", conflicts_with_all = &["receipts_full", "receipts_distance", "receipts_before"], value_parser = parse_receipts_log_filter)]
+    #[arg(long = "prune.receiptslogfilter", value_name = "FILTER_CONFIG", conflicts_with_all = &["receipts_full", "receipts_pre_merge", "receipts_distance", "receipts_before"], value_parser = parse_receipts_log_filter)]
     pub receipts_log_filter: Option<ReceiptsLogPruneConfig>,
 
     // Account History
@@ -138,7 +141,7 @@ impl PruningArgs {
         if let Some(mode) = self.transaction_lookup_prune_mode() {
             config.segments.transaction_lookup = Some(mode);
         }
-        if let Some(mode) = self.receipts_prune_mode() {
+        if let Some(mode) = self.receipts_prune_mode(chain_spec) {
             config.segments.receipts = Some(mode);
         }
         if let Some(mode) = self.account_history_prune_mode() {
@@ -202,15 +205,21 @@ impl PruningArgs {
         }
     }
 
-    const fn receipts_prune_mode(&self) -> Option<PruneMode> {
-        if self.receipts_full {
+    fn receipts_prune_mode<ChainSpec>(&self, chain_spec: &ChainSpec) -> Option<PruneMode>
+    where
+        ChainSpec: EthereumHardforks,
+    {
+        if self.receipts_pre_merge {
+            chain_spec
+                .ethereum_fork_activation(EthereumHardfork::Paris)
+                .block_number()
+                .map(PruneMode::Before)
+        } else if self.receipts_full {
             Some(PruneMode::Full)
         } else if let Some(distance) = self.receipts_distance {
             Some(PruneMode::Distance(distance))
-        } else if let Some(block_number) = self.receipts_before {
-            Some(PruneMode::Before(block_number))
         } else {
-            None
+            self.receipts_before.map(PruneMode::Before)
         }
     }
 
diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx
index fe9d08b8dee..638c8fe33c0 100644
--- a/docs/vocs/docs/pages/cli/reth/node.mdx
+++ b/docs/vocs/docs/pages/cli/reth/node.mdx
@@ -705,6 +705,9 @@ Pruning:
       --prune.receipts.full
           Prunes all receipt data
 
+      --prune.receipts.pre-merge
+          Prune receipts before the merge block
+
       --prune.receipts.distance <BLOCKS>
          Prune receipts before the `head-N` block number. In other words, keep last N + 1 blocks

From 9a2c66a5084656566a5bec899c774fabdebf4d0f Mon Sep 17 00:00:00 2001
From: Bilog WEB3 <155262265+Bilogweb3@users.noreply.github.com>
Date: Wed, 9 Jul 2025 11:44:35 +0200
Subject: [PATCH 101/305] fix(docs): correct duplicated function reference in documentation (#17301)

---
 crates/primitives-traits/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/primitives-traits/src/lib.rs b/crates/primitives-traits/src/lib.rs
index 60d265d2be6..60f83532dfc 100644
--- a/crates/primitives-traits/src/lib.rs
+++ b/crates/primitives-traits/src/lib.rs
@@ -50,7 +50,7 @@
 //! #### Naming
 //!
 //! The types in this crate support multiple recovery functions, e.g.
-//! [`SealedBlock::try_recover_unchecked`] and [`SealedBlock::try_recover_unchecked`]. The `_unchecked` suffix indicates that this function recovers the signer _without ensuring that the signature has a low `s` value_, in other words this rule introduced in [EIP-2](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md) is ignored.
+//! [`SealedBlock::try_recover`] and [`SealedBlock::try_recover_unchecked`]. The `_unchecked` suffix indicates that this function recovers the signer _without ensuring that the signature has a low `s` value_, in other words this rule introduced in [EIP-2](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md) is ignored.
 //! Hence this function is necessary when dealing with pre EIP-2 transactions on the ethereum
 //! mainnet. Newer transactions must always be recovered with the regular `recover` functions, see
Newer transactions must always be recovered with the regular `recover` functions, see
//! also [`recover_signer`](crypto::secp256k1::recover_signer).

From 9d8248528bcef0e86c0e27a7314e1b49fbe33612 Mon Sep 17 00:00:00 2001
From: Fallengirl <155266340+Fallengirl@users.noreply.github.com>
Date: Wed, 9 Jul 2025 12:05:03 +0200
Subject: [PATCH 102/305] fix: correct typos (#17296)

---
 crates/rpc/ipc/src/stream_codec.rs     | 2 +-
 crates/storage/nippy-jar/src/writer.rs | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/crates/rpc/ipc/src/stream_codec.rs b/crates/rpc/ipc/src/stream_codec.rs
index 4205081e3de..aa5cda16b7f 100644
--- a/crates/rpc/ipc/src/stream_codec.rs
+++ b/crates/rpc/ipc/src/stream_codec.rs
@@ -209,7 +209,7 @@ mod tests {
         let request2 = codec
             .decode(&mut buf)
             .expect("There should be no error in first 2nd test")
-            .expect("There should be aa request in 2nd whitespace test");
+            .expect("There should be a request in 2nd whitespace test");
 
         // TODO: maybe actually trim it out
         assert_eq!(request2, "\n\n\n\n{ test: 2 }");
diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs
index d32d9b51408..1069c6e67da 100644
--- a/crates/storage/nippy-jar/src/writer.rs
+++ b/crates/storage/nippy-jar/src/writer.rs
@@ -48,7 +48,7 @@ pub struct NippyJarWriter<H: NippyJarHeader = ()> {
 impl<H: NippyJarHeader> NippyJarWriter<H> {
     /// Creates a [`NippyJarWriter`] from [`NippyJar`].
     ///
-    /// If will **always** attempt to heal any inconsistent state when called.
+    /// It will **always** attempt to heal any inconsistent state when called.
     pub fn new(jar: NippyJar<H>) -> Result<Self, NippyJarError> {
         let (data_file, offsets_file, is_created) =
             Self::create_or_open_files(jar.data_path(), &jar.offsets_path())?;

From 162568b297e4e20cd95530d78d2c0ed9ef26c124 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 9 Jul 2025 12:26:08 +0200
Subject: [PATCH 103/305] chore: relax era export bounds (#17312)

---
 crates/era-utils/src/export.rs    | 18 +++++++-----------
 crates/era/src/execution_types.rs | 12 ++++++------
 2 files changed, 13 insertions(+), 17 deletions(-)

diff --git a/crates/era-utils/src/export.rs b/crates/era-utils/src/export.rs
index 2eba464e509..5ff1a0d78ca 100644
--- a/crates/era-utils/src/export.rs
+++ b/crates/era-utils/src/export.rs
@@ -1,7 +1,7 @@
 //! Logic to export from database era1 block history
 //! and injecting them into era1 files with `Era1Writer`.
 
-use alloy_consensus::{BlockBody, BlockHeader, Header};
+use alloy_consensus::BlockHeader;
 use alloy_primitives::{BlockNumber, B256, U256};
 use eyre::{eyre, Result};
 use reth_era::{
@@ -76,11 +76,9 @@ impl ExportConfig {
 /// Fetches block history data from the provider
 /// and prepares it for export to era1 files
 /// for a given number of blocks then writes them to disk.
-pub fn export<P, B>(provider: &P, config: &ExportConfig) -> Result<Vec<PathBuf>>
+pub fn export<P>(provider: &P, config: &ExportConfig) -> Result<Vec<PathBuf>>
 where
-    P: BlockReader<Body = B>,
-    B: Into<BlockBody<TransactionSigned>>,
-    P::Header: Into<Header>,
+    P: BlockReader,
 {
     config.validate()?;
     info!(
@@ -259,16 +257,14 @@
 }
 
 // Compresses block data and returns compressed components with metadata
-fn compress_block_data<P, B>(
+fn compress_block_data<P>(
     provider: &P,
     header: P::Header,
     expected_block_number: BlockNumber,
     total_difficulty: &mut U256,
 ) -> Result<(CompressedHeader, CompressedBody, CompressedReceipts)>
 where
-    P: BlockReader<Body = B>,
-    B: Into<BlockBody<TransactionSigned>>,
-    P::Header: Into<Header>,
+    P: BlockReader,
 {
     let actual_block_number = header.number();
@@ -286,8 +282,8 @@ where
 
     *total_difficulty += header.difficulty();
 
-    let compressed_header = CompressedHeader::from_header(&header.into())?;
-    let compressed_body = CompressedBody::from_body(&body.into())?;
+    let compressed_header = CompressedHeader::from_header(&header)?;
+    let compressed_body = CompressedBody::from_body(&body)?;
     let compressed_receipts = CompressedReceipts::from_encodable_list(&receipts)
         .map_err(|e| eyre!("Failed to compress receipts: {}", e))?;
 
diff --git a/crates/era/src/execution_types.rs b/crates/era/src/execution_types.rs
index 27030b112a1..34b953b8359 100644
--- a/crates/era/src/execution_types.rs
+++ b/crates/era/src/execution_types.rs
@@ -161,9 +161,9 @@ impl CompressedHeader {
         self.decode()
     }
 
-    /// Create a [`CompressedHeader`] from an `alloy_consensus::Header`
-    pub fn from_header(header: &Header) -> Result<Self> {
-        let encoder = SnappyRlpCodec::<Header>::new();
+    /// Create a [`CompressedHeader`] from a header.
+    pub fn from_header<H: Encodable>(header: &H) -> Result<Self> {
+        let encoder = SnappyRlpCodec::new();
         let compressed = encoder.encode(header)?;
         Ok(Self::new(compressed))
     }
@@ -248,9 +248,9 @@ impl CompressedBody {
             .map_err(|e| E2sError::Rlp(format!("Failed to decode RLP data: {e}")))
     }
 
-    /// Create a [`CompressedBody`] from an `alloy_consensus::BlockBody`
-    pub fn from_body(body: &BlockBody<TransactionSigned>) -> Result<Self> {
-        let encoder = SnappyRlpCodec::<BlockBody<TransactionSigned>>::new();
+    /// Create a [`CompressedBody`] from a block body (e.g. `alloy_consensus::BlockBody`)
+    pub fn from_body<B: Encodable>(body: &B) -> Result<Self> {
+        let encoder = SnappyRlpCodec::new();
         let compressed = encoder.encode(body)?;
         Ok(Self::new(compressed))
     }

From e15be6584cecfbc83b33bec3e7e0850996b5f894 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Wed, 9 Jul 2025 15:23:00 +0200
Subject: [PATCH 104/305] chore: bump vdocs version (#17318)

---
 docs/vocs/vocs.config.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts
index 1f1b76f6a70..46685cd6064 100644
--- a/docs/vocs/vocs.config.ts
+++ b/docs/vocs/vocs.config.ts
@@ -15,7 +15,7 @@ export default defineConfig({
                 { text: 'Rustdocs', link: '/docs' },
                 { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' },
                 {
-                    text: 'v1.5.0',
+                    text: 'v1.5.1',
                     items: [
                         {
                             text: 'Releases',

From 700b1fd3122de8aa2670dabb88ef030428843324 Mon Sep 17 00:00:00 2001
From: catconcat
Date: Wed, 9 Jul 2025 20:29:46 +0700
Subject: [PATCH 105/305] feat: make build_receipt infallable (#17287)

Co-authored-by: Matthias Seitz
---
 crates/optimism/rpc/src/eth/block.rs      |  5 +---
 crates/optimism/rpc/src/eth/receipt.rs    | 19 +++++++------
 .../src/transaction/signed.rs             |  6 +++++
 crates/rpc/rpc-eth-types/src/receipt.rs   | 23 ++++++++-----------
 crates/rpc/rpc/src/eth/helpers/block.rs   |  9 +++-----
 crates/rpc/rpc/src/eth/helpers/receipt.rs | 15 ++++++++----
 6 files changed, 43 insertions(+), 34 deletions(-)

diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs
index 34ce4081b2e..6c1053b5f7d 100644
--- a/crates/optimism/rpc/src/eth/block.rs
+++ b/crates/optimism/rpc/src/eth/block.rs
@@ -4,7 +4,6 @@ use alloy_consensus::{transaction::TransactionMeta, BlockHeader};
 use alloy_rpc_types_eth::BlockId;
 use op_alloy_rpc_types::OpTransactionReceipt;
 use reth_chainspec::ChainSpecProvider;
-use reth_node_api::BlockBody;
 use reth_optimism_forks::OpHardforks;
 use reth_optimism_primitives::{OpReceipt, OpTransactionSigned};
 use reth_rpc_eth_api::{
@@ -53,9 +52,7 @@ where
         };
 
         return block
-            .body()
-            .transactions()
-            .iter()
+            .transactions_recovered()
             .zip(receipts.iter())
             .enumerate()
             .map(|(idx, (tx, receipt))| -> Result<_, _> {
diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs
index 92bd6fb1957..088d212a4b3 100644
--- a/crates/optimism/rpc/src/eth/receipt.rs
+++ b/crates/optimism/rpc/src/eth/receipt.rs
@@ -1,6 +1,7 @@
 //! Loads and formats OP receipt RPC response.
-use alloy_consensus::transaction::TransactionMeta; +use crate::{OpEthApi, OpEthApiError}; +use alloy_consensus::transaction::{SignerRecoverable, TransactionMeta}; use alloy_eips::eip2718::Encodable2718; use alloy_rpc_types_eth::{Log, TransactionReceipt}; use op_alloy_consensus::{OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope}; @@ -10,12 +11,11 @@ use reth_node_api::{FullNodeComponents, NodeTypes}; use reth_optimism_evm::RethL1BlockInfo; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; +use reth_primitives_traits::Recovered; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; use reth_storage_api::{ReceiptProvider, TransactionsProvider}; -use crate::{OpEthApi, OpEthApiError}; - impl LoadReceipt for OpEthApi where Self: Send + Sync, @@ -43,9 +43,13 @@ where let mut l1_block_info = reth_optimism_evm::extract_l1_info(block.body()).map_err(OpEthApiError::from)?; + let recovered_tx = tx + .try_into_recovered_unchecked() + .map_err(|_| reth_rpc_eth_types::EthApiError::InvalidTransactionSignature)?; + Ok(OpReceiptBuilder::new( &self.inner.eth_api.provider().chain_spec(), - &tx, + recovered_tx.as_recovered_ref(), meta, &receipt, &receipts, @@ -223,7 +227,7 @@ impl OpReceiptBuilder { /// Returns a new builder. pub fn new( chain_spec: &impl OpHardforks, - transaction: &OpTransactionSigned, + transaction: Recovered<&OpTransactionSigned>, meta: TransactionMeta, receipt: &OpReceipt, all_receipts: &[OpReceipt], @@ -231,6 +235,7 @@ impl OpReceiptBuilder { ) -> Result { let timestamp = meta.timestamp; let block_number = meta.block_number; + let tx_signed = *transaction.inner(); let core_receipt = build_receipt(transaction, meta, receipt, all_receipts, None, |receipt_with_bloom| { match receipt { @@ -249,10 +254,10 @@ impl OpReceiptBuilder { }) } } - })?; + }); let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp, block_number) - .l1_block_info(chain_spec, transaction, l1_block_info)? + .l1_block_info(chain_spec, tx_signed, l1_block_info)? .build(); Ok(Self { core_receipt, op_receipt_fields }) diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index dfa1d896162..104555db0f7 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -86,6 +86,12 @@ pub trait SignedTransaction: self.recover_signer().map(|signer| Recovered::new_unchecked(self.clone(), signer)) } + /// Tries to recover signer and return [`Recovered`] by cloning the type. + #[auto_impl(keep_default_for(&, Arc))] + fn try_clone_into_recovered_unchecked(&self) -> Result, RecoveryError> { + self.recover_signer_unchecked().map(|signer| Recovered::new_unchecked(self.clone(), signer)) + } + /// Tries to recover signer and return [`Recovered`]. /// /// Returns `Err(Self)` if the transaction's signature is invalid, see also diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index a99d4eff493..1d8659a66ad 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -1,8 +1,7 @@ //! RPC receipt response builder, extends a layer one receipt with layer two data. 
-use super::EthResult;
 use alloy_consensus::{
-    transaction::{SignerRecoverable, TransactionMeta},
+    transaction::{Recovered, SignerRecoverable, TransactionMeta},
     ReceiptEnvelope, Transaction, TxReceipt,
 };
 use alloy_eips::eip7840::BlobParams;
@@ -12,20 +11,18 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned, TxType};
 
 /// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure.
 pub fn build_receipt<T, R, E>(
-    transaction: &T,
+    transaction: Recovered<&T>,
     meta: TransactionMeta,
     receipt: &R,
     all_receipts: &[R],
     blob_params: Option<BlobParams>,
     build_envelope: impl FnOnce(ReceiptWithBloom<alloy_consensus::Receipt<Log>>) -> E,
-) -> EthResult<TransactionReceipt<E>>
+) -> TransactionReceipt<E>
 where
     R: TxReceipt<Log = Log>,
     T: Transaction + SignerRecoverable,
 {
-    // Note: we assume this transaction is valid, because it's mined (or part of pending block)
-    // and we don't need to check for pre EIP-2
-    let from = transaction.recover_signer_unchecked()?;
+    let from = transaction.signer();
 
     // get the previous transaction cumulative gas used
     let gas_used = if meta.index == 0 {
@@ -78,7 +75,7 @@ where
         TxKind::Call(addr) => (None, Some(Address(*addr))),
     };
 
-    Ok(TransactionReceipt {
+    TransactionReceipt {
         inner: build_envelope(ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }),
         transaction_hash: meta.tx_hash,
         transaction_index: Some(meta.index),
@@ -92,7 +89,7 @@ where
         // EIP-4844 fields
         blob_gas_price,
         blob_gas_used,
-    })
+    }
 }
 
 /// Receipt response builder.
@@ -108,12 +105,12 @@ impl EthReceiptBuilder {
     /// Note: This requires _all_ block receipts because we need to calculate the gas used by the
     /// transaction.
     pub fn new(
-        transaction: &TransactionSigned,
+        transaction: Recovered<&TransactionSigned>,
         meta: TransactionMeta,
         receipt: &Receipt,
         all_receipts: &[Receipt],
        blob_params: Option<BlobParams>,
-    ) -> EthResult<Self> {
+    ) -> Self {
        let base = build_receipt(
            transaction,
            meta,
@@ -127,9 +124,9 @@ impl EthReceiptBuilder {
                 TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt_with_bloom),
                 TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt_with_bloom),
             },
-        )?;
+        );
 
-        Ok(Self { base })
+        Self { base }
     }
 
     /// Builds a receipt response from the base response body, and any set additional fields.
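The net effect on call sites can be sketched as follows (an illustrative sketch, not part of the commit: it assumes the `EthReceiptBuilder` and `Recovered` items from the hunk above and elides the caller's surrounding error handling); the `helpers/receipt.rs` hunk below applies exactly this pattern:

```rust
// Before this patch: signer recovery happened inside the builder, so
// construction returned a Result that every caller had to unwrap:
// let rpc_receipt =
//     EthReceiptBuilder::new(&tx, meta, &receipt, &all_receipts, blob_params)?.build();

// After: the caller recovers the signer once up front (unchecked, because a
// mined transaction is assumed valid) and building the receipt cannot fail.
let recovered = tx.try_into_recovered_unchecked()?; // -> Recovered<TransactionSigned>
let rpc_receipt = EthReceiptBuilder::new(
    recovered.as_recovered_ref(), // Recovered<&TransactionSigned>
    meta,
    &receipt,
    &all_receipts,
    blob_params,
)
.build();
```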
diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index 724b3a5c965..6665644dbc7 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -4,7 +4,7 @@ use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; use alloy_rpc_types_eth::{BlockId, TransactionReceipt}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_evm::ConfigureEvm; -use reth_primitives_traits::{BlockBody, NodePrimitives}; +use reth_primitives_traits::NodePrimitives; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, @@ -46,9 +46,7 @@ where let blob_params = self.provider().chain_spec().blob_params_at_timestamp(timestamp); return block - .body() - .transactions() - .iter() + .transactions_recovered() .zip(receipts.iter()) .enumerate() .map(|(idx, (tx, receipt))| { @@ -61,8 +59,7 @@ where excess_blob_gas, timestamp, }; - EthReceiptBuilder::new(tx, meta, receipt, &receipts, blob_params) - .map(|builder| builder.build()) + Ok(EthReceiptBuilder::new(tx, meta, receipt, &receipts, blob_params).build()) }) .collect::, Self::Error>>() .map(Some) diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 9d0a744ee80..2018ba38aca 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,14 +1,13 @@ //! Builds an RPC receipt response w.r.t. data layout of network. -use alloy_consensus::transaction::TransactionMeta; +use crate::EthApi; +use alloy_consensus::transaction::{SignerRecoverable, TransactionMeta}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt}; use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; use reth_storage_api::{BlockReader, ReceiptProvider, TransactionsProvider}; -use crate::EthApi; - impl LoadReceipt for EthApi where Self: RpcNodeCoreExt< @@ -33,6 +32,14 @@ where .ok_or(EthApiError::HeaderNotFound(hash.into()))?; let blob_params = self.provider().chain_spec().blob_params_at_timestamp(meta.timestamp); - Ok(EthReceiptBuilder::new(&tx, meta, &receipt, &all_receipts, blob_params)?.build()) + Ok(EthReceiptBuilder::new( + // Note: we assume this transaction is valid, because it's mined and therefor valid + tx.try_into_recovered_unchecked()?.as_recovered_ref(), + meta, + &receipt, + &all_receipts, + blob_params, + ) + .build()) } } From 7e3eb03939f8c1f98d0d839670fc87c4bf726041 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 9 Jul 2025 15:44:15 +0200 Subject: [PATCH 106/305] docs: add section for enabling pre-merge history expiry (#17320) --- docs/vocs/docs/pages/guides/history-expiry.mdx | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/vocs/docs/pages/guides/history-expiry.mdx b/docs/vocs/docs/pages/guides/history-expiry.mdx index d3a0cb06386..91066218dee 100644 --- a/docs/vocs/docs/pages/guides/history-expiry.mdx +++ b/docs/vocs/docs/pages/guides/history-expiry.mdx @@ -8,6 +8,16 @@ In this chapter, we will learn how to use tools for dealing with historical data We will use [reth cli](../cli/cli) to import and export historical data. +## Enabling Pre-merge history expiry + +Opting in into pre-merge history expiry will remove all pre-merge transaction/receipt data (static files) for mainnet and sepolia. 
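+For example: `reth node --prune.bodies.pre-merge --prune.receipts.pre-merge` is an illustrative invocation; combine these flags, described below, with your usual node options.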
+ +For new and existing nodes: + +Use the flags `--prune.bodies.pre-merge` `--prune.receipts.pre-merge` + +See also [Partial history expiry announcement](https://blog.ethereum.org/2025/07/08/partial-history-exp) + ## File format The historical data is packaged and distributed in files of special formats with different names, all of which are based on [e2store](https://github.com/status-im/nimbus-eth2/blob/613f4a9a50c9c4bd8568844eaffb3ac15d067e56/docs/e2store.md#introduction). The most important ones are the **ERA1**, which deals with block range from genesis until the last pre-merge block, and **ERA**, which deals with block range from the merge onwards. See their [specification](https://github.com/eth-clients/e2store-format-specs) for more details. From b0cf23af44aa41698fdfd890b756ac0f6810c967 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Wed, 9 Jul 2025 14:52:10 +0100 Subject: [PATCH 107/305] fix(trie): duplicate hash mask check in sparse trie implementations (#17316) Co-authored-by: Claude --- crates/trie/sparse-parallel/src/trie.rs | 2 +- crates/trie/sparse/src/trie.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 069757e0520..fb258ccd2eb 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -1868,7 +1868,7 @@ impl SparseSubtrieInner { // removed nodes. update_actions.push(SparseTrieUpdatesAction::InsertRemoved(path)); } else if self - .branch_node_hash_masks + .branch_node_tree_masks .get(&path) .is_none_or(|mask| mask.is_empty()) && self.branch_node_hash_masks.get(&path).is_none_or(|mask| mask.is_empty()) diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index e174ecda387..a576956f213 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1618,7 +1618,7 @@ impl RevealedSparseTrie { updates.updated_nodes.remove(&path); updates.removed_nodes.insert(path); } else if self - .branch_node_hash_masks + .branch_node_tree_masks .get(&path) .is_none_or(|mask| mask.is_empty()) && self.branch_node_hash_masks From 0cbb4823c958ed14eb55c1fa34b240d157d6f791 Mon Sep 17 00:00:00 2001 From: nekomoto911 Date: Wed, 9 Jul 2025 22:52:44 +0800 Subject: [PATCH 108/305] perf(txpool): reduce one BTree lookup operation in `add_transaction` (#17313) --- crates/transaction-pool/src/pool/pending.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index fde9c36df45..3e90722dcd6 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -292,7 +292,7 @@ impl PendingPool { tx: Arc>, base_fee: u64, ) { - assert!( + debug_assert!( !self.contains(tx.id()), "transaction already included {:?}", self.get(tx.id()).unwrap().transaction From 7195eca1cbe8b9209508d807e72a592a9e86d6b3 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Wed, 9 Jul 2025 16:58:04 +0200 Subject: [PATCH 109/305] fix(trie): ParallelSparseTrie::update_leaf: add moved leaves to the prefix set (#17317) --- crates/trie/sparse-parallel/src/trie.rs | 141 +++++++++++++++++++++++- 1 file changed, 137 insertions(+), 4 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index fb258ccd2eb..62dba315768 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ 
b/crates/trie/sparse-parallel/src/trie.rs @@ -224,10 +224,13 @@ impl SparseTrieInterface for ParallelSparseTrie { let node = self.upper_subtrie.nodes.remove(node_path).expect("node belongs to upper subtrie"); - // If it's a leaf node, extract its value before getting mutable reference to subtrie + // If it's a leaf node, extract its value before getting mutable reference to subtrie. + // We also add the leaf the prefix set, so that whichever lower subtrie it belongs to + // will have its hash recalculated as part of `update_subtrie_hashes`. let leaf_value = if let SparseNode::Leaf { key, .. } = &node { let mut leaf_full_path = *node_path; leaf_full_path.extend(key); + self.prefix_set.insert(leaf_full_path); Some(( leaf_full_path, self.upper_subtrie @@ -2062,8 +2065,6 @@ enum SparseTrieUpdatesAction { #[cfg(test)] mod tests { - use std::collections::{BTreeMap, BTreeSet}; - use super::{ path_subtrie_index_unchecked, LowerSparseSubtrie, ParallelSparseTrie, SparseSubtrie, SparseSubtrieType, @@ -2100,8 +2101,9 @@ mod tests { use reth_trie_db::DatabaseTrieCursorFactory; use reth_trie_sparse::{ blinded::{BlindedProvider, DefaultBlindedProvider, RevealedNode}, - SparseNode, SparseTrieInterface, TrieMasks, + RevealedSparseTrie, SparseNode, SparseTrieInterface, SparseTrieUpdates, TrieMasks, }; + use std::collections::{BTreeMap, BTreeSet}; /// Pad nibbles to the length of a B256 hash with zeros on the right. fn pad_nibbles_right(mut nibbles: Nibbles) -> Nibbles { @@ -4362,6 +4364,137 @@ mod tests { }); } + #[test] + fn sparse_trie_fuzz_vs_serial() { + // Having only the first 3 nibbles set, we narrow down the range of keys + // to 4096 different hashes. It allows us to generate collisions more likely + // to test the sparse trie updates. + const KEY_NIBBLES_LEN: usize = 3; + + fn test(updates: Vec<(BTreeMap, BTreeSet)>) { + let default_provider = DefaultBlindedProvider; + let mut serial = RevealedSparseTrie::default().with_updates(true); + let mut parallel = ParallelSparseTrie::default().with_updates(true); + + for (update, keys_to_delete) in updates { + // Perform leaf updates on both tries + for (key, account) in update.clone() { + let account = account.into_trie_account(EMPTY_ROOT_HASH); + let mut account_rlp = Vec::new(); + account.encode(&mut account_rlp); + serial.update_leaf(key, account_rlp.clone(), &default_provider).unwrap(); + parallel.update_leaf(key, account_rlp, &default_provider).unwrap(); + } + + // Calculate roots and assert their equality + let serial_root = serial.root(); + let parallel_root = parallel.root(); + assert_eq!(parallel_root, serial_root); + + // Assert that both tries produce the same updates + let serial_updates = serial.take_updates(); + let parallel_updates = parallel.take_updates(); + pretty_assertions::assert_eq!( + BTreeMap::from_iter(parallel_updates.updated_nodes), + BTreeMap::from_iter(serial_updates.updated_nodes), + ); + pretty_assertions::assert_eq!( + BTreeSet::from_iter(parallel_updates.removed_nodes), + BTreeSet::from_iter(serial_updates.removed_nodes), + ); + + // Perform leaf removals on both tries + for key in &keys_to_delete { + parallel.remove_leaf(key, &default_provider).unwrap(); + serial.remove_leaf(key, &default_provider).unwrap(); + } + + // Calculate roots and assert their equality + let serial_root = serial.root(); + let parallel_root = parallel.root(); + assert_eq!(parallel_root, serial_root); + + // Assert that both tries produce the same updates + let serial_updates = serial.take_updates(); + let parallel_updates = parallel.take_updates(); 
+ pretty_assertions::assert_eq!( + BTreeMap::from_iter(parallel_updates.updated_nodes), + BTreeMap::from_iter(serial_updates.updated_nodes), + ); + pretty_assertions::assert_eq!( + BTreeSet::from_iter(parallel_updates.removed_nodes), + BTreeSet::from_iter(serial_updates.removed_nodes), + ); + } + } + + fn transform_updates( + updates: Vec>, + mut rng: impl rand::Rng, + ) -> Vec<(BTreeMap, BTreeSet)> { + let mut keys = BTreeSet::new(); + updates + .into_iter() + .map(|update| { + keys.extend(update.keys().copied()); + + let keys_to_delete_len = update.len() / 2; + let keys_to_delete = (0..keys_to_delete_len) + .map(|_| { + let key = + *rand::seq::IteratorRandom::choose(keys.iter(), &mut rng).unwrap(); + keys.take(&key).unwrap() + }) + .collect(); + + (update, keys_to_delete) + }) + .collect::>() + } + + proptest!(ProptestConfig::with_cases(10), |( + updates in proptest::collection::vec( + proptest::collection::btree_map( + any_with::(SizeRange::new(KEY_NIBBLES_LEN..=KEY_NIBBLES_LEN)).prop_map(pad_nibbles_right), + arb::(), + 1..50, + ), + 1..50, + ).prop_perturb(transform_updates) + )| { + test(updates) + }); + } + + #[test] + fn sparse_trie_two_leaves_at_lower_roots() { + let provider = DefaultBlindedProvider; + let mut trie = ParallelSparseTrie::default().with_updates(true); + let key_50 = Nibbles::unpack(hex!( + "0x5000000000000000000000000000000000000000000000000000000000000000" + )); + let key_51 = Nibbles::unpack(hex!( + "0x5100000000000000000000000000000000000000000000000000000000000000" + )); + + let account = Account::default().into_trie_account(EMPTY_ROOT_HASH); + let mut account_rlp = Vec::new(); + account.encode(&mut account_rlp); + + // Add a leaf and calculate the root. + trie.update_leaf(key_50, account_rlp.clone(), &provider).unwrap(); + trie.root(); + + // Add a second leaf and assert that the root is the expected value. + trie.update_leaf(key_51, account_rlp.clone(), &provider).unwrap(); + + let expected_root = + hex!("0xdaf0ef9f91a2f179bb74501209effdb5301db1697bcab041eca2234b126e25de"); + let root = trie.root(); + assert_eq!(root, expected_root); + assert_eq!(SparseTrieUpdates::default(), trie.take_updates()); + } + /// We have three leaves that share the same prefix: 0x00, 0x01 and 0x02. Hash builder trie has /// only nodes 0x00 and 0x01, and we have proofs for them. Node B is new and inserted in the /// sparse trie first. From 9ec522d9145544918e395c35db93b0fb2cbc25ed Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Wed, 9 Jul 2025 16:06:55 +0100 Subject: [PATCH 110/305] fix(trie): move masks to `ParallelSparseTrie` level (#17322) Co-authored-by: Claude --- crates/trie/sparse-parallel/src/trie.rs | 92 ++++++++++++++++--------- 1 file changed, 60 insertions(+), 32 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 62dba315768..12c9eebb959 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -45,6 +45,10 @@ pub struct ParallelSparseTrie { prefix_set: PrefixSetMut, /// Optional tracking of trie updates for later use. updates: Option, + /// When a bit is set, the corresponding child subtree is stored in the database. + branch_node_tree_masks: HashMap, + /// When a bit is set, the corresponding child is stored as a hash in the database. 
+ branch_node_hash_masks: HashMap, } impl Default for ParallelSparseTrie { @@ -57,6 +61,8 @@ impl Default for ParallelSparseTrie { lower_subtries: [const { LowerSparseSubtrie::Blind(None) }; NUM_LOWER_SUBTRIES], prefix_set: PrefixSetMut::default(), updates: None, + branch_node_tree_masks: HashMap::default(), + branch_node_hash_masks: HashMap::default(), } } } @@ -91,6 +97,14 @@ impl SparseTrieInterface for ParallelSparseTrie { node: TrieNode, masks: TrieMasks, ) -> SparseTrieResult<()> { + // Store masks + if let Some(tree_mask) = masks.tree_mask { + self.branch_node_tree_masks.insert(path, tree_mask); + } + if let Some(hash_mask) = masks.hash_mask { + self.branch_node_hash_masks.insert(path, hash_mask); + } + if let Some(subtrie) = self.lower_subtrie_for_path_mut(&path) { return subtrie.reveal_node(path, &node, masks); } @@ -544,7 +558,12 @@ impl SparseTrieInterface for ParallelSparseTrie { // Update subtrie hashes serially if nostd for ChangedSubtrie { index, mut subtrie, mut prefix_set } in subtries { let mut update_actions = self.updates_enabled().then(|| Vec::new()); - subtrie.update_hashes(&mut prefix_set, &mut update_actions); + subtrie.update_hashes( + &mut prefix_set, + &mut update_actions, + &self.branch_node_tree_masks, + &self.branch_node_hash_masks, + ); tx.send((index, subtrie, update_actions)).unwrap(); } @@ -552,11 +571,18 @@ impl SparseTrieInterface for ParallelSparseTrie { // Update subtrie hashes in parallel { use rayon::iter::{IntoParallelIterator, ParallelIterator}; + let branch_node_tree_masks = &self.branch_node_tree_masks; + let branch_node_hash_masks = &self.branch_node_hash_masks; subtries .into_par_iter() .map(|ChangedSubtrie { index, mut subtrie, mut prefix_set }| { let mut update_actions = self.updates_enabled().then(Vec::new); - subtrie.update_hashes(&mut prefix_set, &mut update_actions); + subtrie.update_hashes( + &mut prefix_set, + &mut update_actions, + branch_node_tree_masks, + branch_node_hash_masks, + ); (index, subtrie, update_actions) }) .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap()); @@ -972,7 +998,14 @@ impl ParallelSparseTrie { }; // Calculate the RLP node for the current node using upper subtrie - self.upper_subtrie.inner.rlp_node(prefix_set, &mut update_actions, stack_item, node); + self.upper_subtrie.inner.rlp_node( + prefix_set, + &mut update_actions, + stack_item, + node, + &self.branch_node_tree_masks, + &self.branch_node_hash_masks, + ); } // If there were any branch node updates as a result of calculating the RLP node for the @@ -1321,13 +1354,6 @@ impl SparseSubtrie { return Ok(()) } - if let Some(tree_mask) = masks.tree_mask { - self.inner.branch_node_tree_masks.insert(path, tree_mask); - } - if let Some(hash_mask) = masks.hash_mask { - self.inner.branch_node_hash_masks.insert(path, hash_mask); - } - match node { TrieNode::EmptyRoot => { // For an empty root, ensure that we are at the root path, and at the upper subtrie. @@ -1516,7 +1542,8 @@ impl SparseSubtrie { /// - `update_actions`: A buffer which `SparseTrieUpdatesAction`s will be written to in the /// event that any changes to the top-level updates are required. If None then update /// retention is disabled. - /// is disabled. 
+ /// - `branch_node_tree_masks`: The tree masks for branch nodes + /// - `branch_node_hash_masks`: The hash masks for branch nodes /// /// # Returns /// @@ -1530,6 +1557,8 @@ impl SparseSubtrie { &mut self, prefix_set: &mut PrefixSet, update_actions: &mut Option>, + branch_node_tree_masks: &HashMap, + branch_node_hash_masks: &HashMap, ) -> RlpNode { trace!(target: "trie::parallel_sparse", "Updating subtrie hashes"); @@ -1548,7 +1577,14 @@ impl SparseSubtrie { .get_mut(&path) .unwrap_or_else(|| panic!("node at path {path:?} does not exist")); - self.inner.rlp_node(prefix_set, update_actions, stack_item, node); + self.inner.rlp_node( + prefix_set, + update_actions, + stack_item, + node, + branch_node_tree_masks, + branch_node_hash_masks, + ); } debug_assert_eq!(self.inner.buffers.rlp_node_stack.len(), 1); @@ -1573,10 +1609,6 @@ impl SparseSubtrie { /// struct. #[derive(Clone, PartialEq, Eq, Debug, Default)] struct SparseSubtrieInner { - /// When a branch is set, the corresponding child subtree is stored in the database. - branch_node_tree_masks: HashMap, - /// When a bit is set, the corresponding child is stored as a hash in the database. - branch_node_hash_masks: HashMap, /// Map from leaf key paths to their values. /// All values are stored here instead of directly in leaf nodes. values: HashMap>, @@ -1602,6 +1634,8 @@ impl SparseSubtrieInner { /// retention is disabled. /// - `stack_item`: The stack item to process /// - `node`: The sparse node to process (will be mutated to update hash) + /// - `branch_node_tree_masks`: The tree masks for branch nodes + /// - `branch_node_hash_masks`: The hash masks for branch nodes /// /// # Side Effects /// @@ -1619,6 +1653,8 @@ impl SparseSubtrieInner { update_actions: &mut Option>, mut stack_item: RlpNodePathStackItem, node: &mut SparseNode, + branch_node_tree_masks: &HashMap, + branch_node_hash_masks: &HashMap, ) { let path = stack_item.path; trace!( @@ -1775,7 +1811,7 @@ impl SparseSubtrieInner { } else { // A blinded node has the tree mask bit set child_node_type.is_hash() && - self.branch_node_tree_masks + branch_node_tree_masks .get(&path) .is_some_and(|mask| mask.is_bit_set(last_child_nibble)) }; @@ -1789,7 +1825,7 @@ impl SparseSubtrieInner { let hash = child.as_hash().filter(|_| { child_node_type.is_branch() || (child_node_type.is_hash() && - self.branch_node_hash_masks.get(&path).is_some_and( + branch_node_hash_masks.get(&path).is_some_and( |mask| mask.is_bit_set(last_child_nibble), )) }); @@ -1858,23 +1894,15 @@ impl SparseSubtrieInner { ); update_actions .push(SparseTrieUpdatesAction::InsertUpdated(path, branch_node)); - } else if self - .branch_node_tree_masks - .get(&path) - .is_some_and(|mask| !mask.is_empty()) || - self.branch_node_hash_masks - .get(&path) - .is_some_and(|mask| !mask.is_empty()) + } else if branch_node_tree_masks.get(&path).is_some_and(|mask| !mask.is_empty()) || + branch_node_hash_masks.get(&path).is_some_and(|mask| !mask.is_empty()) { // If new tree and hash masks are empty, but previously they weren't, we // need to remove the node update and add the node itself to the list of // removed nodes. 
update_actions.push(SparseTrieUpdatesAction::InsertRemoved(path)); - } else if self - .branch_node_tree_masks - .get(&path) - .is_none_or(|mask| mask.is_empty()) && - self.branch_node_hash_masks.get(&path).is_none_or(|mask| mask.is_empty()) + } else if branch_node_tree_masks.get(&path).is_none_or(|mask| mask.is_empty()) && + branch_node_hash_masks.get(&path).is_none_or(|mask| mask.is_empty()) { // If new tree and hash masks are empty, and they were previously empty // as well, we need to remove the node update. @@ -1905,8 +1933,6 @@ impl SparseSubtrieInner { /// Clears the subtrie, keeping the data structures allocated. fn clear(&mut self) { - self.branch_node_tree_masks.clear(); - self.branch_node_hash_masks.clear(); self.values.clear(); self.buffers.clear(); } @@ -3019,6 +3045,8 @@ mod tests { &mut PrefixSetMut::from([leaf_1_full_path, leaf_2_full_path, leaf_3_full_path]) .freeze(), &mut None, + &HashMap::default(), + &HashMap::default(), ); // Compare hashes between hash builder and subtrie From 73f820af400588144b30ee5e95f464b976947b06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Wed, 9 Jul 2025 19:19:25 +0200 Subject: [PATCH 111/305] feat(sdk): add `local_payload_attributes_builder` to `DebugNodeLauncher` (#17297) Co-authored-by: Arsenii Kulikov --- Cargo.lock | 3 +- crates/engine/local/src/payload.rs | 16 ---------- crates/ethereum/node/Cargo.toml | 1 + crates/ethereum/node/src/node.rs | 10 ++++++- crates/ethereum/node/tests/e2e/dev.rs | 6 ++-- crates/node/builder/src/launch/debug.rs | 38 +++++++++++++++++++++++- crates/node/builder/src/launch/engine.rs | 19 ------------ crates/node/core/Cargo.toml | 1 + crates/node/core/src/node_config.rs | 11 +++++++ crates/optimism/node/Cargo.toml | 2 +- crates/optimism/node/src/node.rs | 9 +++++- examples/custom-engine-types/Cargo.toml | 1 - examples/custom-engine-types/src/main.rs | 4 --- 13 files changed, 74 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c5412e83533..39884ad1742 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3335,7 +3335,6 @@ dependencies = [ "alloy-rpc-types", "eyre", "reth-basic-payload-builder", - "reth-engine-local", "reth-ethereum", "reth-ethereum-payload-builder", "reth-payload-builder", @@ -8943,6 +8942,7 @@ dependencies = [ "reth-db", "reth-discv4", "reth-discv5", + "reth-engine-local", "reth-engine-primitives", "reth-ethereum-forks", "reth-net-nat", @@ -8994,6 +8994,7 @@ dependencies = [ "reth-consensus", "reth-db", "reth-e2e-test-utils", + "reth-engine-local", "reth-engine-primitives", "reth-ethereum-consensus", "reth-ethereum-engine-primitives", diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index 327690197e4..408ea2b8d05 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -64,19 +64,3 @@ where } } } - -/// A temporary workaround to support local payload engine launcher for arbitrary payload -/// attributes. 
-// TODO(mattsse): This should be reworked so that LocalPayloadAttributesBuilder can be implemented -// for any -pub trait UnsupportedLocalAttributes: Send + Sync + 'static {} - -impl PayloadAttributesBuilder for LocalPayloadAttributesBuilder -where - ChainSpec: Send + Sync + 'static, - T: UnsupportedLocalAttributes, -{ - fn build(&self, _: u64) -> T { - panic!("Unsupported payload attributes") - } -} diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index d3266bbb21b..a1cca45ea2d 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -34,6 +34,7 @@ reth-chainspec.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-trie-db.workspace = true reth-rpc-eth-types.workspace = true +reth-engine-local.workspace = true reth-engine-primitives.workspace = true reth-payload-primitives.workspace = true diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 41ea86eb2f8..a7d2913eac3 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -6,6 +6,7 @@ use alloy_eips::{eip7840::BlobParams, merge::EPOCH_SLOTS}; use alloy_rpc_types_engine::ExecutionData; use reth_chainspec::{ChainSpec, EthChainSpec, EthereumHardforks, Hardforks}; use reth_consensus::{ConsensusError, FullConsensus}; +use reth_engine_local::LocalPayloadAttributesBuilder; use reth_engine_primitives::EngineTypes; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::{ @@ -17,7 +18,8 @@ use reth_evm::{ }; use reth_network::{primitives::BasicNetworkPrimitives, NetworkHandle, PeersInfo}; use reth_node_api::{ - AddOnsContext, FullNodeComponents, NodeAddOns, NodePrimitives, PrimitivesTy, TxTy, + AddOnsContext, FullNodeComponents, NodeAddOns, NodePrimitives, PayloadAttributesBuilder, + PrimitivesTy, TxTy, }; use reth_node_builder::{ components::{ @@ -353,6 +355,12 @@ impl> DebugNode for EthereumNode { fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> reth_ethereum_primitives::Block { rpc_block.into_consensus().convert_transactions() } + + fn local_payload_attributes_builder( + chain_spec: &Self::ChainSpec, + ) -> impl PayloadAttributesBuilder<::PayloadAttributes> { + LocalPayloadAttributesBuilder::new(Arc::new(chain_spec.clone())) + } } /// A regular ethereum evm and executor builder. 
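On the launcher side, the wiring now looks roughly like this (an illustrative sketch assuming an already-configured node `builder`; the e2e test diff below makes exactly this change):

```rust
// Sketch: dev-mode local mining comes from wrapping the engine launcher in a
// DebugNodeLauncher, which spawns the LocalMiner when --dev is enabled.
let engine_launcher = EngineNodeLauncher::new(
    builder.task_executor().clone(),
    builder.config().datadir(),
    Default::default(),
);
let launcher = DebugNodeLauncher::new(engine_launcher);
let handle = builder.launch_with(launcher);
```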
diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index d4a24191dbd..ad214b04fe0 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -5,7 +5,8 @@ use futures::StreamExt; use reth_chainspec::ChainSpec; use reth_node_api::{BlockBody, FullNodeComponents, FullNodePrimitives, NodeTypes}; use reth_node_builder::{ - rpc::RethRpcAddOns, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, + rpc::RethRpcAddOns, DebugNodeLauncher, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, + NodeHandle, }; use reth_node_core::args::DevArgs; use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; @@ -29,11 +30,12 @@ async fn can_run_dev_node() -> eyre::Result<()> { .with_components(EthereumNode::components()) .with_add_ons(EthereumAddOns::default()) .launch_with_fn(|builder| { - let launcher = EngineNodeLauncher::new( + let engine_launcher = EngineNodeLauncher::new( builder.task_executor().clone(), builder.config().datadir(), Default::default(), ); + let launcher = DebugNodeLauncher::new(engine_launcher); builder.launch_with(launcher) }) .await?; diff --git a/crates/node/builder/src/launch/debug.rs b/crates/node/builder/src/launch/debug.rs index 64762587c62..bbe6a7a6b72 100644 --- a/crates/node/builder/src/launch/debug.rs +++ b/crates/node/builder/src/launch/debug.rs @@ -4,7 +4,8 @@ use alloy_provider::network::AnyNetwork; use jsonrpsee::core::{DeserializeOwned, Serialize}; use reth_chainspec::EthChainSpec; use reth_consensus_debug_client::{DebugConsensusClient, EtherscanBlockProvider, RpcBlockProvider}; -use reth_node_api::{BlockTy, FullNodeComponents}; +use reth_engine_local::LocalMiner; +use reth_node_api::{BlockTy, FullNodeComponents, PayloadAttributesBuilder, PayloadTypes}; use std::sync::Arc; use tracing::info; @@ -56,6 +57,18 @@ pub trait DebugNode: Node { /// For Ethereum nodes, this typically converts from `alloy_rpc_types_eth::Block` /// to the node's internal block representation. fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> BlockTy; + + /// Creates a payload attributes builder for local mining in dev mode. + /// + /// It will be used by the `LocalMiner` when dev mode is enabled. + /// + /// The builder is responsible for creating the payload attributes that define how blocks should + /// be constructed during local mining. + fn local_payload_attributes_builder( + chain_spec: &Self::ChainSpec, + ) -> impl PayloadAttributesBuilder< + <::Payload as PayloadTypes>::PayloadAttributes, + >; } /// Node launcher with support for launching various debugging utilities. 
@@ -157,6 +170,29 @@ where }); } + if config.dev.dev { + info!(target: "reth::cli", "Using local payload attributes builder for dev mode"); + + let blockchain_db = handle.node.provider.clone(); + let chain_spec = config.chain.clone(); + let beacon_engine_handle = handle.node.add_ons_handle.beacon_engine_handle.clone(); + let pool = handle.node.pool.clone(); + let payload_builder_handle = handle.node.payload_builder_handle.clone(); + + let dev_mining_mode = handle.node.config.dev_mining_mode(pool); + handle.node.task_executor.spawn_critical("local engine", async move { + LocalMiner::new( + blockchain_db, + N::Types::local_payload_attributes_builder(&chain_spec), + beacon_engine_handle, + dev_mining_mode, + payload_builder_handle, + ) + .run() + .await + }); + } + Ok(handle) } } diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index b9eca178acd..4b17954ed9c 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -12,7 +12,6 @@ use alloy_consensus::BlockHeader; use futures::{stream_select, StreamExt}; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_db_api::{database_metrics::DatabaseMetrics, Database}; -use reth_engine_local::{LocalMiner, LocalPayloadAttributesBuilder}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, @@ -24,7 +23,6 @@ use reth_network::{types::BlockRangeUpdate, NetworkSyncUpdater, SyncState}; use reth_network_api::BlockDownloaderProvider; use reth_node_api::{ BeaconConsensusEngineHandle, BuiltPayload, FullNodeTypes, NodeTypes, NodeTypesWithDBAdapter, - PayloadAttributesBuilder, PayloadTypes, }; use reth_node_core::{ dirs::{ChainPath, DataDirPath}, @@ -77,9 +75,6 @@ where CB: NodeComponentsBuilder, AO: RethRpcAddOns> + EngineValidatorAddOn>, - LocalPayloadAttributesBuilder: PayloadAttributesBuilder< - <::Payload as PayloadTypes>::PayloadAttributes, - >, { type Node = NodeHandle, AO>; @@ -230,20 +225,6 @@ where ctx.components().evm_config().clone(), ); - if ctx.is_dev() { - ctx.task_executor().spawn_critical( - "local engine", - LocalMiner::new( - ctx.blockchain_db().clone(), - LocalPayloadAttributesBuilder::new(ctx.chain_spec()), - beacon_engine_handle.clone(), - ctx.dev_mining_mode(ctx.components().pool()), - ctx.components().payload_builder_handle().clone(), - ) - .run(), - ); - } - info!(target: "reth::cli", "Consensus engine initialized"); let events = stream_select!( diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 1a36c9af5ef..2240fa98837 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -34,6 +34,7 @@ reth-network-peers.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true reth-ethereum-forks.workspace = true +reth-engine-local.workspace = true reth-engine-primitives.workspace = true # ethereum diff --git a/crates/node/core/src/node_config.rs b/crates/node/core/src/node_config.rs index b1998110a33..f2962e0f236 100644 --- a/crates/node/core/src/node_config.rs +++ b/crates/node/core/src/node_config.rs @@ -14,6 +14,7 @@ use alloy_primitives::{BlockNumber, B256}; use eyre::eyre; use reth_chainspec::{ChainSpec, EthChainSpec, MAINNET}; use reth_config::config::PruneConfig; +use reth_engine_local::MiningMode; use reth_ethereum_forks::{EthereumHardforks, Head}; use reth_network_p2p::headers::client::HeadersClient; use reth_primitives_traits::SealedHeader; @@ -22,6 +23,7 @@ use 
reth_storage_api::{ BlockHashReader, DatabaseProviderFactory, HeaderProvider, StageCheckpointReader, }; use reth_storage_errors::provider::ProviderResult; +use reth_transaction_pool::TransactionPool; use serde::{de::DeserializeOwned, Serialize}; use std::{ fs, @@ -490,6 +492,15 @@ impl NodeConfig { era: self.era, } } + + /// Returns the [`MiningMode`] intended for --dev mode. + pub fn dev_mining_mode(&self, pool: impl TransactionPool) -> MiningMode { + if let Some(interval) = self.dev.block_time { + MiningMode::interval(interval) + } else { + MiningMode::instant(pool) + } + } } impl Default for NodeConfig { diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 63de9ec3291..46a282481ea 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -31,6 +31,7 @@ reth-tasks = { workspace = true, optional = true } reth-trie-common.workspace = true reth-node-core.workspace = true reth-rpc-engine-api.workspace = true +reth-engine-local = { workspace = true, features = ["op"] } reth-rpc-api.workspace = true # op-reth @@ -78,7 +79,6 @@ reth-tasks.workspace = true reth-payload-util.workspace = true reth-payload-validator.workspace = true reth-revm = { workspace = true, features = ["std"] } -reth-engine-local = { workspace = true, features = ["op"] } alloy-primitives.workspace = true op-alloy-consensus.workspace = true diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 62433c1ba58..4d642548e12 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -9,6 +9,7 @@ use crate::{ use op_alloy_consensus::{interop::SafetyLevel, OpPooledTransaction}; use op_alloy_rpc_types_engine::{OpExecutionData, OpPayloadAttributes}; use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks}; +use reth_engine_local::LocalPayloadAttributesBuilder; use reth_evm::{ConfigureEvm, EvmFactory, EvmFactoryFor}; use reth_network::{ types::BasicNetworkPrimitives, NetworkConfig, NetworkHandle, NetworkManager, NetworkPrimitives, @@ -16,7 +17,7 @@ use reth_network::{ }; use reth_node_api::{ AddOnsContext, EngineTypes, FullNodeComponents, KeyHasherTy, NodeAddOns, NodePrimitives, - PayloadTypes, PrimitivesTy, TxTy, + PayloadAttributesBuilder, PayloadTypes, PrimitivesTy, TxTy, }; use reth_node_builder::{ components::{ @@ -246,6 +247,12 @@ where fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> reth_node_api::BlockTy { rpc_block.into_consensus() } + + fn local_payload_attributes_builder( + chain_spec: &Self::ChainSpec, + ) -> impl PayloadAttributesBuilder<::PayloadAttributes> { + LocalPayloadAttributesBuilder::new(Arc::new(chain_spec.clone())) + } } impl NodeTypes for OpNode { diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml index d0a0543b5d3..50bd58620e3 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -9,7 +9,6 @@ license.workspace = true reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true -reth-engine-local.workspace = true reth-ethereum = { workspace = true, features = ["test-utils", "node", "node-api", "pool"] } reth-tracing.workspace = true reth-trie-db.workspace = true diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index ae42090d214..8ab99b8fcb7 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -29,7 +29,6 @@ use 
alloy_rpc_types::{ Withdrawal, }; use reth_basic_payload_builder::{BuildArguments, BuildOutcome, PayloadBuilder, PayloadConfig}; -use reth_engine_local::payload::UnsupportedLocalAttributes; use reth_ethereum::{ chainspec::{Chain, ChainSpec, ChainSpecProvider}, node::{ @@ -76,9 +75,6 @@ pub struct CustomPayloadAttributes { pub custom: u64, } -// TODO(mattsse): remove this tmp workaround -impl UnsupportedLocalAttributes for CustomPayloadAttributes {} - /// Custom error type used in payload attributes validation #[derive(Debug, Error)] pub enum CustomError { From 959323fa6fa31401ab9def6bfc41f67878062088 Mon Sep 17 00:00:00 2001 From: Roman Krasiuk Date: Wed, 9 Jul 2025 22:44:49 +0200 Subject: [PATCH 112/305] feat(sync): track time spent in stages (#17321) --- crates/stages/api/Cargo.toml | 1 + crates/stages/api/src/metrics/listener.rs | 7 ++- crates/stages/api/src/metrics/sync_metrics.rs | 3 ++ crates/stages/api/src/pipeline/mod.rs | 54 ++++++++++++------- 4 files changed, 44 insertions(+), 21 deletions(-) diff --git a/crates/stages/api/Cargo.toml b/crates/stages/api/Cargo.toml index 6d230b34731..c8eb81289d0 100644 --- a/crates/stages/api/Cargo.toml +++ b/crates/stages/api/Cargo.toml @@ -44,6 +44,7 @@ auto_impl.workspace = true [dev-dependencies] assert_matches.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } +tokio = { workspace = true, features = ["sync", "rt-multi-thread"] } tokio-stream.workspace = true reth-testing-utils.workspace = true diff --git a/crates/stages/api/src/metrics/listener.rs b/crates/stages/api/src/metrics/listener.rs index aba001a92f1..8c0707d1bea 100644 --- a/crates/stages/api/src/metrics/listener.rs +++ b/crates/stages/api/src/metrics/listener.rs @@ -4,6 +4,7 @@ use std::{ future::Future, pin::Pin, task::{ready, Context, Poll}, + time::Duration, }; use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender}; use tracing::trace; @@ -28,6 +29,8 @@ pub enum MetricEvent { /// Maximum known block number reachable by this stage. /// If specified, `entities_total` metric is updated. max_block_number: Option, + /// The duration of stage iteration including database commit. + elapsed: Duration, }, } @@ -57,12 +60,14 @@ impl MetricsListener { stage_checkpoint: None, }, max_block_number: Some(height), + elapsed: Duration::default(), }); } } - MetricEvent::StageCheckpoint { stage_id, checkpoint, max_block_number } => { + MetricEvent::StageCheckpoint { stage_id, checkpoint, max_block_number, elapsed } => { let stage_metrics = self.sync_metrics.get_stage_metrics(stage_id); + stage_metrics.total_elapsed.increment(elapsed.as_secs_f64()); stage_metrics.checkpoint.set(checkpoint.block_number as f64); let (processed, total) = match checkpoint.entities() { diff --git a/crates/stages/api/src/metrics/sync_metrics.rs b/crates/stages/api/src/metrics/sync_metrics.rs index b89d7b8822e..754a2b22fcc 100644 --- a/crates/stages/api/src/metrics/sync_metrics.rs +++ b/crates/stages/api/src/metrics/sync_metrics.rs @@ -4,6 +4,7 @@ use std::collections::HashMap; #[derive(Debug, Default)] pub(crate) struct SyncMetrics { + /// Stage metrics by stage. pub(crate) stages: HashMap, } @@ -26,4 +27,6 @@ pub(crate) struct StageMetrics { pub(crate) entities_processed: Gauge, /// The number of total entities of the last commit for a stage, if applicable. pub(crate) entities_total: Gauge, + /// The number of seconds spent executing the stage and committing the data. 
+ pub(crate) total_elapsed: Gauge, } diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index b8d41e9e552..61c6755be9f 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -14,7 +14,10 @@ use reth_provider::{ use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; use reth_tokio_util::{EventSender, EventStream}; -use std::pin::Pin; +use std::{ + pin::Pin, + time::{Duration, Instant}, +}; use tokio::sync::watch; use tracing::*; @@ -138,6 +141,7 @@ impl Pipeline { stage_id, checkpoint: provider.get_stage_checkpoint(stage_id)?.unwrap_or_default(), max_block_number: None, + elapsed: Duration::default(), }); } Ok(()) @@ -338,6 +342,7 @@ impl Pipeline { "Starting unwind" ); while checkpoint.block_number > to { + let unwind_started_at = Instant::now(); let input = UnwindInput { checkpoint, unwind_to: to, bad_block }; self.event_sender.notify(PipelineEvent::Unwind { stage_id, input }); @@ -353,6 +358,13 @@ impl Pipeline { done = checkpoint.block_number == to, "Stage unwound" ); + + provider_rw.save_stage_checkpoint(stage_id, checkpoint)?; + + // Notify event listeners and update metrics. + self.event_sender + .notify(PipelineEvent::Unwound { stage_id, result: unwind_output }); + if let Some(metrics_tx) = &mut self.metrics_tx { let _ = metrics_tx.send(MetricEvent::StageCheckpoint { stage_id, @@ -360,12 +372,9 @@ impl Pipeline { // We assume it was set in the previous execute iteration, so it // doesn't change when we unwind. max_block_number: None, + elapsed: unwind_started_at.elapsed(), }); } - provider_rw.save_stage_checkpoint(stage_id, checkpoint)?; - - self.event_sender - .notify(PipelineEvent::Unwound { stage_id, result: unwind_output }); // update finalized block if needed let last_saved_finalized_block_number = @@ -452,6 +461,7 @@ impl Pipeline { }; } + let stage_started_at = Instant::now(); let provider_rw = self.provider_factory.database_provider_rw()?; self.event_sender.notify(PipelineEvent::Run { @@ -466,18 +476,16 @@ impl Pipeline { match self.stage(stage_index).execute(&provider_rw, exec_input) { Ok(out @ ExecOutput { checkpoint, done }) => { - made_progress |= - checkpoint.block_number != prev_checkpoint.unwrap_or_default().block_number; - - if let Some(metrics_tx) = &mut self.metrics_tx { - let _ = metrics_tx.send(MetricEvent::StageCheckpoint { - stage_id, - checkpoint, - max_block_number: target, - }); - } + // Update stage checkpoint. provider_rw.save_stage_checkpoint(stage_id, checkpoint)?; + // Commit processed data to the database. + UnifiedStorageWriter::commit(provider_rw)?; + + // Invoke stage post commit hook. + self.stage(stage_index).post_execute_commit()?; + + // Notify event listeners and update metrics. 
self.event_sender.notify(PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: stage_index + 1, @@ -486,13 +494,19 @@ impl Pipeline { stage_id, result: out.clone(), }); + if let Some(metrics_tx) = &mut self.metrics_tx { + let _ = metrics_tx.send(MetricEvent::StageCheckpoint { + stage_id, + checkpoint, + max_block_number: target, + elapsed: stage_started_at.elapsed(), + }); + } - UnifiedStorageWriter::commit(provider_rw)?; - - self.stage(stage_index).post_execute_commit()?; - + let block_number = checkpoint.block_number; + let prev_block_number = prev_checkpoint.unwrap_or_default().block_number; + made_progress |= block_number != prev_block_number; if done { - let block_number = checkpoint.block_number; return Ok(if made_progress { ControlFlow::Continue { block_number } } else { From 4cd0c0d6133c7e2ed2f71aa58869cb1694596222 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 9 Jul 2025 23:19:42 +0200 Subject: [PATCH 113/305] test: allow empty response (#17332) --- .../stages/stages/src/stages/s3/downloader/fetch.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/crates/stages/stages/src/stages/s3/downloader/fetch.rs b/crates/stages/stages/src/stages/s3/downloader/fetch.rs index 1d8bba739fd..9c5e3c3d324 100644 --- a/crates/stages/stages/src/stages/s3/downloader/fetch.rs +++ b/crates/stages/stages/src/stages/s3/downloader/fetch.rs @@ -174,11 +174,18 @@ mod tests { reth_tracing::init_test_tracing(); let b3sum = b256!("0xe9908f4992ae39c4d1fe9984dd743ae3f8e9a84a4a5af768128833605ff72723"); - let url = "https://link.testfile.org/15MB"; + let url = "https://link.testfile.org/5MB"; let file = tempfile::NamedTempFile::new().unwrap(); let filename = file.path().file_name().unwrap().to_str().unwrap(); let target_dir = file.path().parent().unwrap(); - fetch(filename, target_dir, url, 4, Some(b3sum)).await.unwrap(); + match fetch(filename, target_dir, url, 4, Some(b3sum)).await { + Ok(_) | Err(DownloaderError::EmptyContentLength) => { + // the testfil API can be flaky, so we ignore this error + } + Err(error) => { + panic!("Unexpected download error: {error:?}"); + } + } } } From b317431b771efd83b1a33b8d816b0c72485ade50 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 10 Jul 2025 00:29:23 +0200 Subject: [PATCH 114/305] chore: make tracer match non-exhaustive (#17338) --- crates/rpc/rpc/src/debug.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 632811962d6..7117c83cbad 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -277,6 +277,7 @@ where let this = self.clone(); if let Some(tracer) = tracer { + #[allow(unreachable_patterns)] return match tracer { GethDebugTracerType::BuiltInTracer(tracer) => match tracer { GethDebugBuiltInTracerType::FourByteTracer => { @@ -444,6 +445,11 @@ where Ok(GethTrace::JS(res)) } + _ => { + // Note: this match is non-exhaustive in case we need to add support for + // additional tracers + Err(EthApiError::Unsupported("unsupported tracer").into()) + } } } @@ -732,6 +738,7 @@ where }; if let Some(tracer) = tracer { + #[allow(unreachable_patterns)] return match tracer { GethDebugTracerType::BuiltInTracer(tracer) => match tracer { GethDebugBuiltInTracerType::FourByteTracer => { @@ -847,6 +854,11 @@ where .map_err(Eth::Error::from_eth_err)?; Ok((GethTrace::JS(result), state)) } + _ => { + // Note: this match is non-exhaustive in case we need to add support for + // additional tracers + 
Err(EthApiError::Unsupported("unsupported tracer").into()) + } } From e3d2632be2a57215ffb1f3df1d47d06d5473df90 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 10 Jul 2025 00:46:46 +0200 Subject: [PATCH 115/305] chore: remove type hints (#17336) --- crates/optimism/rpc/src/eth/receipt.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 088d212a4b3..69eba47910c 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -239,13 +239,13 @@ impl OpReceiptBuilder { let core_receipt = build_receipt(transaction, meta, receipt, all_receipts, None, |receipt_with_bloom| { match receipt { - OpReceipt::Legacy(_) => OpReceiptEnvelope::::Legacy(receipt_with_bloom), - OpReceipt::Eip2930(_) => OpReceiptEnvelope::::Eip2930(receipt_with_bloom), - OpReceipt::Eip1559(_) => OpReceiptEnvelope::::Eip1559(receipt_with_bloom), - OpReceipt::Eip7702(_) => OpReceiptEnvelope::::Eip7702(receipt_with_bloom), + OpReceipt::Legacy(_) => OpReceiptEnvelope::Legacy(receipt_with_bloom), + OpReceipt::Eip2930(_) => OpReceiptEnvelope::Eip2930(receipt_with_bloom), + OpReceipt::Eip1559(_) => OpReceiptEnvelope::Eip1559(receipt_with_bloom), + OpReceipt::Eip7702(_) => OpReceiptEnvelope::Eip7702(receipt_with_bloom), OpReceipt::Deposit(receipt) => { - OpReceiptEnvelope::::Deposit(OpDepositReceiptWithBloom:: { - receipt: OpDepositReceipt:: { + OpReceiptEnvelope::Deposit(OpDepositReceiptWithBloom { + receipt: OpDepositReceipt { inner: receipt_with_bloom.receipt, deposit_nonce: receipt.deposit_nonce, deposit_receipt_version: receipt.deposit_receipt_version, From b3d722f1fdc56de2354248dfd9a0e82f03941586 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 10 Jul 2025 00:46:57 +0200 Subject: [PATCH 116/305] chore: simplify receipt envelope conversion (#17337) --- crates/rpc/rpc-eth-types/src/receipt.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 1d8659a66ad..4988d13879b 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -7,7 +7,7 @@ use alloy_consensus::{ use alloy_eips::eip7840::BlobParams; use alloy_primitives::{Address, TxKind}; use alloy_rpc_types_eth::{Log, ReceiptWithBloom, TransactionReceipt}; -use reth_ethereum_primitives::{Receipt, TransactionSigned, TxType}; +use reth_ethereum_primitives::{Receipt, TransactionSigned}; /// Builds a [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure.
pub fn build_receipt( @@ -117,13 +117,7 @@ impl EthReceiptBuilder { receipt, all_receipts, blob_params, - |receipt_with_bloom| match receipt.tx_type { - TxType::Legacy => ReceiptEnvelope::Legacy(receipt_with_bloom), - TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt_with_bloom), - TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt_with_bloom), - TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt_with_bloom), - TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt_with_bloom), - }, + |receipt_with_bloom| ReceiptEnvelope::from_typed(receipt.tx_type, receipt_with_bloom), ); Self { base } From 0326dab81c18ab77c22293683c728276e7468be0 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 10 Jul 2025 11:34:18 +0200 Subject: [PATCH 117/305] chore: replace CacheDb with trait bounds (#17315) --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 8659721d457..22ec006a4f8 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -11,7 +11,7 @@ use alloy_consensus::BlockHeader; use alloy_eips::eip2930::AccessListResult; use alloy_evm::{ call::caller_gas_allowance, - overrides::{apply_block_overrides, apply_state_overrides}, + overrides::{apply_block_overrides, apply_state_overrides, OverrideBlockHashes}, }; use alloy_primitives::{Bytes, B256, U256}; use alloy_rpc_types_eth::{ @@ -31,7 +31,6 @@ use reth_primitives_traits::{Recovered, SealedHeader, SignedTransaction}; use reth_revm::{ database::StateProviderDatabase, db::{CacheDB, State}, - DatabaseRef, }; use reth_rpc_convert::{RpcConvert, RpcTypes}; use reth_rpc_eth_types::{ @@ -735,12 +734,12 @@ pub trait Call: &self, mut evm_env: EvmEnvFor, mut request: TransactionRequest, - db: &mut CacheDB, + db: &mut DB, overrides: EvmOverrides, ) -> Result<(EvmEnvFor, TxEnvFor), Self::Error> where - DB: DatabaseRef, - EthApiError: From<::Error>, + DB: Database + DatabaseCommit + OverrideBlockHashes, + EthApiError: From<::Error>, { if request.gas > Some(self.call_gas_limit()) { // configured gas exceeds limit From 0f49e35fbbc1f79dc6c246a9eb8c8f18fa641267 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 10 Jul 2025 10:27:23 +0100 Subject: [PATCH 118/305] fix(trie): reset hashes of nodes along the path of removed leaf (#17331) Co-authored-by: Brian Picciano --- crates/trie/sparse-parallel/src/trie.rs | 60 ++++++++++++++++++++++--- 1 file changed, 55 insertions(+), 5 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 12c9eebb959..c791a35fd0e 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -329,6 +329,9 @@ impl SparseTrieInterface for ParallelSparseTrie { let mut curr_subtrie = self.upper_subtrie.as_mut(); let mut curr_subtrie_is_upper = true; + // List of node paths which need to have their hashes reset + let mut paths_to_reset_hashes = Vec::new(); + loop { let curr_node = curr_subtrie.nodes.get_mut(&curr_path).unwrap(); @@ -345,7 +348,10 @@ impl SparseTrieInterface for ParallelSparseTrie { // field unset, as it will no longer be valid once the leaf is removed. match curr_node { SparseNode::Branch { hash, .. 
} => { - *hash = None; + if hash.is_some() { + paths_to_reset_hashes + .push((SparseSubtrieType::from_path(&curr_path), curr_path)); + } // If there is already an extension leading into a branch, then that // extension is no longer relevant. @@ -360,7 +366,10 @@ impl SparseTrieInterface for ParallelSparseTrie { branch_parent_node = Some(curr_node.clone()); } SparseNode::Extension { hash, .. } => { - *hash = None; + if hash.is_some() { + paths_to_reset_hashes + .push((SparseSubtrieType::from_path(&curr_path), curr_path)); + } // We can assume a new branch node will be found after the extension, so // there's no need to modify branch_parent_path/node even if it's @@ -392,9 +401,29 @@ impl SparseTrieInterface for ParallelSparseTrie { } // We've traversed to the leaf and collected its ancestors as necessary. Remove the leaf - // from its SparseSubtrie. + // from its SparseSubtrie and reset the hashes of the nodes along the path. self.prefix_set.insert(*full_path); leaf_subtrie.inner.values.remove(full_path); + for (subtrie_type, path) in paths_to_reset_hashes { + let node = match subtrie_type { + SparseSubtrieType::Upper => self.upper_subtrie.nodes.get_mut(&path), + SparseSubtrieType::Lower(idx) => self.lower_subtries[idx] + .as_revealed_mut() + .expect("lower subtrie is revealed") + .nodes + .get_mut(&path), + } + .expect("node exists"); + + match node { + SparseNode::Extension { hash, .. } | SparseNode::Branch { hash, .. } => { + *hash = None + } + SparseNode::Empty | SparseNode::Hash(_) | SparseNode::Leaf { .. } => { + unreachable!("only branch and extension node hashes can be reset") + } + } + } self.remove_node(&leaf_path); // If the leaf was at the root replace its node with the empty value. We can stop execution @@ -993,7 +1022,10 @@ impl ParallelSparseTrie { .expect("lower subtrie node must exist"); // Lower subtrie root node hashes must be computed before updating upper subtrie // hashes - debug_assert!(node.hash().is_some()); + debug_assert!( + node.hash().is_some(), + "Lower subtrie root node at path {path:?} has no hash" + ); node }; @@ -1090,6 +1122,17 @@ impl ParallelSparseTrie { (changed_subtries, unchanged_prefix_set) } + + /// Returns an iterator over all nodes in the trie in no particular order. + #[cfg(test)] + fn all_nodes(&self) -> impl IntoIterator { + let mut nodes = vec![]; + for subtrie in self.lower_subtries.iter().filter_map(LowerSparseSubtrie::as_revealed_ref) { + nodes.extend(subtrie.nodes.iter()) + } + nodes.extend(self.upper_subtrie.nodes.iter()); + nodes + } } /// This is a subtrie of the [`ParallelSparseTrie`] that contains a map from path to sparse trie @@ -3572,9 +3615,16 @@ mod tests { let provider = MockBlindedProvider::new(); + // Remove a leaf which does not exist; this should have no effect. 
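+ // In particular, no branch or extension node hashes along the traversed + // path should be unset, which the assertion below verifies.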
+ trie.remove_leaf(&Nibbles::from_nibbles([0x0, 0x1, 0x2, 0x3, 0x4, 0xF]), &provider) + .unwrap(); + for (path, node) in trie.all_nodes() { + assert!(node.hash().is_some(), "path {path:?} should still have a hash"); + } + // Remove the leaf at path 0x01234 let leaf_full_path = Nibbles::from_nibbles([0x0, 0x1, 0x2, 0x3, 0x4]); - trie.remove_leaf(&leaf_full_path, provider).unwrap(); + trie.remove_leaf(&leaf_full_path, &provider).unwrap(); let upper_subtrie = &trie.upper_subtrie; let lower_subtrie_10 = trie.lower_subtries[0x01].as_revealed_ref().unwrap(); From ea944fa75a9e1df7487475ade5577dcea769da63 Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Thu, 10 Jul 2025 15:03:25 +0530 Subject: [PATCH 119/305] fix(`docs`): broken rustdocs link (#17341) --- docs/vocs/vocs.config.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts index 46685cd6064..a13320ae40d 100644 --- a/docs/vocs/vocs.config.ts +++ b/docs/vocs/vocs.config.ts @@ -1,3 +1,4 @@ +import React from 'react' import { defineConfig } from 'vocs' import { sidebar } from './sidebar' import { basePath } from './redirects.config' @@ -12,7 +13,9 @@ export default defineConfig({ topNav: [ { text: 'Run', link: '/run/ethereum' }, { text: 'SDK', link: '/sdk/overview' }, - { text: 'Rustdocs', link: '/docs' }, + { + element: React.createElement('a', { href: '/docs', target: '_self' }, 'Rustdocs') + }, { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' }, { text: 'v1.5.1', From 1a7c335a60b8cc7e17105d3b96e07ca303b27fbd Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 10 Jul 2025 13:21:51 +0300 Subject: [PATCH 120/305] feat: `re-execute` command (#17330) --- Cargo.lock | 1 + crates/cli/commands/Cargo.toml | 1 + crates/cli/commands/src/common.rs | 15 ++ crates/cli/commands/src/lib.rs | 1 + crates/cli/commands/src/re_execute.rs | 222 +++++++++++++++++++ crates/ethereum/cli/src/interface.rs | 9 +- crates/optimism/cli/src/app.rs | 17 +- crates/optimism/cli/src/commands/mod.rs | 6 +- crates/stages/stages/src/stages/execution.rs | 3 +- docs/vocs/docs/pages/cli/SUMMARY.mdx | 1 + docs/vocs/docs/pages/cli/reth.mdx | 1 + docs/vocs/docs/pages/cli/reth/re-execute.mdx | 159 +++++++++++++ 12 files changed, 427 insertions(+), 9 deletions(-) create mode 100644 crates/cli/commands/src/re_execute.rs create mode 100644 docs/vocs/docs/pages/cli/reth/re-execute.mdx diff --git a/Cargo.lock b/Cargo.lock index 39884ad1742..e668dd6ad77 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7403,6 +7403,7 @@ dependencies = [ "reth-provider", "reth-prune", "reth-prune-types", + "reth-revm", "reth-stages", "reth-stages-types", "reth-static-file", diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index b8e4d397697..548049bd7a9 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -43,6 +43,7 @@ reth-ethereum-primitives = { workspace = true, optional = true } reth-provider.workspace = true reth-prune.workspace = true reth-prune-types = { workspace = true, optional = true } +reth-revm.workspace = true reth-stages.workspace = true reth-stages-types = { workspace = true, optional = true } reth-static-file-types = { workspace = true, features = ["clap"] } diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index be3bcec5a17..340dbf8e760 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -260,3 +260,18 @@ where &self.1 } } + +/// 
Helper trait alias for an [`FnOnce`] producing [`CliNodeComponents`]. +pub trait CliComponentsBuilder: + FnOnce(Arc) -> Self::Components +{ + type Components: CliNodeComponents; +} + +impl CliComponentsBuilder for F +where + F: FnOnce(Arc) -> Comp, + Comp: CliNodeComponents, +{ + type Components = Comp; +} diff --git a/crates/cli/commands/src/lib.rs b/crates/cli/commands/src/lib.rs index e602fac8207..ed57a55aae8 100644 --- a/crates/cli/commands/src/lib.rs +++ b/crates/cli/commands/src/lib.rs @@ -22,6 +22,7 @@ pub mod launcher; pub mod node; pub mod p2p; pub mod prune; +pub mod re_execute; pub mod recover; pub mod stage; #[cfg(feature = "arbitrary")] diff --git a/crates/cli/commands/src/re_execute.rs b/crates/cli/commands/src/re_execute.rs new file mode 100644 index 00000000000..a555297488e --- /dev/null +++ b/crates/cli/commands/src/re_execute.rs @@ -0,0 +1,222 @@ +//! Re-execute blocks from the database in parallel. + +use crate::common::{ + AccessRights, CliComponentsBuilder, CliNodeComponents, CliNodeTypes, Environment, + EnvironmentArgs, +}; +use alloy_consensus::{BlockHeader, TxReceipt}; +use clap::Parser; +use eyre::WrapErr; +use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; +use reth_cli::chainspec::ChainSpecParser; +use reth_consensus::FullConsensus; +use reth_evm::{execute::Executor, ConfigureEvm}; +use reth_primitives_traits::{format_gas_throughput, BlockBody, GotExpected, SignedTransaction}; +use reth_provider::{ + BlockNumReader, BlockReader, ChainSpecProvider, DatabaseProviderFactory, ReceiptProvider, + StaticFileProviderFactory, TransactionVariant, +}; +use reth_revm::database::StateProviderDatabase; +use reth_stages::stages::calculate_gas_used_from_headers; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; +use tokio::{sync::mpsc, task::JoinSet}; +use tracing::*; + +/// `reth re-execute` command +/// +/// Re-execute blocks in parallel to verify historical sync correctness. +#[derive(Debug, Parser)] +pub struct Command { + #[command(flatten)] + env: EnvironmentArgs, + + /// The height to start at. + #[arg(long, default_value = "1")] + from: u64, + + /// The height to end at. Defaults to the latest block. + #[arg(long)] + to: Option, + + /// Number of tasks to run in parallel + #[arg(long, default_value = "10")] + num_tasks: u64, +} + +impl Command { + /// Returns the underlying chain being used to run this command + pub fn chain_spec(&self) -> Option<&Arc> { + Some(&self.env.chain) + } +} + +impl> Command { + /// Execute `re-execute` command + pub async fn execute(self, components: impl CliComponentsBuilder) -> eyre::Result<()> + where + N: CliNodeTypes, + { + let Environment { provider_factory, ..
} = self.env.init::(AccessRights::RO)?; + + let provider = provider_factory.database_provider_ro()?; + let components = components(provider_factory.chain_spec()); + + let min_block = self.from; + let max_block = self.to.unwrap_or(provider.best_block_number()?); + + let total_blocks = max_block - min_block; + let total_gas = calculate_gas_used_from_headers( + &provider_factory.static_file_provider(), + min_block..=max_block, + )?; + let blocks_per_task = total_blocks / self.num_tasks; + + let db_at = { + let provider_factory = provider_factory.clone(); + move |block_number: u64| { + StateProviderDatabase( + provider_factory.history_by_block_number(block_number).unwrap(), + ) + } + }; + + let (stats_tx, mut stats_rx) = mpsc::unbounded_channel(); + + let mut tasks = JoinSet::new(); + for i in 0..self.num_tasks { + let start_block = min_block + i * blocks_per_task; + let end_block = + if i == self.num_tasks - 1 { max_block } else { start_block + blocks_per_task }; + + // Spawn thread executing blocks + let provider_factory = provider_factory.clone(); + let evm_config = components.evm_config().clone(); + let consensus = components.consensus().clone(); + let db_at = db_at.clone(); + let stats_tx = stats_tx.clone(); + tasks.spawn_blocking(move || { + let mut executor = evm_config.batch_executor(db_at(start_block - 1)); + for block in start_block..end_block { + let block = provider_factory + .recovered_block(block.into(), TransactionVariant::NoHash)? + .unwrap(); + let result = executor.execute_one(&block)?; + + if let Err(err) = consensus + .validate_block_post_execution(&block, &result) + .wrap_err_with(|| format!("Failed to validate block {}", block.number())) + { + let correct_receipts = + provider_factory.receipts_by_block(block.number().into())?.unwrap(); + + for (i, (receipt, correct_receipt)) in + result.receipts.iter().zip(correct_receipts.iter()).enumerate() + { + if receipt != correct_receipt { + let tx_hash = block.body().transactions()[i].tx_hash(); + error!( + ?receipt, + ?correct_receipt, + index = i, + ?tx_hash, + "Invalid receipt" + ); + let expected_gas_used = correct_receipt.cumulative_gas_used() - + if i == 0 { + 0 + } else { + correct_receipts[i - 1].cumulative_gas_used() + }; + let got_gas_used = receipt.cumulative_gas_used() - + if i == 0 { + 0 + } else { + result.receipts[i - 1].cumulative_gas_used() + }; + if got_gas_used != expected_gas_used { + let mismatch = GotExpected { + expected: expected_gas_used, + got: got_gas_used, + }; + + error!(number=?block.number(), ?mismatch, "Gas usage mismatch"); + return Err(err); + } + } else { + continue; + } + } + + return Err(err); + } + let _ = stats_tx.send(block.gas_used()); + + // Reset DB once in a while to avoid OOM + if executor.size_hint() > 1_000_000 { + executor = evm_config.batch_executor(db_at(block.number())); + } + } + + eyre::Ok(()) + }); + } + + let instant = Instant::now(); + let mut total_executed_blocks = 0; + let mut total_executed_gas = 0; + + let mut last_logged_gas = 0; + let mut last_logged_blocks = 0; + let mut last_logged_time = Instant::now(); + + let mut interval = tokio::time::interval(Duration::from_secs(10)); + + loop { + tokio::select! 
{ + Some(gas_used) = stats_rx.recv() => { + total_executed_blocks += 1; + total_executed_gas += gas_used; + } + result = tasks.join_next() => { + if let Some(result) = result { + if matches!(result, Err(_) | Ok(Err(_))) { + error!(?result); + return Err(eyre::eyre!("Re-execution failed: {result:?}")); + } + } else { + break; + } + } + _ = interval.tick() => { + let blocks_executed = total_executed_blocks - last_logged_blocks; + let gas_executed = total_executed_gas - last_logged_gas; + + if blocks_executed > 0 { + let progress = 100.0 * total_executed_gas as f64 / total_gas as f64; + info!( + throughput=?format_gas_throughput(gas_executed, last_logged_time.elapsed()), + progress=format!("{progress:.2}%"), + "Executed {blocks_executed} blocks" + ); + } + + last_logged_blocks = total_executed_blocks; + last_logged_gas = total_executed_gas; + last_logged_time = Instant::now(); + } + } + } + + info!( + start_block = min_block, + end_block = max_block, + throughput=?format_gas_throughput(total_executed_gas, instant.elapsed()), + "Re-executed successfully" + ); + + Ok(()) + } +} diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index 4419d700f93..3d89c1317e1 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -8,7 +8,7 @@ use reth_cli_commands::{ config_cmd, db, download, dump_genesis, import, import_era, init_cmd, init_state, launcher::FnLauncher, node::{self, NoArgs}, - p2p, prune, recover, stage, + p2p, prune, re_execute, recover, stage, }; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; @@ -186,6 +186,9 @@ impl, Ext: clap::Args + fmt::Debug> Cl runner.run_command_until_exit(|ctx| command.execute::(ctx)) } Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), + Commands::ReExecute(command) => { + runner.run_until_ctrl_c(command.execute::(components)) + } } } @@ -248,6 +251,9 @@ pub enum Commands { /// Prune according to the configuration without any limits #[command(name = "prune")] Prune(prune::PruneCommand), + /// Re-execute blocks in parallel to verify historical sync correctness. + #[command(name = "re-execute")] + ReExecute(re_execute::Command), } impl Commands { @@ -270,6 +276,7 @@ impl Commands { Self::Debug(cmd) => cmd.chain_spec(), Self::Recover(cmd) => cmd.chain_spec(), Self::Prune(cmd) => cmd.chain_spec(), + Self::ReExecute(cmd) => cmd.chain_spec(), } } } diff --git a/crates/optimism/cli/src/app.rs b/crates/optimism/cli/src/app.rs index 1c7af0d328c..e0774068b7e 100644 --- a/crates/optimism/cli/src/app.rs +++ b/crates/optimism/cli/src/app.rs @@ -8,7 +8,7 @@ use reth_optimism_chainspec::OpChainSpec; use reth_optimism_consensus::OpBeaconConsensus; use reth_optimism_node::{OpExecutorProvider, OpNode}; use reth_tracing::{FileWorkerGuard, Layers}; -use std::fmt; +use std::{fmt, sync::Arc}; use tracing::info; /// A wrapper around a parsed CLI that handles command execution. 
@@ -65,6 +65,10 @@ where // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); + let components = |spec: Arc| { + (OpExecutorProvider::optimism(spec.clone()), OpBeaconConsensus::new(spec)) + }; + match self.cli.command { Commands::Node(command) => { runner.run_command_until_exit(|ctx| command.execute(ctx, launcher)) @@ -83,11 +87,9 @@ where } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::()), - Commands::Stage(command) => runner.run_command_until_exit(|ctx| { - command.execute::(ctx, |spec| { - (OpExecutorProvider::optimism(spec.clone()), OpBeaconConsensus::new(spec)) - }) - }), + Commands::Stage(command) => { + runner.run_command_until_exit(|ctx| command.execute::(ctx, components)) + } Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), Commands::Recover(command) => { @@ -96,6 +98,9 @@ where Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), #[cfg(feature = "dev")] Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), + Commands::ReExecute(command) => { + runner.run_until_ctrl_c(command.execute::(components)) + } } } diff --git a/crates/optimism/cli/src/commands/mod.rs b/crates/optimism/cli/src/commands/mod.rs index 515307c9ddb..161aa1d0bab 100644 --- a/crates/optimism/cli/src/commands/mod.rs +++ b/crates/optimism/cli/src/commands/mod.rs @@ -7,7 +7,7 @@ use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::{ config_cmd, db, dump_genesis, init_cmd, node::{self, NoArgs}, - p2p, prune, recover, stage, + p2p, prune, re_execute, recover, stage, }; use std::{fmt, sync::Arc}; @@ -62,6 +62,9 @@ pub enum Commands), } impl< @@ -86,6 +89,7 @@ impl< Self::ImportReceiptsOp(cmd) => cmd.chain_spec(), #[cfg(feature = "dev")] Self::TestVectors(_) => None, + Self::ReExecute(cmd) => cmd.chain_spec(), } } } diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index e5592cd8dec..50313f24d42 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -627,7 +627,8 @@ fn execution_checkpoint( }) } -fn calculate_gas_used_from_headers( +/// Calculates the total amount of gas used from the headers in the given range. 
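+/// +/// Exposed as `pub` so the `re-execute` command can compute the total gas in a +/// block range and report gas-based progress against it.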
+pub fn calculate_gas_used_from_headers( provider: &StaticFileProvider, range: RangeInclusive, ) -> Result { diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx index 143cded1466..44d7408253f 100644 --- a/docs/vocs/docs/pages/cli/SUMMARY.mdx +++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx @@ -46,3 +46,4 @@ - [`reth recover`](/cli/reth/recover) - [`reth recover storage-tries`](/cli/reth/recover/storage-tries) - [`reth prune`](/cli/reth/prune) + - [`reth re-execute`](/cli/reth/re-execute) diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 8225d71b3b7..031fe62f465 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -23,6 +23,7 @@ Commands: debug Various debug routines recover Scripts for node recovery prune Prune according to the configuration without any limits + re-execute Re-execute blocks in parallel to verify historical sync correctness help Print this message or the help of the given subcommand(s) Options: diff --git a/docs/vocs/docs/pages/cli/reth/re-execute.mdx b/docs/vocs/docs/pages/cli/reth/re-execute.mdx new file mode 100644 index 00000000000..22883e9d610 --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/re-execute.mdx @@ -0,0 +1,159 @@ +# reth re-execute + +Re-execute blocks in parallel to verify historical sync correctness + +```bash +$ reth re-execute --help +``` +```txt +Usage: reth re-execute [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files + The absolute path to store static files in. + + --config + The path to the configuration file to use + + --chain + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Database: + --db.log-level + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout + Read transaction timeout in seconds, 0 means no timeout + + --from + The height to start at + + [default: 1] + + --to + The height to end at. Defaults to the latest block + + --num-tasks + Number of tasks to run in parallel + + [default: 10] + +Logging: + --log.stdout.format + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. 
This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter + The filter to use for logs written to stdout + + [default: ] + + --log.file.format + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory + The path to put log files in + + [default: /logs] + + --log.file.max-size + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter + The filter to use for logs written to journald + + [default: error] + + --color + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!) 
+ + -q, --quiet + Silence all log output +``` \ No newline at end of file From 26b7258d57d03c06df3d44f5191dda64e2340d82 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Thu, 10 Jul 2025 12:41:48 +0200 Subject: [PATCH 121/305] feat(ci): reorganize e2e tests with dedicated nextest filter and CI workflow (#17290) --- .config/nextest.toml | 6 + .github/workflows/e2e.yml | 46 ++++++++ .github/workflows/integration.yml | 2 +- .github/workflows/unit.yml | 2 +- crates/e2e-test-utils/Cargo.toml | 4 + crates/e2e-test-utils/src/testsuite/README.md | 106 ++++++++++++++++++ crates/e2e-test-utils/src/testsuite/mod.rs | 3 - .../e2e-testsuite/main.rs} | 76 +++++++++---- crates/engine/tree/Cargo.toml | 4 + crates/engine/tree/src/tree/mod.rs | 2 - .../e2e-testsuite/main.rs} | 2 +- crates/optimism/node/Cargo.toml | 4 + .../node/tests/{e2e => e2e-testsuite}/main.rs | 0 .../node/tests/{e2e => e2e-testsuite}/p2p.rs | 0 .../tests/{e2e => e2e-testsuite}/testsuite.rs | 0 crates/rpc/rpc-e2e-tests/Cargo.toml | 4 + .../{rpc_compat.rs => e2e-testsuite/main.rs} | 0 17 files changed, 233 insertions(+), 28 deletions(-) create mode 100644 .github/workflows/e2e.yml create mode 100644 crates/e2e-test-utils/src/testsuite/README.md rename crates/e2e-test-utils/{src/testsuite/examples.rs => tests/e2e-testsuite/main.rs} (82%) rename crates/engine/tree/{src/tree/e2e_tests.rs => tests/e2e-testsuite/main.rs} (99%) rename crates/optimism/node/tests/{e2e => e2e-testsuite}/main.rs (100%) rename crates/optimism/node/tests/{e2e => e2e-testsuite}/p2p.rs (100%) rename crates/optimism/node/tests/{e2e => e2e-testsuite}/testsuite.rs (100%) rename crates/rpc/rpc-e2e-tests/tests/{rpc_compat.rs => e2e-testsuite/main.rs} (100%) diff --git a/.config/nextest.toml b/.config/nextest.toml index e107857a351..94d55bf0311 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -5,3 +5,9 @@ slow-timeout = { period = "30s", terminate-after = 4 } [[profile.default.overrides]] filter = "test(general_state_tests)" slow-timeout = { period = "1m", terminate-after = 10 } + +# E2E tests using the testsuite framework from crates/e2e-test-utils +# These tests are located in tests/e2e-testsuite/ directories across various crates +[[profile.default.overrides]] +filter = "binary(e2e_testsuite)" +slow-timeout = { period = "2m", terminate-after = 3 } diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml new file mode 100644 index 00000000000..ac43d6cc84f --- /dev/null +++ b/.github/workflows/e2e.yml @@ -0,0 +1,46 @@ +# Runs e2e tests using the testsuite framework + +name: e2e + +on: + pull_request: + merge_group: + push: + branches: [main] + +env: + CARGO_TERM_COLOR: always + SEED: rustethereumethereumrust + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +jobs: + test: + name: e2e-testsuite + runs-on: + group: Reth + env: + RUST_BACKTRACE: 1 + timeout-minutes: 90 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + - uses: taiki-e/install-action@nextest + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: Run e2e tests + run: | + cargo nextest run \ + --locked --features "asm-keccak" \ + --workspace \ + --exclude 'example-*' \ + --exclude 'exex-subscription' \ + --exclude 'reth-bench' \ + --exclude 'ef-tests' \ + --exclude 'op-reth' \ + --exclude 'reth' \ + -E 'binary(e2e_testsuite)' + diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml index 1369ba1502a..c59a5767054 100644 --- 
a/.github/workflows/integration.yml +++ b/.github/workflows/integration.yml @@ -47,7 +47,7 @@ jobs: cargo nextest run \ --locked --features "asm-keccak ${{ matrix.network }}" \ --workspace --exclude ef-tests \ - -E "kind(test)" + -E "kind(test) and not binary(e2e_testsuite)" - if: matrix.network == 'optimism' name: Run tests run: | diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index a46bf5bc3ca..ffdf38dc9f7 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -61,7 +61,7 @@ jobs: ${{ matrix.args }} --workspace \ --exclude ef-tests --no-tests=warn \ --partition hash:${{ matrix.partition }}/2 \ - -E "!kind(test)" + -E "!kind(test) and not binary(e2e_testsuite)" state: name: Ethereum state tests diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index ae3ae3cc281..ca10c80e578 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -72,3 +72,7 @@ tokio-stream.workspace = true serde_json.workspace = true tracing.workspace = true derive_more.workspace = true + +[[test]] +name = "e2e_testsuite" +path = "tests/e2e-testsuite/main.rs" diff --git a/crates/e2e-test-utils/src/testsuite/README.md b/crates/e2e-test-utils/src/testsuite/README.md new file mode 100644 index 00000000000..1d91367fef0 --- /dev/null +++ b/crates/e2e-test-utils/src/testsuite/README.md @@ -0,0 +1,106 @@ +# E2E Test Suite Framework + +This directory contains the framework for writing end-to-end (e2e) tests in Reth. The framework provides utilities for setting up test environments, performing actions, and verifying blockchain behavior. + +## Test Organization + +E2E tests using this framework follow a consistent structure across the codebase: + +### Directory Structure +Each crate that requires e2e tests should organize them as follows: +``` +/ +├── src/ +│ └── ... (implementation code) +├── tests/ +│ └── e2e-testsuite/ +│ └── main.rs (or other test files) +└── Cargo.toml +``` + +### Cargo.toml Configuration +In your crate's `Cargo.toml`, define the e2e test binary: +```toml +[[test]] +name = "e2e_testsuite" +path = "tests/e2e-testsuite/main.rs" +harness = true +``` + +**Important**: The test binary MUST be named `e2e_testsuite` to be properly recognized by the nextest filter and CI workflows. + +## Running E2E Tests + +### Run all e2e tests across the workspace +```bash +cargo nextest run --workspace \ + --exclude 'example-*' \ + --exclude 'exex-subscription' \ + --exclude 'reth-bench' \ + --exclude 'ef-tests' \ + --exclude 'op-reth' \ + --exclude 'reth' \ + -E 'binary(e2e_testsuite)' +``` + +Note: The `--exclude` flags prevent compilation of crates that don't contain e2e tests (examples, benchmarks, binaries, and EF tests), significantly reducing build time. + +### Run e2e tests for a specific crate +```bash +cargo nextest run -p -E 'binary(e2e_testsuite)' +``` + +### Run with additional features +```bash +cargo nextest run --locked --features "asm-keccak" --workspace -E 'binary(e2e_testsuite)' +``` + +### Run a specific test +```bash +cargo nextest run --workspace -E 'binary(e2e_testsuite) and test(test_name)' +``` + +## Writing E2E Tests + +Tests use the framework components from this directory: + +```rust +use reth_e2e_test_utils::{setup_import, Environment, TestBuilder}; + +#[tokio::test] +async fn test_example() -> eyre::Result<()> { + // Create test environment + let (mut env, mut handle) = TestBuilder::new() + .build() + .await?; + + // Perform test actions... 
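+    // e.g. drive the chain with actions such as ProduceBlocks or MakeCanonical +    // (see tests/e2e-testsuite/main.rs for complete examples)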
+ + Ok(()) +} +``` + +## Framework Components + +- **Environment**: Core test environment managing nodes and network state +- **TestBuilder**: Builder pattern for configuring test environments +- **Actions** (`actions/`): Pre-built test actions like block production, reorgs, etc. +- **Setup utilities**: Helper functions for common test scenarios + +## CI Integration + +E2E tests run in a dedicated GitHub Actions workflow (`.github/workflows/e2e.yml`) with: +- Extended timeouts (tests are flagged as slow after 2 minutes and terminated after 3 such periods) +- Isolation from unit and integration tests +- Parallel execution support + +## Nextest Configuration + +The framework uses custom nextest settings (`.config/nextest.toml`): +```toml +[[profile.default.overrides]] +filter = "binary(e2e_testsuite)" +slow-timeout = { period = "2m", terminate-after = 3 } +``` + +This ensures all e2e tests get appropriate timeouts for complex blockchain operations. \ No newline at end of file diff --git a/crates/e2e-test-utils/src/testsuite/mod.rs b/crates/e2e-test-utils/src/testsuite/mod.rs index e2e737f2a38..0762212f5f2 100644 --- a/crates/e2e-test-utils/src/testsuite/mod.rs +++ b/crates/e2e-test-utils/src/testsuite/mod.rs @@ -20,9 +20,6 @@ use reth_rpc_builder::auth::AuthServerHandle; use std::sync::Arc; use url::Url; -#[cfg(test)] -mod examples; - /// Client handles for both regular RPC and Engine API endpoints #[derive(Clone)] pub struct NodeClient { diff --git a/crates/e2e-test-utils/src/testsuite/examples.rs b/crates/e2e-test-utils/tests/e2e-testsuite/main.rs similarity index 82% rename from crates/e2e-test-utils/src/testsuite/examples.rs rename to crates/e2e-test-utils/tests/e2e-testsuite/main.rs index 58b66027635..96c976a44ca 100644 --- a/crates/e2e-test-utils/src/testsuite/examples.rs +++ b/crates/e2e-test-utils/tests/e2e-testsuite/main.rs @@ -1,35 +1,41 @@ //! Example tests using the test suite framework.
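+//! +//! These tests now compile as the `e2e_testsuite` binary declared in this +//! crate's `Cargo.toml`, so the `binary(e2e_testsuite)` nextest filter and the +//! dedicated e2e CI workflow pick them up.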
-use crate::testsuite::{ - actions::{ - Action, AssertChainTip, AssertMineBlock, CaptureBlock, CaptureBlockOnNode, - CompareNodeChainTips, CreateFork, MakeCanonical, ProduceBlocks, ReorgTo, SelectActiveNode, - UpdateBlockInfo, - }, - setup::{NetworkSetup, Setup}, - TestBuilder, -}; use alloy_primitives::{Address, B256}; use alloy_rpc_types_engine::PayloadAttributes; use eyre::Result; use reth_chainspec::{ChainSpecBuilder, MAINNET}; +use reth_e2e_test_utils::{ + test_rlp_utils::{generate_test_blocks, write_blocks_to_rlp}, + testsuite::{ + actions::{ + Action, AssertChainTip, AssertMineBlock, CaptureBlock, CaptureBlockOnNode, + CompareNodeChainTips, CreateFork, MakeCanonical, ProduceBlocks, ReorgTo, + SelectActiveNode, UpdateBlockInfo, + }, + setup::{NetworkSetup, Setup}, + Environment, TestBuilder, + }, +}; use reth_node_api::TreeConfig; use reth_node_ethereum::{EthEngineTypes, EthereumNode}; use std::sync::Arc; +use tempfile::TempDir; use tracing::debug; #[tokio::test] async fn test_apply_with_import() -> Result<()> { - use crate::test_rlp_utils::{generate_test_blocks, write_blocks_to_rlp}; - use tempfile::TempDir; - reth_tracing::init_test_tracing(); // Create test chain spec let chain_spec = Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) - .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) .london_activated() .shanghai_activated() .cancun_activated() @@ -49,7 +55,7 @@ async fn test_apply_with_import() -> Result<()> { Setup::default().with_chain_spec(chain_spec).with_network(NetworkSetup::single_node()); // Create environment and apply setup with import - let mut env = crate::testsuite::Environment::::default(); + let mut env = Environment::::default(); setup.apply_with_import::(&mut env, &rlp_path).await?; // Now run test actions on the environment with imported chain @@ -126,7 +132,12 @@ async fn test_testsuite_assert_mine_block() -> Result<()> { .with_chain_spec(Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) - .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) .paris_activated() .build(), )) @@ -163,7 +174,12 @@ async fn test_testsuite_produce_blocks() -> Result<()> { .with_chain_spec(Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) - .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) .cancun_activated() .build(), )) @@ -187,7 +203,12 @@ async fn test_testsuite_create_fork() -> Result<()> { .with_chain_spec(Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) - .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) .cancun_activated() .build(), )) @@ -212,7 +233,12 @@ async fn test_testsuite_reorg_with_tagging() -> Result<()> { .with_chain_spec(Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) - .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .genesis( + serde_json::from_str(include_str!( + 
"../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) .cancun_activated() .build(), )) @@ -239,7 +265,12 @@ async fn test_testsuite_deep_reorg() -> Result<()> { .with_chain_spec(Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) - .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) .cancun_activated() .build(), )) @@ -284,7 +315,12 @@ async fn test_testsuite_multinode_block_production() -> Result<()> { .with_chain_spec(Arc::new( ChainSpecBuilder::default() .chain(MAINNET.chain) - .genesis(serde_json::from_str(include_str!("assets/genesis.json")).unwrap()) + .genesis( + serde_json::from_str(include_str!( + "../../../../crates/e2e-test-utils/src/testsuite/assets/genesis.json" + )) + .unwrap(), + ) .cancun_activated() .build(), )) diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index b5515142cad..c8a4b730cbe 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -139,3 +139,7 @@ test-utils = [ "reth-node-ethereum/test-utils", "reth-evm-ethereum/test-utils", ] + +[[test]] +name = "e2e_testsuite" +path = "tests/e2e-testsuite/main.rs" diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 860243cb17e..3618c80a39d 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -66,8 +66,6 @@ use tracing::*; mod block_buffer; mod cached_state; -#[cfg(test)] -mod e2e_tests; pub mod error; mod instrumented_state; mod invalid_block_hook; diff --git a/crates/engine/tree/src/tree/e2e_tests.rs b/crates/engine/tree/tests/e2e-testsuite/main.rs similarity index 99% rename from crates/engine/tree/src/tree/e2e_tests.rs rename to crates/engine/tree/tests/e2e-testsuite/main.rs index 9eb6a64c885..cc5240f5f84 100644 --- a/crates/engine/tree/src/tree/e2e_tests.rs +++ b/crates/engine/tree/tests/e2e-testsuite/main.rs @@ -1,6 +1,5 @@ //! E2E test implementations using the e2e test framework for engine tree functionality. 
-use crate::tree::TreeConfig; use eyre::Result; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_e2e_test_utils::testsuite::{ @@ -12,6 +11,7 @@ use reth_e2e_test_utils::testsuite::{ setup::{NetworkSetup, Setup}, TestBuilder, }; +use reth_engine_tree::tree::TreeConfig; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_node_ethereum::EthereumNode; use std::sync::Arc; diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 46a282481ea..12367188576 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -119,3 +119,7 @@ test-utils = [ "reth-trie-common/test-utils", ] reth-codec = ["reth-optimism-primitives/reth-codec"] + +[[test]] +name = "e2e_testsuite" +path = "tests/e2e-testsuite/main.rs" diff --git a/crates/optimism/node/tests/e2e/main.rs b/crates/optimism/node/tests/e2e-testsuite/main.rs similarity index 100% rename from crates/optimism/node/tests/e2e/main.rs rename to crates/optimism/node/tests/e2e-testsuite/main.rs diff --git a/crates/optimism/node/tests/e2e/p2p.rs b/crates/optimism/node/tests/e2e-testsuite/p2p.rs similarity index 100% rename from crates/optimism/node/tests/e2e/p2p.rs rename to crates/optimism/node/tests/e2e-testsuite/p2p.rs diff --git a/crates/optimism/node/tests/e2e/testsuite.rs b/crates/optimism/node/tests/e2e-testsuite/testsuite.rs similarity index 100% rename from crates/optimism/node/tests/e2e/testsuite.rs rename to crates/optimism/node/tests/e2e-testsuite/testsuite.rs diff --git a/crates/rpc/rpc-e2e-tests/Cargo.toml b/crates/rpc/rpc-e2e-tests/Cargo.toml index 2484655d902..78c04740497 100644 --- a/crates/rpc/rpc-e2e-tests/Cargo.toml +++ b/crates/rpc/rpc-e2e-tests/Cargo.toml @@ -37,3 +37,7 @@ reth-tracing.workspace = true reth-chainspec.workspace = true reth-node-ethereum.workspace = true alloy-genesis.workspace = true + +[[test]] +name = "e2e_testsuite" +path = "tests/e2e-testsuite/main.rs" diff --git a/crates/rpc/rpc-e2e-tests/tests/rpc_compat.rs b/crates/rpc/rpc-e2e-tests/tests/e2e-testsuite/main.rs similarity index 100% rename from crates/rpc/rpc-e2e-tests/tests/rpc_compat.rs rename to crates/rpc/rpc-e2e-tests/tests/e2e-testsuite/main.rs From da2ab711d3fea1463715318854dc93cb33cdb1e3 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Thu, 10 Jul 2025 13:06:29 +0100 Subject: [PATCH 122/305] refactor: rename `RevealedSparseTrie` to `SerialSparseTrie` (#17345) --- crates/trie/sparse-parallel/src/trie.rs | 4 +- crates/trie/sparse/benches/rlp_node.rs | 4 +- crates/trie/sparse/benches/root.rs | 6 +- crates/trie/sparse/benches/update.rs | 6 +- crates/trie/sparse/src/state.rs | 20 ++--- crates/trie/sparse/src/trie.rs | 105 ++++++++++++------------ crates/trie/trie/src/witness.rs | 4 +- 7 files changed, 73 insertions(+), 76 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index c791a35fd0e..9f36413cb72 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -2170,7 +2170,7 @@ mod tests { use reth_trie_db::DatabaseTrieCursorFactory; use reth_trie_sparse::{ blinded::{BlindedProvider, DefaultBlindedProvider, RevealedNode}, - RevealedSparseTrie, SparseNode, SparseTrieInterface, SparseTrieUpdates, TrieMasks, + SerialSparseTrie, SparseNode, SparseTrieInterface, SparseTrieUpdates, TrieMasks, }; use std::collections::{BTreeMap, BTreeSet}; @@ -4451,7 +4451,7 @@ mod tests { fn test(updates: Vec<(BTreeMap, BTreeSet)>) { let default_provider = 
DefaultBlindedProvider; - let mut serial = RevealedSparseTrie::default().with_updates(true); + let mut serial = SerialSparseTrie::default().with_updates(true); let mut parallel = ParallelSparseTrie::default().with_updates(true); for (update, keys_to_delete) in updates { diff --git a/crates/trie/sparse/benches/rlp_node.rs b/crates/trie/sparse/benches/rlp_node.rs index cfffd614203..97dac845bc0 100644 --- a/crates/trie/sparse/benches/rlp_node.rs +++ b/crates/trie/sparse/benches/rlp_node.rs @@ -7,7 +7,7 @@ use proptest::{prelude::*, test_runner::TestRunner}; use rand::{seq::IteratorRandom, Rng}; use reth_testing_utils::generators; use reth_trie::Nibbles; -use reth_trie_sparse::{blinded::DefaultBlindedProvider, RevealedSparseTrie, SparseTrieInterface}; +use reth_trie_sparse::{blinded::DefaultBlindedProvider, SerialSparseTrie, SparseTrieInterface}; fn update_rlp_node_level(c: &mut Criterion) { let mut rng = generators::rng(); @@ -23,7 +23,7 @@ fn update_rlp_node_level(c: &mut Criterion) { // Create a sparse trie with `size` leaves let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = SerialSparseTrie::default(); for (key, value) in &state { sparse .update_leaf( diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs index d61760910b0..121d3350eb3 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -13,7 +13,7 @@ use reth_trie::{ HashedStorage, }; use reth_trie_common::{HashBuilder, Nibbles}; -use reth_trie_sparse::{blinded::DefaultBlindedProvider, RevealedSparseTrie, SparseTrie}; +use reth_trie_sparse::{blinded::DefaultBlindedProvider, SerialSparseTrie, SparseTrie}; fn calculate_root_from_leaves(c: &mut Criterion) { let mut group = c.benchmark_group("calculate root from leaves"); @@ -42,7 +42,7 @@ fn calculate_root_from_leaves(c: &mut Criterion) { // sparse trie let provider = DefaultBlindedProvider; group.bench_function(BenchmarkId::new("sparse trie", size), |b| { - b.iter_with_setup(SparseTrie::::revealed_empty, |mut sparse| { + b.iter_with_setup(SparseTrie::::revealed_empty, |mut sparse| { for (key, value) in &state { sparse .update_leaf( @@ -189,7 +189,7 @@ fn calculate_root_from_leaves_repeated(c: &mut Criterion) { group.bench_function(benchmark_id, |b| { b.iter_with_setup( || { - let mut sparse = SparseTrie::::revealed_empty(); + let mut sparse = SparseTrie::::revealed_empty(); for (key, value) in &init_state { sparse .update_leaf( diff --git a/crates/trie/sparse/benches/update.rs b/crates/trie/sparse/benches/update.rs index dd4005291a0..66669e0d161 100644 --- a/crates/trie/sparse/benches/update.rs +++ b/crates/trie/sparse/benches/update.rs @@ -5,7 +5,7 @@ use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criteri use proptest::{prelude::*, strategy::ValueTree}; use rand::seq::IteratorRandom; use reth_trie_common::Nibbles; -use reth_trie_sparse::{blinded::DefaultBlindedProvider, RevealedSparseTrie, SparseTrie}; +use reth_trie_sparse::{blinded::DefaultBlindedProvider, SerialSparseTrie, SparseTrie}; const LEAF_COUNTS: [usize; 2] = [1_000, 5_000]; @@ -20,7 +20,7 @@ fn update_leaf(c: &mut Criterion) { b.iter_batched( || { - let mut trie = SparseTrie::::revealed_empty(); + let mut trie = SparseTrie::::revealed_empty(); // Pre-populate with data for (path, value) in leaves.iter().cloned() { trie.update_leaf(path, value, &provider).unwrap(); @@ -64,7 +64,7 @@ fn remove_leaf(c: &mut Criterion) { b.iter_batched( || { - let mut trie = 
SparseTrie::::revealed_empty(); + let mut trie = SparseTrie::::revealed_empty(); // Pre-populate with data for (path, value) in leaves.iter().cloned() { trie.update_leaf(path, value, &provider).unwrap(); diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 2fce6a99acf..2dc443ac1bb 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,7 +1,7 @@ use crate::{ blinded::{BlindedProvider, BlindedProviderFactory}, traits::SparseTrieInterface, - LeafLookup, RevealedSparseTrie, SparseTrie, TrieMasks, + LeafLookup, SerialSparseTrie, SparseTrie, TrieMasks, }; use alloc::{collections::VecDeque, vec::Vec}; use alloy_primitives::{ @@ -24,8 +24,8 @@ use tracing::trace; #[derive(Debug)] /// Sparse state trie representing lazy-loaded Ethereum state trie. pub struct SparseStateTrie< - A = RevealedSparseTrie, // Account trie implementation - S = RevealedSparseTrie, // Storage trie implementation + A = SerialSparseTrie, // Account trie implementation + S = SerialSparseTrie, // Storage trie implementation > { /// Sparse account trie. state: SparseTrie, @@ -930,7 +930,7 @@ mod tests { #[test] fn validate_root_node_first_node_not_root() { - let sparse = SparseStateTrie::::default(); + let sparse = SparseStateTrie::::default(); let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; assert_matches!( sparse.validate_root_node(&mut proof.into_iter().peekable()).map_err(|e| e.into_kind()), @@ -940,7 +940,7 @@ mod tests { #[test] fn validate_root_node_invalid_proof_with_empty_root() { - let sparse = SparseStateTrie::::default(); + let sparse = SparseStateTrie::::default(); let proof = [ (Nibbles::default(), Bytes::from([EMPTY_STRING_CODE])), (Nibbles::from_nibbles([0x1]), Bytes::new()), @@ -959,7 +959,7 @@ mod tests { let proofs = hash_builder.take_proof_nodes(); assert_eq!(proofs.len(), 1); - let mut sparse = SparseStateTrie::::default(); + let mut sparse = SparseStateTrie::::default(); assert_eq!(sparse.state, SparseTrie::Blind(None)); sparse.reveal_account(Default::default(), proofs.into_inner()).unwrap(); @@ -974,7 +974,7 @@ mod tests { let proofs = hash_builder.take_proof_nodes(); assert_eq!(proofs.len(), 1); - let mut sparse = SparseStateTrie::::default(); + let mut sparse = SparseStateTrie::::default(); assert!(sparse.storages.is_empty()); sparse @@ -989,7 +989,7 @@ mod tests { #[test] fn reveal_account_path_twice() { let provider_factory = DefaultBlindedProviderFactory; - let mut sparse = SparseStateTrie::::default(); + let mut sparse = SparseStateTrie::::default(); let leaf_value = alloy_rlp::encode(TrieAccount::default()); let leaf_1 = alloy_rlp::encode(TrieNode::Leaf(LeafNode::new( @@ -1061,7 +1061,7 @@ mod tests { #[test] fn reveal_storage_path_twice() { let provider_factory = DefaultBlindedProviderFactory; - let mut sparse = SparseStateTrie::::default(); + let mut sparse = SparseStateTrie::::default(); let leaf_value = alloy_rlp::encode(TrieAccount::default()); let leaf_1 = alloy_rlp::encode(TrieNode::Leaf(LeafNode::new( @@ -1193,7 +1193,7 @@ mod tests { let proof_nodes = hash_builder.take_proof_nodes(); let provider_factory = DefaultBlindedProviderFactory; - let mut sparse = SparseStateTrie::::default().with_updates(true); + let mut sparse = SparseStateTrie::::default().with_updates(true); sparse .reveal_decoded_multiproof( MultiProof { diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index a576956f213..4e0d03b0900 100644 --- a/crates/trie/sparse/src/trie.rs +++ 
b/crates/trie/sparse/src/trie.rs @@ -26,7 +26,7 @@ use smallvec::SmallVec; use tracing::trace; /// The level below which the sparse trie hashes are calculated in -/// [`RevealedSparseTrie::update_subtrie_hashes`]. +/// [`SerialSparseTrie::update_subtrie_hashes`]. const SPARSE_TRIE_SUBTRIE_HASHES_LEVEL: usize = 2; /// A sparse trie that is either in a "blind" state (no nodes are revealed, root node hash is @@ -42,13 +42,13 @@ const SPARSE_TRIE_SUBTRIE_HASHES_LEVEL: usize = 2; /// 3. Incremental operations - nodes can be revealed as needed without loading the entire trie. /// This is what gives rise to the notion of a "sparse" trie. #[derive(PartialEq, Eq, Debug)] -pub enum SparseTrie { +pub enum SparseTrie { /// The trie is blind -- no nodes have been revealed /// /// This is the default state. In this state, the trie cannot be directly queried or modified /// until nodes are revealed. /// - /// In this state the `SparseTrie` can optionally carry with it a cleared `RevealedSparseTrie`. + /// In this state the `SparseTrie` can optionally carry with it a cleared `SerialSparseTrie`. /// This allows for reusing the trie's allocations between payload executions. Blind(Option>), /// Some nodes in the Trie have been revealed. @@ -71,11 +71,11 @@ impl SparseTrie { /// # Examples /// /// ``` - /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, RevealedSparseTrie, SparseTrie}; + /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, SerialSparseTrie, SparseTrie}; /// - /// let trie = SparseTrie::::blind(); + /// let trie = SparseTrie::::blind(); /// assert!(trie.is_blind()); - /// let trie = SparseTrie::::default(); + /// let trie = SparseTrie::::default(); /// assert!(trie.is_blind()); /// ``` pub const fn blind() -> Self { @@ -87,9 +87,9 @@ impl SparseTrie { /// # Examples /// /// ``` - /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, RevealedSparseTrie, SparseTrie}; + /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, SerialSparseTrie, SparseTrie}; /// - /// let trie = SparseTrie::::revealed_empty(); + /// let trie = SparseTrie::::revealed_empty(); /// assert!(!trie.is_blind()); /// ``` pub fn revealed_empty() -> Self { @@ -256,7 +256,7 @@ impl SparseTrie { /// The opposite is also true. /// - All keys in `values` collection are full leaf paths. #[derive(Clone, PartialEq, Eq)] -pub struct RevealedSparseTrie { +pub struct SerialSparseTrie { /// Map from a path (nibbles) to its corresponding sparse trie node. /// This contains all of the revealed nodes in trie. 
nodes: HashMap, @@ -276,9 +276,9 @@ pub struct RevealedSparseTrie { rlp_buf: Vec, } -impl fmt::Debug for RevealedSparseTrie { +impl fmt::Debug for SerialSparseTrie { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("RevealedSparseTrie") + f.debug_struct("SerialSparseTrie") .field("nodes", &self.nodes) .field("branch_tree_masks", &self.branch_node_tree_masks) .field("branch_hash_masks", &self.branch_node_hash_masks) @@ -296,7 +296,7 @@ fn encode_nibbles(nibbles: &Nibbles) -> String { encoded[..nibbles.len()].to_string() } -impl fmt::Display for RevealedSparseTrie { +impl fmt::Display for SerialSparseTrie { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // This prints the trie in preorder traversal, using a stack let mut stack = Vec::new(); @@ -362,7 +362,7 @@ impl fmt::Display for RevealedSparseTrie { } } -impl Default for RevealedSparseTrie { +impl Default for SerialSparseTrie { fn default() -> Self { Self { nodes: HashMap::from_iter([(Nibbles::default(), SparseNode::Empty)]), @@ -376,7 +376,7 @@ impl Default for RevealedSparseTrie { } } -impl SparseTrieInterface for RevealedSparseTrie { +impl SparseTrieInterface for SerialSparseTrie { fn with_root( mut self, root: TrieNode, @@ -385,7 +385,7 @@ impl SparseTrieInterface for RevealedSparseTrie { ) -> SparseTrieResult { self = self.with_updates(retain_updates); - // A fresh/cleared `RevealedSparseTrie` has a `SparseNode::Empty` at its root. Delete that + // A fresh/cleared `SerialSparseTrie` has a `SparseNode::Empty` at its root. Delete that // so we can reveal the new root node. let path = Nibbles::default(); let _removed_root = self.nodes.remove(&path).expect("root node should exist"); @@ -1055,7 +1055,7 @@ impl SparseTrieInterface for RevealedSparseTrie { } } -impl RevealedSparseTrie { +impl SerialSparseTrie { /// Creates a new revealed sparse trie from the given root node. /// /// This function initializes the internal structures and then reveals the root. @@ -1838,7 +1838,7 @@ struct RemovedSparseNode { unset_branch_nibble: Option, } -/// Collection of reusable buffers for [`RevealedSparseTrie::rlp_node`] calculations. +/// Collection of reusable buffers for [`SerialSparseTrie::rlp_node`] calculations. /// /// These buffers reduce allocations when computing RLP representations during trie updates. 
#[derive(Debug, Default)] @@ -1941,7 +1941,7 @@ mod find_leaf_tests { fn find_leaf_existing_leaf() { // Create a simple trie with one leaf let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); let value = b"test_value".to_vec(); @@ -1960,7 +1960,7 @@ mod find_leaf_tests { fn find_leaf_value_mismatch() { // Create a simple trie with one leaf let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); let value = b"test_value".to_vec(); let wrong_value = b"wrong_value".to_vec(); @@ -1978,7 +1978,7 @@ mod find_leaf_tests { #[test] fn find_leaf_not_found_empty_trie() { // Empty trie - let sparse = RevealedSparseTrie::default(); + let sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); // Leaf should not exist @@ -1991,7 +1991,7 @@ mod find_leaf_tests { #[test] fn find_leaf_empty_trie() { - let sparse = RevealedSparseTrie::default(); + let sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let result = sparse.find_leaf(&path, None); @@ -2003,7 +2003,7 @@ mod find_leaf_tests { #[test] fn find_leaf_exists_no_value_check() { let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); sparse.update_leaf(path, VALUE_A(), &provider).unwrap(); @@ -2014,7 +2014,7 @@ mod find_leaf_tests { #[test] fn find_leaf_exists_with_value_check_ok() { let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let value = VALUE_A(); sparse.update_leaf(path, value.clone(), &provider).unwrap(); @@ -2026,7 +2026,7 @@ mod find_leaf_tests { #[test] fn find_leaf_exclusion_branch_divergence() { let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = SerialSparseTrie::default(); let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); // Belongs to same branch let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x7, 0x8]); // Diverges at nibble 7 @@ -2044,7 +2044,7 @@ mod find_leaf_tests { #[test] fn find_leaf_exclusion_extension_divergence() { let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = SerialSparseTrie::default(); // This will create an extension node at root with key 0x12 let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); // This path diverges from the extension key @@ -2062,7 +2062,7 @@ mod find_leaf_tests { #[test] fn find_leaf_exclusion_leaf_divergence() { let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = SerialSparseTrie::default(); let existing_leaf_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); @@ -2080,7 +2080,7 @@ mod find_leaf_tests { #[test] fn find_leaf_exclusion_path_ends_at_branch() { let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let 
mut sparse = SerialSparseTrie::default(); let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2]); // Path of the branch itself @@ -2118,7 +2118,7 @@ mod find_leaf_tests { ); // Branch at 0x123, child 4 nodes.insert(leaf_path, SparseNode::Hash(blinded_hash)); // Blinded node at 0x1234 - let sparse = RevealedSparseTrie { + let sparse = SerialSparseTrie { nodes, branch_node_tree_masks: Default::default(), branch_node_hash_masks: Default::default(), @@ -2161,7 +2161,7 @@ mod find_leaf_tests { let mut values = HashMap::with_hasher(RandomState::default()); values.insert(path_revealed_leaf, VALUE_A()); - let sparse = RevealedSparseTrie { + let sparse = SerialSparseTrie { nodes, branch_node_tree_masks: Default::default(), branch_node_hash_masks: Default::default(), @@ -2208,7 +2208,7 @@ mod find_leaf_tests { // 3. Initialize the sparse trie using from_root // This will internally create Hash nodes for paths "1" and "5" initially. - let mut sparse = RevealedSparseTrie::from_root(root_trie_node, TrieMasks::none(), false) + let mut sparse = SerialSparseTrie::from_root(root_trie_node, TrieMasks::none(), false) .expect("Failed to create trie from root"); // Assertions before we reveal child5 @@ -2357,10 +2357,7 @@ mod tests { } /// Assert that the sparse trie nodes and the proof nodes from the hash builder are equal. - fn assert_eq_sparse_trie_proof_nodes( - sparse_trie: &RevealedSparseTrie, - proof_nodes: ProofNodes, - ) { + fn assert_eq_sparse_trie_proof_nodes(sparse_trie: &SerialSparseTrie, proof_nodes: ProofNodes) { let proof_nodes = proof_nodes .into_nodes_sorted() .into_iter() @@ -2404,8 +2401,8 @@ mod tests { #[test] fn sparse_trie_is_blind() { - assert!(SparseTrie::::blind().is_blind()); - assert!(!SparseTrie::::revealed_empty().is_blind()); + assert!(SparseTrie::::blind().is_blind()); + assert!(!SparseTrie::::revealed_empty().is_blind()); } #[test] @@ -2427,7 +2424,7 @@ mod tests { ); let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default().with_updates(true); + let mut sparse = SerialSparseTrie::default().with_updates(true); sparse.update_leaf(key, value_encoded(), &provider).unwrap(); let sparse_root = sparse.root(); let sparse_updates = sparse.take_updates(); @@ -2458,7 +2455,7 @@ mod tests { ); let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default().with_updates(true); + let mut sparse = SerialSparseTrie::default().with_updates(true); for path in &paths { sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); } @@ -2489,7 +2486,7 @@ mod tests { ); let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default().with_updates(true); + let mut sparse = SerialSparseTrie::default().with_updates(true); for path in &paths { sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); } @@ -2528,7 +2525,7 @@ mod tests { ); let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default().with_updates(true); + let mut sparse = SerialSparseTrie::default().with_updates(true); for path in &paths { sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); } @@ -2568,7 +2565,7 @@ mod tests { ); let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default().with_updates(true); + let mut sparse = SerialSparseTrie::default().with_updates(true); for path in &paths { sparse.update_leaf(*path, 
old_value_encoded.clone(), &provider).unwrap(); } @@ -2603,7 +2600,7 @@ mod tests { reth_tracing::init_test_tracing(); let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = SerialSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -2857,7 +2854,7 @@ mod tests { )); let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::from_root( + let mut sparse = SerialSparseTrie::from_root( branch.clone(), TrieMasks { hash_mask: Some(TrieMask::new(0b01)), tree_mask: None }, false, @@ -2902,7 +2899,7 @@ mod tests { )); let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::from_root( + let mut sparse = SerialSparseTrie::from_root( branch.clone(), TrieMasks { hash_mask: Some(TrieMask::new(0b01)), tree_mask: None }, false, @@ -2943,7 +2940,7 @@ mod tests { let mut state = BTreeMap::default(); let default_provider = DefaultBlindedProvider; let provider_factory = create_test_provider_factory(); - let mut sparse = RevealedSparseTrie::default().with_updates(true); + let mut sparse = SerialSparseTrie::default().with_updates(true); for (update, keys_to_delete) in updates { // Insert state updates into the sparse trie and calculate the root @@ -3103,7 +3100,7 @@ mod tests { ); let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::from_root( + let mut sparse = SerialSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(), @@ -3213,7 +3210,7 @@ mod tests { ); let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::from_root( + let mut sparse = SerialSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(), @@ -3316,7 +3313,7 @@ mod tests { ); let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::from_root( + let mut sparse = SerialSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(), @@ -3371,7 +3368,7 @@ mod tests { #[test] fn sparse_trie_get_changed_nodes_at_depth() { let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = SerialSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -3486,7 +3483,7 @@ mod tests { ); let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = SerialSparseTrie::default(); sparse.update_leaf(key1(), value_encoded(), &provider).unwrap(); sparse.update_leaf(key2(), value_encoded(), &provider).unwrap(); let sparse_root = sparse.root(); @@ -3499,7 +3496,7 @@ mod tests { #[test] fn sparse_trie_wipe() { let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default().with_updates(true); + let mut sparse = SerialSparseTrie::default().with_updates(true); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -3549,7 +3546,7 @@ mod tests { // tests that if we fill a sparse trie with some nodes and then clear it, it has the same // contents as an empty sparse trie let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = SerialSparseTrie::default(); let value = 
alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); sparse .update_leaf(Nibbles::from_nibbles([0x5, 0x0, 0x2, 0x3, 0x1]), value.clone(), &provider) @@ -3566,14 +3563,14 @@ mod tests { sparse.clear(); - let empty_trie = RevealedSparseTrie::default(); + let empty_trie = SerialSparseTrie::default(); assert_eq!(empty_trie, sparse); } #[test] fn sparse_trie_display() { let provider = DefaultBlindedProvider; - let mut sparse = RevealedSparseTrie::default(); + let mut sparse = SerialSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index 50b3834a1f4..b3d8c6d1411 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -22,7 +22,7 @@ use reth_execution_errors::{ use reth_trie_common::{MultiProofTargets, Nibbles}; use reth_trie_sparse::{ blinded::{BlindedProvider, BlindedProviderFactory, RevealedNode}, - RevealedSparseTrie, SparseStateTrie, + SerialSparseTrie, SparseStateTrie, }; use std::sync::{mpsc, Arc}; @@ -154,7 +154,7 @@ where ), tx, ); - let mut sparse_trie = SparseStateTrie::<RevealedSparseTrie>::new(); + let mut sparse_trie = SparseStateTrie::<SerialSparseTrie>::new(); sparse_trie.reveal_multiproof(multiproof)?; // Attempt to update state trie to gather additional information for the witness. From 60c86aeca217fdaa2acfc83da675eab93fa071db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Thu, 10 Jul 2025 14:37:17 +0200 Subject: [PATCH 123/305] feat(era1): add subcommand `export-era` (#17132) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/cli/commands/Cargo.toml | 1 + crates/cli/commands/src/export_era.rs | 109 +++++++++++++ crates/cli/commands/src/lib.rs | 1 + crates/era-utils/src/export.rs | 18 ++- crates/era-utils/tests/it/genesis.rs | 5 +- crates/era-utils/tests/it/history.rs | 29 +++- crates/era/src/era1_types.rs | 65 +++++--- crates/ethereum/cli/src/interface.rs | 9 +- docs/vocs/docs/pages/cli/SUMMARY.mdx | 1 + docs/vocs/docs/pages/cli/reth.mdx | 1 + docs/vocs/docs/pages/cli/reth/export-era.mdx | 162 +++++++++++++++++++ docs/vocs/sidebar.ts | 4 + 13 files changed, 372 insertions(+), 34 deletions(-) create mode 100644 crates/cli/commands/src/export_era.rs create mode 100644 docs/vocs/docs/pages/cli/reth/export-era.mdx diff --git a/Cargo.lock b/Cargo.lock index e668dd6ad77..d091d564a4d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7381,6 +7381,7 @@ dependencies = [ "reth-discv5", "reth-downloaders", "reth-ecies", + "reth-era", "reth-era-downloader", "reth-era-utils", "reth-eth-wire", diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 548049bd7a9..06ceb9423c1 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -24,6 +24,7 @@ reth-db-common.workspace = true reth-downloaders.workspace = true reth-ecies.workspace = true reth-eth-wire.workspace = true +reth-era.workspace = true reth-era-downloader.workspace = true reth-era-utils.workspace = true reth-etl.workspace = true diff --git a/crates/cli/commands/src/export_era.rs b/crates/cli/commands/src/export_era.rs new file mode 100644 index 00000000000..dbedf1852e5 --- /dev/null +++ b/crates/cli/commands/src/export_era.rs @@ -0,0 +1,109 @@ +//! Command for exporting block data from the database to ERA1 files.
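+//! +//! Illustrative usage (a sketch built from the flags defined below; the block numbers and path are examples only): +//! `reth export-era --first-block-number 0 --last-block-number 8191 --path ./era1-export`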
+ +use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; +use clap::{Args, Parser}; +use reth_chainspec::{EthChainSpec, EthereumHardforks}; +use reth_cli::chainspec::ChainSpecParser; +use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; +use reth_era_utils as era1; +use reth_provider::DatabaseProviderFactory; +use std::{path::PathBuf, sync::Arc}; +use tracing::info; + +// Default folder name for era1 export files +const ERA1_EXPORT_FOLDER_NAME: &str = "era1-export"; + +#[derive(Debug, Parser)] +pub struct ExportEraCommand<C: ChainSpecParser> { + #[command(flatten)] + env: EnvironmentArgs<C>, + + #[clap(flatten)] + export: ExportArgs, +} + +#[derive(Debug, Args)] +pub struct ExportArgs { + /// Optional first block number to export from the db. + /// Defaults to 0. + #[arg(long, value_name = "first-block-number", verbatim_doc_comment)] + first_block_number: Option<u64>, + /// Optional last block number to export from the db. + /// Defaults to 8191. + #[arg(long, value_name = "last-block-number", verbatim_doc_comment)] + last_block_number: Option<u64>, + /// The maximum number of blocks per file; lowering it decreases the size of each file. + /// Must be less than or equal to 8192. + #[arg(long, value_name = "max-blocks-per-file", verbatim_doc_comment)] + max_blocks_per_file: Option<u64>, + /// The directory path to export era1 files to. + /// The block data is read from the database. + #[arg(long, value_name = "EXPORT_ERA1_PATH", verbatim_doc_comment)] + path: Option<PathBuf>, +} + +impl<C: ChainSpecParser<ChainSpec: EthChainSpec + EthereumHardforks>> ExportEraCommand<C> { + /// Execute `export-era` command + pub async fn execute<N>(self) -> eyre::Result<()> + where + N: CliNodeTypes<ChainSpec = C::ChainSpec>, + { + let Environment { provider_factory, .. } = self.env.init::<N>(AccessRights::RO)?; + + // Either specified path or default to `<datadir>/<chain>/era1-export/` + let data_dir = match &self.export.path { + Some(path) => path.clone(), + None => self + .env + .datadir + .resolve_datadir(self.env.chain.chain()) + .data_dir() + .join(ERA1_EXPORT_FOLDER_NAME), + }; + + let export_config = era1::ExportConfig { + network: self.env.chain.chain().to_string(), + first_block_number: self.export.first_block_number.unwrap_or(0), + last_block_number: self + .export + .last_block_number + .unwrap_or(MAX_BLOCKS_PER_ERA1 as u64 - 1), + max_blocks_per_file: self + .export + .max_blocks_per_file + .unwrap_or(MAX_BLOCKS_PER_ERA1 as u64), + dir: data_dir, + }; + + export_config.validate()?; + + info!( + target: "reth::cli", + "Starting ERA1 block export: blocks {}-{} to {}", + export_config.first_block_number, + export_config.last_block_number, + export_config.dir.display() + ); + + // Only read access is needed for the database provider + let provider = provider_factory.database_provider_ro()?; + + let exported_files = era1::export(&provider, &export_config)?; + + info!( + target: "reth::cli", + "Successfully exported {} ERA1 files to {}", + exported_files.len(), + export_config.dir.display() + ); + + Ok(()) + } +} + +impl<C: ChainSpecParser> ExportEraCommand<C> { + /// Returns the underlying chain being used to run this command + pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> { + Some(&self.env.chain) + } +} diff --git a/crates/cli/commands/src/lib.rs b/crates/cli/commands/src/lib.rs index ed57a55aae8..bf4504074a5 100644 --- a/crates/cli/commands/src/lib.rs +++ b/crates/cli/commands/src/lib.rs @@ -13,6 +13,7 @@ pub mod config_cmd; pub mod db; pub mod download; pub mod dump_genesis; +pub mod export_era; pub mod import; pub mod import_era; pub mod import_op; diff --git a/crates/era-utils/src/export.rs b/crates/era-utils/src/export.rs index
5ff1a0d78ca..f76b3f82a12 100644 --- a/crates/era-utils/src/export.rs +++ b/crates/era-utils/src/export.rs @@ -18,7 +18,7 @@ use std::{ path::PathBuf, time::{Duration, Instant}, }; -use tracing::{info, warn}; +use tracing::{debug, info, warn}; const REPORT_INTERVAL_SECS: u64 = 10; const ENTRY_HEADER_SIZE: usize = 8; @@ -38,7 +38,7 @@ pub struct ExportConfig { /// It can never be larger than `MAX_BLOCKS_PER_ERA1 = 8192` /// See also <`https://github.com/eth-clients/e2store-format-specs/blob/main/formats/era1.md`> pub max_blocks_per_file: u64, - /// Network name + /// Network name. pub network: String, } @@ -133,7 +133,19 @@ where let headers = provider.headers_range(start_block..=end_block)?; - let era1_id = Era1Id::new(&config.network, start_block, block_count as u32); + // Extract first 4 bytes of last block's state root as historical identifier + let historical_root = headers + .last() + .map(|header| { + let state_root = header.state_root(); + [state_root[0], state_root[1], state_root[2], state_root[3]] + }) + .unwrap_or([0u8; 4]); + + let era1_id = Era1Id::new(&config.network, start_block, block_count as u32) + .with_hash(historical_root); + + debug!("Final file name {}", era1_id.to_file_name()); let file_path = config.dir.join(era1_id.to_file_name()); let file = std::fs::File::create(&file_path)?; let mut writer = Era1Writer::new(file); diff --git a/crates/era-utils/tests/it/genesis.rs b/crates/era-utils/tests/it/genesis.rs index dacef15eeac..0c35c458aac 100644 --- a/crates/era-utils/tests/it/genesis.rs +++ b/crates/era-utils/tests/it/genesis.rs @@ -23,7 +23,10 @@ fn test_export_with_genesis_only() { let file_path = &exported_files[0]; assert!(file_path.exists(), "Exported file should exist on disk"); let file_name = file_path.file_name().unwrap().to_str().unwrap(); - assert!(file_name.starts_with("mainnet-0-"), "File should have correct prefix"); + assert!( + file_name.starts_with("mainnet-00000-00001-"), + "File should have correct prefix with era format" + ); assert!(file_name.ends_with(".era1"), "File should have correct extension"); let metadata = fs::metadata(file_path).unwrap(); assert!(metadata.len() > 0, "Exported file should not be empty"); diff --git a/crates/era-utils/tests/it/history.rs b/crates/era-utils/tests/it/history.rs index 4811e729539..8e720f1001b 100644 --- a/crates/era-utils/tests/it/history.rs +++ b/crates/era-utils/tests/it/history.rs @@ -1,6 +1,7 @@ use crate::{ClientWithFakeIndex, ITHACA_ERA_INDEX_URL}; use reqwest::{Client, Url}; use reth_db_common::init::init_genesis; +use reth_era::execution_types::MAX_BLOCKS_PER_ERA1; use reth_era_downloader::{EraClient, EraStream, EraStreamConfig}; use reth_era_utils::{export, import, ExportConfig}; use reth_etl::Collector; @@ -129,10 +130,30 @@ async fn test_roundtrip_export_after_import() { blocks_numbers_per_file ); - // Verify exact ERA1 naming convention: `mainnet-{start_block}-{block_count}.era1` + // Verify format: mainnet-{era_number:05}-{era_count:05}-{8hexchars}.era1 + let era_number = file_start_block / MAX_BLOCKS_PER_ERA1 as u64; + + // Era count is always 1 for this test, as we are only exporting one era + let expected_prefix = format!("mainnet-{:05}-{:05}-", era_number, 1); + let file_name = file_path.file_name().unwrap().to_str().unwrap(); - let expected_filename = - format!("mainnet-{file_start_block}-{blocks_numbers_per_file}.era1"); - assert_eq!(file_name, expected_filename, "File {} should have correct name", i + 1); + assert!( + file_name.starts_with(&expected_prefix), + "File {} should start with 
'{expected_prefix}', got '{file_name}'", i + 1 ); + + // Verify the hash part is 8 characters + let hash_start = expected_prefix.len(); + let hash_end = file_name.len() - 5; // remove ".era1" + let hash_part = &file_name[hash_start..hash_end]; + assert_eq!( + hash_part.len(), + 8, + "File {} hash should be 8 characters, got {} in '{}'", + i + 1, + hash_part.len(), + file_name + ); } } diff --git a/crates/era/src/era1_types.rs b/crates/era/src/era1_types.rs index 135f7225f60..3078f952979 100644 --- a/crates/era/src/era1_types.rs +++ b/crates/era/src/era1_types.rs @@ -4,7 +4,7 @@ use crate::{ e2s_types::{E2sError, Entry}, - execution_types::{Accumulator, BlockTuple}, + execution_types::{Accumulator, BlockTuple, MAX_BLOCKS_PER_ERA1}, }; use alloy_primitives::BlockNumber; @@ -155,6 +155,7 @@ pub struct Era1Id { pub block_count: u32, /// Optional hash identifier for this file + /// First 4 bytes of the last historical root in the last state in the era file pub hash: Option<[u8; 4]>, } @@ -174,24 +175,38 @@ impl Era1Id { self } - /// Convert to file name following the era1 file naming: - /// `<network-name>-<start-block>-<block-count>.era1` - /// inspired from era file naming convention in + /// Convert to file name following the era file naming: + /// `<network-name>-<era-number>-<era-count>-<hash>.era(1)` /// /// See also pub fn to_file_name(&self) -> String { + // Find which era the first block belongs to + let era_number = self.start_block / MAX_BLOCKS_PER_ERA1 as u64; + let era_count = self.calculate_era_count(era_number); if let Some(hash) = self.hash { - // Format with zero-padded era number and hash: - // For example network-00000-5ec1ffb8.era1 format!( - "{}-{:05}-{:02x}{:02x}{:02x}{:02x}.era1", - self.network_name, self.start_block, hash[0], hash[1], hash[2], hash[3] + "{}-{:05}-{:05}-{:02x}{:02x}{:02x}{:02x}.era1", + self.network_name, era_number, era_count, hash[0], hash[1], hash[2], hash[3] ) } else { - // Original format without hash - format!("{}-{}-{}.era1", self.network_name, self.start_block, self.block_count) + // era spec format with placeholder hash when no hash available + // Format: `<network-name>-<era-number>-<era-count>-00000000.era1` + format!("{}-{:05}-{:05}-00000000.era1", self.network_name, era_number, era_count) } } + + // Helper function to calculate the number of eras spanned by an era1 file. + // Because the user can choose how many blocks go into each era1 file, the span must be computed. + // It is usually 1, but it can never be more than 2 eras per file, as an era1 file holds at most + // 8192 blocks.
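+    // Worked example (hypothetical numbers): start_block = 8000 with block_count = 400 covers blocks +    // 8000..=8399; block 8000 falls in era 0 and block 8399 in era 1 (an era spans 8192 blocks), so the +    // count below evaluates to 1 - 0 + 1 = 2.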
+    const fn calculate_era_count(&self, first_era: u64) -> u64 { + // Calculate the actual last block number in the range + let last_block = self.start_block + self.block_count as u64 - 1; + // Find which era the last block belongs to + let last_era = last_block / MAX_BLOCKS_PER_ERA1 as u64; + // Count how many eras we span + last_era - first_era + 1 + } } #[cfg(test)] @@ -330,33 +345,33 @@ mod tests { #[test_case::test_case( Era1Id::new("mainnet", 0, 8192).with_hash([0x5e, 0xc1, 0xff, 0xb8]), - "mainnet-00000-5ec1ffb8.era1"; - "Mainnet 00000" + "mainnet-00000-00001-5ec1ffb8.era1"; + "Mainnet era 0" )] #[test_case::test_case( - Era1Id::new("mainnet", 12, 8192).with_hash([0x5e, 0xcb, 0x9b, 0xf9]), - "mainnet-00012-5ecb9bf9.era1"; - "Mainnet 00012" + Era1Id::new("mainnet", 8192, 8192).with_hash([0x5e, 0xcb, 0x9b, 0xf9]), + "mainnet-00001-00001-5ecb9bf9.era1"; + "Mainnet era 1" )] #[test_case::test_case( - Era1Id::new("sepolia", 5, 8192).with_hash([0x90, 0x91, 0x84, 0x72]), - "sepolia-00005-90918472.era1"; - "Sepolia 00005" + Era1Id::new("sepolia", 0, 8192).with_hash([0x90, 0x91, 0x84, 0x72]), + "sepolia-00000-00001-90918472.era1"; + "Sepolia era 0" )] #[test_case::test_case( - Era1Id::new("sepolia", 19, 8192).with_hash([0xfa, 0x77, 0x00, 0x19]), - "sepolia-00019-fa770019.era1"; - "Sepolia 00019" + Era1Id::new("sepolia", 155648, 8192).with_hash([0xfa, 0x77, 0x00, 0x19]), + "sepolia-00019-00001-fa770019.era1"; + "Sepolia era 19" )] #[test_case::test_case( Era1Id::new("mainnet", 1000, 100), - "mainnet-1000-100.era1"; + "mainnet-00000-00001-00000000.era1"; "ID without hash" )] #[test_case::test_case( - Era1Id::new("sepolia", 12345, 8192).with_hash([0xab, 0xcd, 0xef, 0x12]), - "sepolia-12345-abcdef12.era1"; - "Large block number" + Era1Id::new("sepolia", 101130240, 8192).with_hash([0xab, 0xcd, 0xef, 0x12]), + "sepolia-12345-00001-abcdef12.era1"; + "Large block number era 12345" )] fn test_era1id_file_naming(id: Era1Id, expected_file_name: &str) { let actual_file_name = id.to_file_name(); diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index 3d89c1317e1..f4920eff4b5 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -5,7 +5,7 @@ use clap::{Parser, Subcommand}; use reth_chainspec::ChainSpec; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::{ - config_cmd, db, download, dump_genesis, import, import_era, init_cmd, init_state, + config_cmd, db, download, dump_genesis, export_era, import, import_era, init_cmd, init_state, launcher::FnLauncher, node::{self, NoArgs}, p2p, prune, re_execute, recover, stage, @@ -166,6 +166,9 @@ impl<C: ChainSpecParser<ChainSpec = ChainSpec>, Ext: clap::Args + fmt::Debug> Cl Commands::ImportEra(command) => { runner.run_blocking_until_ctrl_c(command.execute::<EthereumNode>()) } + Commands::ExportEra(command) => { + runner.run_blocking_until_ctrl_c(command.execute::<EthereumNode>()) + } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), Commands::Db(command) => { runner.run_blocking_until_ctrl_c(command.execute::<EthereumNode>()) @@ -221,6 +224,9 @@ pub enum Commands { /// This syncs ERA encoded blocks from a directory. #[command(name = "import-era")] ImportEra(import_era::ImportEraCommand<C>), + /// Exports blocks to era1 files in a specified directory. + #[command(name = "export-era")] + ExportEra(export_era::ExportEraCommand<C>), /// Dumps genesis block JSON configuration to stdout.
DumpGenesis(dump_genesis::DumpGenesisCommand<C>), /// Database debugging utilities @@ -264,6 +270,7 @@ impl<C: ChainSpecParser, Ext: clap::Args + fmt::Debug> Commands<C, Ext> { Self::Init(cmd) => cmd.chain_spec(), Self::InitState(cmd) => cmd.chain_spec(), Self::Import(cmd) => cmd.chain_spec(), + Self::ExportEra(cmd) => cmd.chain_spec(), Self::ImportEra(cmd) => cmd.chain_spec(), Self::DumpGenesis(cmd) => cmd.chain_spec(), Self::Db(cmd) => cmd.chain_spec(), diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx index 44d7408253f..fff16ea5821 100644 --- a/docs/vocs/docs/pages/cli/SUMMARY.mdx +++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx @@ -4,6 +4,7 @@ - [`reth init-state`](/cli/reth/init-state) - [`reth import`](/cli/reth/import) - [`reth import-era`](/cli/reth/import-era) + - [`reth export-era`](/cli/reth/export-era) - [`reth dump-genesis`](/cli/reth/dump-genesis) - [`reth db`](/cli/reth/db) - [`reth db stats`](/cli/reth/db/stats) diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 031fe62f465..04775950b2e 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -14,6 +14,7 @@ Commands: init-state Initialize the database from a state dump file import This syncs RLP encoded blocks from a file import-era This syncs ERA encoded blocks from a directory + export-era Exports blocks to era1 files in a specified directory dump-genesis Dumps genesis block JSON configuration to stdout db Database debugging utilities download Download public node snapshots diff --git a/docs/vocs/docs/pages/cli/reth/export-era.mdx b/docs/vocs/docs/pages/cli/reth/export-era.mdx new file mode 100644 index 00000000000..165970638ba --- /dev/null +++ b/docs/vocs/docs/pages/cli/reth/export-era.mdx @@ -0,0 +1,162 @@ +# reth export-era + +Exports blocks to era1 files in a specified directory + +```bash +$ reth export-era --help +``` +```txt +Usage: reth export-era [OPTIONS] + +Options: + -h, --help + Print help (see a summary with '-h') + +Datadir: + --datadir <DATADIR> + The path to the data dir for all reth files and subdirectories. + + Defaults to the OS-specific data directory: + + - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` + - Windows: `{FOLDERID_RoamingAppData}/reth/` + - macOS: `$HOME/Library/Application Support/reth/` + + [default: default] + + --datadir.static-files <PATH> + The absolute path to store static files in. + + --config <FILE> + The path to the configuration file to use + + --chain <CHAIN_OR_PATH> + The chain this node is running. + Possible values are either a built-in chain or the path to a chain specification file. + + Built-in chains: + mainnet, sepolia, holesky, hoodi, dev + + [default: mainnet] + +Database: + --db.log-level <LOG_LEVEL> + Database logging level. Levels higher than "notice" require a debug build + + Possible values: + - fatal: Enables logging for critical conditions, i.e. assertion failures + - error: Enables logging for error conditions + - warn: Enables logging for warning conditions + - notice: Enables logging for normal but significant condition + - verbose: Enables logging for verbose informational + - debug: Enables logging for debug-level messages + - trace: Enables logging for trace debug-level messages + - extra: Enables logging for extra debug-level messages + + --db.exclusive + Open environment in exclusive/monopolistic mode.
Makes it possible to open a database on an NFS volume + + [possible values: true, false] + + --db.max-size <MAX_SIZE> + Maximum database size (e.g., 4TB, 8MB) + + --db.growth-step <GROWTH_STEP> + Database growth step (e.g., 4GB, 4KB) + + --db.read-transaction-timeout <READ_TRANSACTION_TIMEOUT> + Read transaction timeout in seconds, 0 means no timeout + + --first-block-number <first-block-number> + Optional first block number to export from the db. + Defaults to 0. + + --last-block-number <last-block-number> + Optional last block number to export from the db. + Defaults to 8191. + + --max-blocks-per-file <max-blocks-per-file> + The maximum number of blocks per file; lowering it decreases the size of each file. + Must be less than or equal to 8192. + + --path <EXPORT_ERA1_PATH> + The directory path to export era1 files to. + The block data is read from the database. + +Logging: + --log.stdout.format <FORMAT> + The format to use for logs written to stdout + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.stdout.filter <FILTER> + The filter to use for logs written to stdout + + [default: ] + + --log.file.format <FORMAT> + The format to use for logs written to the log file + + [default: terminal] + + Possible values: + - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging + - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications + - terminal: Represents terminal-friendly formatting for logs + + --log.file.filter <FILTER> + The filter to use for logs written to the log file + + [default: debug] + + --log.file.directory <PATH> + The path to put log files in + + [default: <CACHE_DIR>/logs] + + --log.file.max-size <SIZE> + The maximum size (in MB) of one log file + + [default: 200] + + --log.file.max-files <COUNT> + The maximum amount of log files that will be stored. If set to 0, background file logging is disabled + + [default: 5] + + --log.journald + Write logs to journald + + --log.journald.filter <FILTER> + The filter to use for logs written to journald + + [default: error] + + --color <COLOR> + Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting + + [default: always] + + Possible values: + - always: Colors on + - auto: Colors on + - never: Colors off + +Display: + -v, --verbosity... + Set the minimum log level. + + -v Errors + -vv Warnings + -vvv Info + -vvvv Debug + -vvvvv Traces (warning: very verbose!)
+ + -q, --quiet + Silence all log output +``` \ No newline at end of file diff --git a/docs/vocs/sidebar.ts b/docs/vocs/sidebar.ts index 65829d8e48c..140b056e0a2 100644 --- a/docs/vocs/sidebar.ts +++ b/docs/vocs/sidebar.ts @@ -313,6 +313,10 @@ export const sidebar: SidebarItem[] = [ text: "reth import-era", link: "/cli/reth/import-era" }, + { + text: "reth export-era", + link: "/cli/reth/export-era" + }, { text: "reth dump-genesis", link: "/cli/reth/dump-genesis" From c274422bba756d4eda9169e8ebe60b3490ccce38 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 10 Jul 2025 08:45:14 -0400 Subject: [PATCH 124/305] feat(trie): add generics to SparseTrieTask (#17269) Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- .../tree/src/tree/payload_processor/mod.rs | 17 ++++++------ .../src/tree/payload_processor/sparse_trie.rs | 26 +++++++++++-------- 2 files changed, 24 insertions(+), 19 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 2f70ff5cbd0..165b0ed1c2c 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -28,7 +28,7 @@ use reth_trie_parallel::{ proof_task::{ProofTaskCtx, ProofTaskManager}, root::ParallelStateRootError, }; -use reth_trie_sparse::SparseTrie; +use reth_trie_sparse::{SerialSparseTrie, SparseTrie}; use std::{ collections::VecDeque, sync::{ @@ -199,13 +199,14 @@ where // take the sparse trie if it was set let sparse_trie = self.sparse_trie.take(); - let mut sparse_trie_task = SparseTrieTask::new_with_stored_trie( - self.executor.clone(), - sparse_trie_rx, - proof_task.handle(), - self.trie_metrics.clone(), - sparse_trie, - ); + let mut sparse_trie_task = + SparseTrieTask::<_, SerialSparseTrie, SerialSparseTrie>::new_with_stored_trie( + self.executor.clone(), + sparse_trie_rx, + proof_task.handle(), + self.trie_metrics.clone(), + sparse_trie, + ); // wire the sparse trie to the state root response receiver let (state_root_tx, state_root_rx) = channel(); diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index bd8702826d4..bd1ae9fda9d 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -11,7 +11,7 @@ use reth_trie_parallel::root::ParallelStateRootError; use reth_trie_sparse::{ blinded::{BlindedProvider, BlindedProviderFactory}, errors::{SparseStateTrieResult, SparseTrieErrorKind}, - SparseStateTrie, SparseTrie, + SerialSparseTrie, SparseStateTrie, SparseTrie, SparseTrieInterface, }; use std::{ sync::mpsc, @@ -20,7 +20,7 @@ use std::{ use tracing::{debug, trace, trace_span}; /// A task responsible for populating the sparse trie. -pub(super) struct SparseTrieTask +pub(super) struct SparseTrieTask where BPF: BlindedProviderFactory + Send + Sync, BPF::AccountNodeProvider: BlindedProvider + Send + Sync, @@ -34,17 +34,19 @@ where /// Sparse Trie initialized with the blinded provider factory. /// /// It's kept as a field on the struct to prevent blocking on de-allocation in [`Self::run`]. - pub(super) trie: SparseStateTrie, + pub(super) trie: SparseStateTrie, pub(super) metrics: MultiProofTaskMetrics, /// Blinded node provider factory. 
blinded_provider_factory: BPF, } -impl SparseTrieTask +impl SparseTrieTask where BPF: BlindedProviderFactory + Send + Sync + Clone, BPF::AccountNodeProvider: BlindedProvider + Send + Sync, BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + A: SparseTrieInterface + Send + Sync + Default, + S: SparseTrieInterface + Send + Sync + Default, { /// Creates a new sparse trie task. pub(super) fn new( @@ -69,7 +71,7 @@ where updates: mpsc::Receiver, blinded_provider_factory: BPF, trie_metrics: MultiProofTaskMetrics, - sparse_trie: Option, + sparse_trie: Option>, ) -> Self { if let Some(sparse_trie) = sparse_trie { Self::with_accounts_trie( @@ -91,7 +93,7 @@ where updates: mpsc::Receiver, blinded_provider_factory: BPF, metrics: MultiProofTaskMetrics, - sparse_trie: SparseTrie, + sparse_trie: SparseTrie, ) -> Self { debug_assert!(sparse_trie.is_blind()); let trie = SparseStateTrie::new().with_updates(true).with_accounts_trie(sparse_trie); @@ -106,7 +108,7 @@ where /// /// NOTE: This function does not take `self` by value to prevent blocking on [`SparseStateTrie`] /// drop. - pub(super) fn run(&mut self) -> Result { + pub(super) fn run(&mut self) -> Result, ParallelStateRootError> { let now = Instant::now(); let mut num_iterations = 0; @@ -159,18 +161,18 @@ where /// Outcome of the state root computation, including the state root itself with /// the trie updates. #[derive(Debug)] -pub struct StateRootComputeOutcome { +pub struct StateRootComputeOutcome { /// The state root. pub state_root: B256, /// The trie updates. pub trie_updates: TrieUpdates, /// The account state trie. - pub trie: SparseTrie, + pub trie: SparseTrie, } /// Updates the sparse trie with the given proofs and state, and returns the elapsed time. -pub(crate) fn update_sparse_trie( - trie: &mut SparseStateTrie, +pub(crate) fn update_sparse_trie( + trie: &mut SparseStateTrie, SparseTrieUpdate { mut state, multiproof }: SparseTrieUpdate, blinded_provider_factory: &BPF, ) -> SparseStateTrieResult @@ -178,6 +180,8 @@ where BPF: BlindedProviderFactory + Send + Sync, BPF::AccountNodeProvider: BlindedProvider + Send + Sync, BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + A: SparseTrieInterface + Send + Sync + Default, + S: SparseTrieInterface + Send + Sync + Default, { trace!(target: "engine::root::sparse", "Updating sparse trie"); let started_at = Instant::now(); From d7aa75137944c1c8f82f2fe7170cb9fcbdeccc3b Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Thu, 10 Jul 2025 15:42:27 +0200 Subject: [PATCH 125/305] feat: add graph selection option to newpayload latency comparison script (#17097) --- .../scripts/compare_newpayload_latency.py | 128 ++++++++++++------ 1 file changed, 86 insertions(+), 42 deletions(-) diff --git a/bin/reth-bench/scripts/compare_newpayload_latency.py b/bin/reth-bench/scripts/compare_newpayload_latency.py index ff9cdad5262..7d3c212d490 100755 --- a/bin/reth-bench/scripts/compare_newpayload_latency.py +++ b/bin/reth-bench/scripts/compare_newpayload_latency.py @@ -3,7 +3,7 @@ # requires-python = ">=3.8" # dependencies = [ # "pandas", -# "matplotlib", +# "matplotlib", # "numpy", # ] # /// @@ -29,9 +29,21 @@ def main(): parser.add_argument('baseline_csv', help='First CSV file, used as the baseline/control') parser.add_argument('comparison_csv', help='Second CSV file, which is being compared to the baseline') parser.add_argument('-o', '--output', default='latency.png', help='Output image file (default: latency.png)') + parser.add_argument('--graphs', default='all', help='Comma-separated list of graphs 
to plot: histogram, line, all (default: all)') args = parser.parse_args() + # Parse graph selection + if args.graphs.lower() == 'all': + selected_graphs = {'histogram', 'line'} + else: + selected_graphs = set(graph.strip().lower() for graph in args.graphs.split(',')) + valid_graphs = {'histogram', 'line'} + invalid_graphs = selected_graphs - valid_graphs + if invalid_graphs: + print(f"Error: Invalid graph types: {', '.join(invalid_graphs)}. Valid options are: histogram, line, all", file=sys.stderr) + sys.exit(1) + try: df1 = pd.read_csv(args.baseline_csv) df2 = pd.read_csv(args.comparison_csv) @@ -70,54 +82,86 @@ def main(): print("Error: No valid percent differences could be calculated", file=sys.stderr) sys.exit(1) - # Create histogram with 1% buckets - min_diff = np.floor(percent_diff.min()) - max_diff = np.ceil(percent_diff.max()) - - bins = np.arange(min_diff, max_diff + 1, 1) - - # Create figure with two subplots - fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12)) - - # Top subplot: Histogram - ax1.hist(percent_diff, bins=bins, edgecolor='black', alpha=0.7) - ax1.set_xlabel('Percent Difference (%)') - ax1.set_ylabel('Number of Blocks') - ax1.set_title(f'Total Latency Percent Difference Histogram\n({args.baseline_csv} vs {args.comparison_csv})') - ax1.grid(True, alpha=0.3) - - # Add statistics to the histogram + # Calculate statistics once for use in graphs and output mean_diff = np.mean(percent_diff) median_diff = np.median(percent_diff) - ax1.axvline(mean_diff, color='red', linestyle='--', label=f'Mean: {mean_diff:.2f}%') - ax1.axvline(median_diff, color='orange', linestyle='--', label=f'Median: {median_diff:.2f}%') - ax1.legend() - - # Bottom subplot: Latency vs Block Number - if 'block_number' in df1.columns and 'block_number' in df2.columns: - block_numbers = df1['block_number'].values[:len(percent_diff)] - ax2.plot(block_numbers, latency1[:len(percent_diff)], 'b-', alpha=0.7, label=f'Baseline ({args.baseline_csv})') - ax2.plot(block_numbers, latency2[:len(percent_diff)], 'r-', alpha=0.7, label=f'Comparison ({args.comparison_csv})') - ax2.set_xlabel('Block Number') - ax2.set_ylabel('Total Latency (ms)') - ax2.set_title('Total Latency vs Block Number') - ax2.grid(True, alpha=0.3) - ax2.legend() + + # Determine number of subplots and create figure + num_plots = len(selected_graphs) + if num_plots == 0: + print("Error: No valid graphs selected", file=sys.stderr) + sys.exit(1) + + if num_plots == 1: + fig, ax = plt.subplots(1, 1, figsize=(12, 6)) + axes = [ax] else: - # If no block_number column, use index - indices = np.arange(len(percent_diff)) - ax2.plot(indices, latency1[:len(percent_diff)], 'b-', alpha=0.7, label=f'Baseline ({args.baseline_csv})') - ax2.plot(indices, latency2[:len(percent_diff)], 'r-', alpha=0.7, label=f'Comparison ({args.comparison_csv})') - ax2.set_xlabel('Block Index') - ax2.set_ylabel('Total Latency (ms)') - ax2.set_title('Total Latency vs Block Index') - ax2.grid(True, alpha=0.3) - ax2.legend() + fig, axes = plt.subplots(num_plots, 1, figsize=(12, 6 * num_plots)) + + plot_idx = 0 + + # Plot histogram if selected + if 'histogram' in selected_graphs: + min_diff = np.floor(percent_diff.min()) + max_diff = np.ceil(percent_diff.max()) + + # Create histogram with 1% buckets + bins = np.arange(min_diff, max_diff + 1, 1) + + ax = axes[plot_idx] + ax.hist(percent_diff, bins=bins, edgecolor='black', alpha=0.7) + ax.set_xlabel('Percent Difference (%)') + ax.set_ylabel('Number of Blocks') + ax.set_title(f'Total Latency Percent Difference 
Histogram\n({args.baseline_csv} vs {args.comparison_csv})') + ax.grid(True, alpha=0.3) + + # Add statistics to the histogram + ax.axvline(mean_diff, color='red', linestyle='--', label=f'Mean: {mean_diff:.2f}%') + ax.axvline(median_diff, color='orange', linestyle='--', label=f'Median: {median_diff:.2f}%') + ax.legend() + plot_idx += 1 + + # Plot line graph if selected + if 'line' in selected_graphs: + # Determine comparison color based on median change. The median being + # negative means processing time got faster, so that becomes green. + comparison_color = 'green' if median_diff < 0 else 'red' + + ax = axes[plot_idx] + if 'block_number' in df1.columns and 'block_number' in df2.columns: + block_numbers = df1['block_number'].values[:len(percent_diff)] + ax.plot(block_numbers, latency1[:len(percent_diff)], 'orange', alpha=0.7, label=f'Baseline ({args.baseline_csv})') + ax.plot(block_numbers, latency2[:len(percent_diff)], comparison_color, alpha=0.7, label=f'Comparison ({args.comparison_csv})') + ax.set_xlabel('Block Number') + ax.set_ylabel('Total Latency (ms)') + ax.set_title('Total Latency vs Block Number') + ax.grid(True, alpha=0.3) + ax.legend() + else: + # If no block_number column, use index + indices = np.arange(len(percent_diff)) + ax.plot(indices, latency1[:len(percent_diff)], 'orange', alpha=0.7, label=f'Baseline ({args.baseline_csv})') + ax.plot(indices, latency2[:len(percent_diff)], comparison_color, alpha=0.7, label=f'Comparison ({args.comparison_csv})') + ax.set_xlabel('Block Index') + ax.set_ylabel('Total Latency (ms)') + ax.set_title('Total Latency vs Block Index') + ax.grid(True, alpha=0.3) + ax.legend() + plot_idx += 1 plt.tight_layout() plt.savefig(args.output, dpi=300, bbox_inches='tight') - print(f"Histogram and latency graph saved to {args.output}") + # Create graph type description for output message + graph_types = [] + if 'histogram' in selected_graphs: + graph_types.append('histogram') + if 'line' in selected_graphs: + graph_types.append('latency graph') + graph_desc = ' and '.join(graph_types) + print(f"{graph_desc.capitalize()} saved to {args.output}") + + # Always print statistics print(f"\nStatistics:") print(f"Mean percent difference: {mean_diff:.2f}%") print(f"Median percent difference: {median_diff:.2f}%") From 6561e8ff467064d0c80452b9347c4979c22daa7b Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Thu, 10 Jul 2025 17:04:29 +0200 Subject: [PATCH 126/305] chore(trie): Implement ParallelSparseTrie::find_leaf (#17326) --- crates/trie/sparse-parallel/src/trie.rs | 315 ++++++++++++++++++++++-- crates/trie/sparse/src/state.rs | 12 +- crates/trie/sparse/src/traits.rs | 5 +- crates/trie/sparse/src/trie.rs | 56 +---- 4 files changed, 309 insertions(+), 79 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 9f36413cb72..9f9c251deba 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -13,8 +13,8 @@ use reth_trie_common::{ }; use reth_trie_sparse::{ blinded::{BlindedProvider, RevealedNode}, - RlpNodeStackItem, SparseNode, SparseNodeType, SparseTrieInterface, SparseTrieUpdates, - TrieMasks, + LeafLookup, LeafLookupError, RlpNodeStackItem, SparseNode, SparseNodeType, SparseTrieInterface, + SparseTrieUpdates, TrieMasks, }; use smallvec::SmallVec; use std::sync::mpsc; @@ -335,8 +335,11 @@ impl SparseTrieInterface for ParallelSparseTrie { loop { let curr_node = curr_subtrie.nodes.get_mut(&curr_path).unwrap(); - match Self::find_next_to_leaf(&curr_path, 
curr_node, full_path)? { + match Self::find_next_to_leaf(&curr_path, curr_node, full_path) { FindNextToLeafOutcome::NotFound => return Ok(()), // leaf isn't in the trie + FindNextToLeafOutcome::BlindedNode(hash) => { + return Err(SparseTrieErrorKind::BlindedNode { path: curr_path, hash }.into()) + } FindNextToLeafOutcome::Found => { // this node is the target leaf leaf_path = curr_path; @@ -378,7 +381,9 @@ impl SparseTrieInterface for ParallelSparseTrie { ext_grandparent_node = Some(curr_node.clone()); } SparseNode::Empty | SparseNode::Hash(_) | SparseNode::Leaf { .. } => { - unreachable!("find_next_to_leaf errors on non-revealed node, and return Found or NotFound on Leaf") + unreachable!( + "find_next_to_leaf only continues to a branch or extension" + ) } } @@ -657,10 +662,65 @@ impl SparseTrieInterface for ParallelSparseTrie { fn find_leaf( &self, - _full_path: &Nibbles, - _expected_value: Option<&Vec>, - ) -> Result { - todo!() + full_path: &Nibbles, + expected_value: Option<&Vec>, + ) -> Result { + // Inclusion proof + // + // First, do a quick check if the value exists in either the upper or lower subtrie's values + // map. We assume that if there exists a leaf node, then its value will be in the `values` + // map. + if let Some(actual_value) = std::iter::once(self.upper_subtrie.as_ref()) + .chain(self.lower_subtrie_for_path(full_path)) + .filter_map(|subtrie| subtrie.inner.values.get(full_path)) + .next() + { + // We found the leaf, check if the value matches (if expected value was provided) + return expected_value + .is_none_or(|v| v == actual_value) + .then_some(LeafLookup::Exists) + .ok_or_else(|| LeafLookupError::ValueMismatch { + path: *full_path, + expected: expected_value.cloned(), + actual: actual_value.clone(), + }) + } + + // If the value does not exist in the `values` map, then this means that the leaf either: + // - Does not exist in the trie + // - Is missing from the witness + // We traverse the trie to find the location where this leaf would have been, showing + // that it is not in the trie. Or we find a blinded node, showing that the witness is + // not complete. + let mut curr_path = Nibbles::new(); // start traversal from root + let mut curr_subtrie = self.upper_subtrie.as_ref(); + let mut curr_subtrie_is_upper = true; + + loop { + let curr_node = curr_subtrie.nodes.get(&curr_path).unwrap(); + + match Self::find_next_to_leaf(&curr_path, curr_node, full_path) { + FindNextToLeafOutcome::NotFound => return Ok(LeafLookup::NonExistent), + FindNextToLeafOutcome::BlindedNode(hash) => { + // We hit a blinded node - cannot determine if leaf exists + return Err(LeafLookupError::BlindedNode { path: curr_path, hash }); + } + FindNextToLeafOutcome::Found => { + panic!("target leaf {full_path:?} found at path {curr_path:?}, even though value wasn't in values hashmap"); + } + FindNextToLeafOutcome::ContinueFrom(next_path) => { + curr_path = next_path; + // If we were previously looking at the upper trie, and the new path is in the + // lower trie, we need to pull out a ref to the lower trie. 
+ if curr_subtrie_is_upper { + if let Some(lower_subtrie) = self.lower_subtrie_for_path(&curr_path) { + curr_subtrie = lower_subtrie; + curr_subtrie_is_upper = false; + } + } + } + } + } } } @@ -759,51 +819,51 @@ impl ParallelSparseTrie { from_path: &Nibbles, from_node: &SparseNode, leaf_full_path: &Nibbles, - ) -> SparseTrieResult { + ) -> FindNextToLeafOutcome { debug_assert!(leaf_full_path.len() >= from_path.len()); debug_assert!(leaf_full_path.starts_with(from_path)); match from_node { - SparseNode::Empty => Err(SparseTrieErrorKind::Blind.into()), - SparseNode::Hash(hash) => { - Err(SparseTrieErrorKind::BlindedNode { path: *from_path, hash: *hash }.into()) - } + // If empty node is found it means the subtrie doesn't have any nodes in it, let alone + // the target leaf. + SparseNode::Empty => FindNextToLeafOutcome::NotFound, + SparseNode::Hash(hash) => FindNextToLeafOutcome::BlindedNode(*hash), SparseNode::Leaf { key, .. } => { let mut found_full_path = *from_path; found_full_path.extend(key); if &found_full_path == leaf_full_path { - return Ok(FindNextToLeafOutcome::Found) + return FindNextToLeafOutcome::Found } - Ok(FindNextToLeafOutcome::NotFound) + FindNextToLeafOutcome::NotFound } SparseNode::Extension { key, .. } => { if leaf_full_path.len() == from_path.len() { - return Ok(FindNextToLeafOutcome::NotFound) + return FindNextToLeafOutcome::NotFound } let mut child_path = *from_path; child_path.extend(key); if !leaf_full_path.starts_with(&child_path) { - return Ok(FindNextToLeafOutcome::NotFound) + return FindNextToLeafOutcome::NotFound } - Ok(FindNextToLeafOutcome::ContinueFrom(child_path)) + FindNextToLeafOutcome::ContinueFrom(child_path) } SparseNode::Branch { state_mask, .. } => { if leaf_full_path.len() == from_path.len() { - return Ok(FindNextToLeafOutcome::NotFound) + return FindNextToLeafOutcome::NotFound } let nibble = leaf_full_path.get_unchecked(from_path.len()); if !state_mask.is_bit_set(nibble) { - return Ok(FindNextToLeafOutcome::NotFound) + return FindNextToLeafOutcome::NotFound } let mut child_path = *from_path; child_path.push_unchecked(nibble); - Ok(FindNextToLeafOutcome::ContinueFrom(child_path)) + FindNextToLeafOutcome::ContinueFrom(child_path) } } } @@ -1163,6 +1223,9 @@ enum FindNextToLeafOutcome { /// `NotFound` indicates that there is no way to traverse to the leaf, as it is not in the /// trie. NotFound, + /// `BlindedNode` indicates that the node is blinded with the contained hash and cannot be + /// traversed. 
+ BlindedNode(B256), } impl SparseSubtrie { @@ -2170,7 +2233,8 @@ mod tests { use reth_trie_db::DatabaseTrieCursorFactory; use reth_trie_sparse::{ blinded::{BlindedProvider, DefaultBlindedProvider, RevealedNode}, - SerialSparseTrie, SparseNode, SparseTrieInterface, SparseTrieUpdates, TrieMasks, + LeafLookup, LeafLookupError, SerialSparseTrie, SparseNode, SparseTrieInterface, + SparseTrieUpdates, TrieMasks, }; use std::collections::{BTreeMap, BTreeSet}; @@ -5876,4 +5940,211 @@ mod tests { b256!("29b07de8376e9ce7b3a69e9b102199869514d3f42590b5abc6f7d48ec9b8665c"); assert_eq!(trie.root(), expected_root); } + + #[test] + fn find_leaf_existing_leaf() { + // Create a simple trie with one leaf + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + let value = b"test_value".to_vec(); + + sparse.update_leaf(path, value.clone(), &provider).unwrap(); + + // Check that the leaf exists + let result = sparse.find_leaf(&path, None); + assert_matches!(result, Ok(LeafLookup::Exists)); + + // Check with expected value matching + let result = sparse.find_leaf(&path, Some(&value)); + assert_matches!(result, Ok(LeafLookup::Exists)); + } + + #[test] + fn find_leaf_value_mismatch() { + // Create a simple trie with one leaf + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + let value = b"test_value".to_vec(); + let wrong_value = b"wrong_value".to_vec(); + + sparse.update_leaf(path, value, &provider).unwrap(); + + // Check with wrong expected value + let result = sparse.find_leaf(&path, Some(&wrong_value)); + assert_matches!( + result, + Err(LeafLookupError::ValueMismatch { path: p, expected: Some(e), actual: _a }) if p == path && e == wrong_value + ); + } + + #[test] + fn find_leaf_not_found_empty_trie() { + // Empty trie + let sparse = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); + + // Leaf should not exist + let result = sparse.find_leaf(&path, None); + assert_matches!(result, Ok(LeafLookup::NonExistent)); + } + + #[test] + fn find_leaf_empty_trie() { + let sparse = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); + + let result = sparse.find_leaf(&path, None); + assert_matches!(result, Ok(LeafLookup::NonExistent)); + } + + #[test] + fn find_leaf_exists_no_value_check() { + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); + sparse.update_leaf(path, encode_account_value(0), &provider).unwrap(); + + let result = sparse.find_leaf(&path, None); + assert_matches!(result, Ok(LeafLookup::Exists)); + } + + #[test] + fn find_leaf_exists_with_value_check_ok() { + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::default(); + let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); + let value = encode_account_value(0); + sparse.update_leaf(path, value.clone(), &provider).unwrap(); + + let result = sparse.find_leaf(&path, Some(&value)); + assert_matches!(result, Ok(LeafLookup::Exists)); + } + + #[test] + fn find_leaf_exclusion_branch_divergence() { + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::default(); + let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 + let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); // Belongs 
to same branch + let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x7, 0x8]); // Diverges at nibble 7 + + sparse.update_leaf(path1, encode_account_value(0), &provider).unwrap(); + sparse.update_leaf(path2, encode_account_value(1), &provider).unwrap(); + + let result = sparse.find_leaf(&search_path, None); + assert_matches!(result, Ok(LeafLookup::NonExistent)) + } + + #[test] + fn find_leaf_exclusion_extension_divergence() { + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::default(); + // This will create an extension node at root with key 0x12 + let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); + // This path diverges from the extension key + let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x7, 0x8]); + + sparse.update_leaf(path1, encode_account_value(0), &provider).unwrap(); + + let result = sparse.find_leaf(&search_path, None); + assert_matches!(result, Ok(LeafLookup::NonExistent)) + } + + #[test] + fn find_leaf_exclusion_leaf_divergence() { + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::default(); + let existing_leaf_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); + let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); + + sparse.update_leaf(existing_leaf_path, encode_account_value(0), &provider).unwrap(); + + let result = sparse.find_leaf(&search_path, None); + assert_matches!(result, Ok(LeafLookup::NonExistent)) + } + + #[test] + fn find_leaf_exclusion_path_ends_at_branch() { + let provider = DefaultBlindedProvider; + let mut sparse = ParallelSparseTrie::default(); + let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 + let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); + let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2]); // Path of the branch itself + + sparse.update_leaf(path1, encode_account_value(0), &provider).unwrap(); + sparse.update_leaf(path2, encode_account_value(1), &provider).unwrap(); + + let result = sparse.find_leaf(&search_path, None); + assert_matches!(result, Ok(LeafLookup::NonExistent)); + } + + #[test] + fn find_leaf_error_blinded_node_at_leaf_path() { + // Scenario: The node *at* the leaf path is blinded. 
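+        // (The hand-built trie below places a `SparseNode::Hash` exactly at the leaf path 0x1234, +        // so `find_leaf` must report the blinded node rather than claim an exclusion proof.)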
+ let blinded_hash = B256::repeat_byte(0xBB); + let leaf_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); + + let sparse = new_test_trie( + [ + ( + // Ext 0x12 + Nibbles::default(), + SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x1, 0x2])), + ), + ( + // Ext 0x123 + Nibbles::from_nibbles_unchecked([0x1, 0x2]), + SparseNode::new_ext(Nibbles::from_nibbles_unchecked([0x3])), + ), + ( + // Branch at 0x123, child 4 + Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3]), + SparseNode::new_branch(TrieMask::new(0b10000)), + ), + ( + // Blinded node at 0x1234 + leaf_path, + SparseNode::Hash(blinded_hash), + ), + ] + .into_iter(), + ); + + let result = sparse.find_leaf(&leaf_path, None); + + // Should error because it hit the blinded node exactly at the leaf path + assert_matches!(result, Err(LeafLookupError::BlindedNode { path, hash }) + if path == leaf_path && hash == blinded_hash + ); + } + + #[test] + fn find_leaf_error_blinded_node() { + let blinded_hash = B256::repeat_byte(0xAA); + let path_to_blind = Nibbles::from_nibbles_unchecked([0x1]); + let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); + + let sparse = new_test_trie( + [ + // Root is a branch with child 0x1 (blinded) and 0x5 (revealed leaf) + // So we set Bit 1 and Bit 5 in the state_mask + (Nibbles::default(), SparseNode::new_branch(TrieMask::new(0b100010))), + (path_to_blind, SparseNode::Hash(blinded_hash)), + ( + Nibbles::from_nibbles_unchecked([0x5]), + SparseNode::new_leaf(Nibbles::from_nibbles_unchecked([0x6, 0x7, 0x8])), + ), + ] + .into_iter(), + ); + + let result = sparse.find_leaf(&search_path, None); + + // Should error because it hit the blinded node at path 0x1 + assert_matches!(result, Err(LeafLookupError::BlindedNode { path, hash }) + if path == path_to_blind && hash == blinded_hash + ); + } } diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 2dc443ac1bb..33d8c94f8d0 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,7 +1,7 @@ use crate::{ blinded::{BlindedProvider, BlindedProviderFactory}, traits::SparseTrieInterface, - LeafLookup, SerialSparseTrie, SparseTrie, TrieMasks, + SerialSparseTrie, SparseTrie, TrieMasks, }; use alloc::{collections::VecDeque, vec::Vec}; use alloy_primitives::{ @@ -111,10 +111,7 @@ where None => return false, }; - matches!( - trie.find_leaf(&path, None), - Ok(LeafLookup::Exists | LeafLookup::NonExistent { .. }) - ) + trie.find_leaf(&path, None).is_ok() } /// Was the storage-slot witness for (`address`,`slot`) complete? @@ -125,10 +122,7 @@ where None => return false, }; - matches!( - trie.find_leaf(&path, None), - Ok(LeafLookup::Exists | LeafLookup::NonExistent { .. }) - ) + trie.find_leaf(&path, None).is_ok() } /// Returns `true` if storage slot for account was already revealed. diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index e2b22f2daf9..2fe1838d777 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -289,8 +289,5 @@ pub enum LeafLookup { /// Leaf exists with expected value. Exists, /// Leaf does not exist (exclusion proof found). - NonExistent { - /// Path where the search diverged from the target path. 
- diverged_at: Nibbles, - }, + NonExistent, } diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 4e0d03b0900..0c0cf6800be 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -972,7 +972,7 @@ impl SparseTrieInterface for SerialSparseTrie { Some(SparseNode::Empty) | None => { // None implies no node is at the current path (even in the full trie) // Empty node means there is a node at this path and it is "Empty" - return Ok(LeafLookup::NonExistent { diverged_at: current }); + return Ok(LeafLookup::NonExistent); } Some(&SparseNode::Hash(hash)) => { // We hit a blinded node - cannot determine if leaf exists @@ -980,11 +980,7 @@ impl SparseTrieInterface for SerialSparseTrie { } Some(SparseNode::Leaf { key, .. }) => { // We found a leaf node before reaching our target depth - - // Temporarily append the leaf key to `current` - let saved_len = current.len(); current.extend(key); - if ¤t == full_path { // This should have been handled by our initial values map check if let Some(value) = self.values.get(full_path) { @@ -993,11 +989,9 @@ impl SparseTrieInterface for SerialSparseTrie { } } - let diverged_at = current.slice(..saved_len); - // The leaf node's path doesn't match our target path, // providing an exclusion proof - return Ok(LeafLookup::NonExistent { diverged_at }); + return Ok(LeafLookup::NonExistent); } Some(SparseNode::Extension { key, .. }) => { // Temporarily append the extension key to `current` @@ -1005,9 +999,8 @@ impl SparseTrieInterface for SerialSparseTrie { current.extend(key); if full_path.len() < current.len() || !full_path.starts_with(¤t) { - let diverged_at = current.slice(..saved_len); current.truncate(saved_len); // restore - return Ok(LeafLookup::NonExistent { diverged_at }); + return Ok(LeafLookup::NonExistent); } // Prefix matched, so we keep walking with the longer `current`. } @@ -1016,7 +1009,7 @@ impl SparseTrieInterface for SerialSparseTrie { let nibble = full_path.get_unchecked(current.len()); if !state_mask.is_bit_set(nibble) { // No child at this nibble - exclusion proof - return Ok(LeafLookup::NonExistent { diverged_at: current }); + return Ok(LeafLookup::NonExistent); } // Continue down the branch @@ -1041,17 +1034,12 @@ impl SparseTrieInterface for SerialSparseTrie { } _ => { // No leaf at exactly the target path - let parent_path = if full_path.is_empty() { - Nibbles::default() - } else { - full_path.slice(0..full_path.len() - 1) - }; - return Ok(LeafLookup::NonExistent { diverged_at: parent_path }); + return Ok(LeafLookup::NonExistent); } } // If we get here, there's no leaf at the target path - Ok(LeafLookup::NonExistent { diverged_at: current }) + Ok(LeafLookup::NonExistent) } } @@ -1983,10 +1971,7 @@ mod find_leaf_tests { // Leaf should not exist let result = sparse.find_leaf(&path, None); - assert_matches!( - result, - Ok(LeafLookup::NonExistent { diverged_at }) if diverged_at == Nibbles::default() - ); + assert_matches!(result, Ok(LeafLookup::NonExistent)); } #[test] @@ -1995,9 +1980,7 @@ mod find_leaf_tests { let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let result = sparse.find_leaf(&path, None); - - // In an empty trie, the search diverges immediately at the root. 
- assert_matches!(result, Ok(LeafLookup::NonExistent { diverged_at }) if diverged_at == Nibbles::default()); + assert_matches!(result, Ok(LeafLookup::NonExistent)); } #[test] @@ -2035,10 +2018,7 @@ mod find_leaf_tests { sparse.update_leaf(path2, VALUE_B(), &provider).unwrap(); let result = sparse.find_leaf(&search_path, None); - - // Diverged at the branch node because nibble '7' is not present. - let expected_divergence = Nibbles::from_nibbles_unchecked([0x1, 0x2]); - assert_matches!(result, Ok(LeafLookup::NonExistent { diverged_at }) if diverged_at == expected_divergence); + assert_matches!(result, Ok(LeafLookup::NonExistent)); } #[test] @@ -2053,10 +2033,7 @@ mod find_leaf_tests { sparse.update_leaf(path1, VALUE_A(), &provider).unwrap(); let result = sparse.find_leaf(&search_path, None); - - // Diverged where the extension node started because the path doesn't match its key prefix. - let expected_divergence = Nibbles::default(); - assert_matches!(result, Ok(LeafLookup::NonExistent { diverged_at }) if diverged_at == expected_divergence); + assert_matches!(result, Ok(LeafLookup::NonExistent)); } #[test] @@ -2069,12 +2046,7 @@ mod find_leaf_tests { sparse.update_leaf(existing_leaf_path, VALUE_A(), &provider).unwrap(); let result = sparse.find_leaf(&search_path, None); - - // Diverged when it hit the leaf node at the root, because the search path is longer - // than the leaf's key stored there. The code returns the path of the node (root) - // where the divergence occurred. - let expected_divergence = Nibbles::default(); - assert_matches!(result, Ok(LeafLookup::NonExistent { diverged_at }) if diverged_at == expected_divergence); + assert_matches!(result, Ok(LeafLookup::NonExistent)); } #[test] @@ -2089,11 +2061,7 @@ mod find_leaf_tests { sparse.update_leaf(path2, VALUE_B(), &provider).unwrap(); let result = sparse.find_leaf(&search_path, None); - - // The path ends, but the node at the path is a branch, not a leaf. - // Diverged at the parent of the node found at the search path. - let expected_divergence = Nibbles::from_nibbles_unchecked([0x1]); - assert_matches!(result, Ok(LeafLookup::NonExistent { diverged_at }) if diverged_at == expected_divergence); + assert_matches!(result, Ok(LeafLookup::NonExistent)); } #[test] From 2813776d4e7aa0f6726c5c1eb431a9770c025af9 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 10 Jul 2025 17:48:19 +0200 Subject: [PATCH 127/305] chore: add helpers for disabling read-tx timeout (#17339) Co-authored-by: Claude --- .../storage/db/src/implementation/mdbx/mod.rs | 10 ++++- .../src/providers/database/builder.rs | 39 ++++++++++++++++++- 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index d536e69a270..3234666e7c7 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -146,12 +146,20 @@ impl DatabaseArguments { self } + /// Set the maximum duration of a read transaction. + pub const fn max_read_transaction_duration( + &mut self, + max_read_transaction_duration: Option, + ) { + self.max_read_transaction_duration = max_read_transaction_duration; + } + /// Set the maximum duration of a read transaction. 
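///
/// Unlike [`Self::max_read_transaction_duration`], this variant consumes `self` and returns it
/// again, which makes it suitable for builder-style chaining.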
pub const fn with_max_read_transaction_duration( mut self, max_read_transaction_duration: Option, ) -> Self { - self.max_read_transaction_duration = max_read_transaction_duration; + self.max_read_transaction_duration(max_read_transaction_duration); self } diff --git a/crates/storage/provider/src/providers/database/builder.rs b/crates/storage/provider/src/providers/database/builder.rs index 2f25c806945..4bc8569432e 100644 --- a/crates/storage/provider/src/providers/database/builder.rs +++ b/crates/storage/provider/src/providers/database/builder.rs @@ -4,7 +4,10 @@ //! up to the intended build target. use crate::{providers::StaticFileProvider, ProviderFactory}; -use reth_db::{mdbx::DatabaseArguments, open_db_read_only, DatabaseEnv}; +use reth_db::{ + mdbx::{DatabaseArguments, MaxReadTransactionDuration}, + open_db_read_only, DatabaseEnv, +}; use reth_db_api::{database_metrics::DatabaseMetrics, Database}; use reth_node_types::{NodeTypes, NodeTypesWithDBAdapter}; use std::{ @@ -62,7 +65,7 @@ impl ProviderFactoryBuilder { /// ```no_run /// use reth_chainspec::MAINNET; /// use reth_node_types::NodeTypes; - /// /// + /// /// use reth_provider::providers::{ProviderFactoryBuilder, ReadOnlyConfig}; /// /// fn demo>() { @@ -71,6 +74,29 @@ impl ProviderFactoryBuilder { /// .unwrap(); /// } /// ``` + /// + /// # Open an instance with disabled read-transaction timeout + /// + /// By default, read transactions are automatically terminated after a timeout to prevent + /// database free list growth. However, if the database is static (no writes occurring), this + /// safety mechanism can be disabled using + /// [`ReadOnlyConfig::disable_long_read_transaction_safety`]. + /// + /// ```no_run + /// use reth_chainspec::MAINNET; + /// use reth_node_types::NodeTypes; + /// + /// use reth_provider::providers::{ProviderFactoryBuilder, ReadOnlyConfig}; + /// + /// fn demo>() { + /// let provider_factory = ProviderFactoryBuilder::::default() + /// .open_read_only( + /// MAINNET.clone(), + /// ReadOnlyConfig::from_datadir("datadir").disable_long_read_transaction_safety(), + /// ) + /// .unwrap(); + /// } + /// ``` pub fn open_read_only( self, chainspec: Arc, @@ -129,6 +155,15 @@ impl ReadOnlyConfig { Self::from_dirs(datadir.join("db"), datadir.join("static_files")) } + /// Disables long-lived read transaction safety guarantees. + /// + /// Caution: Keeping database transaction open indefinitely can cause the free list to grow if + /// changes to the database are made. + pub const fn disable_long_read_transaction_safety(mut self) -> Self { + self.db_args.max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)); + self + } + /// Derives the [`ReadOnlyConfig`] from the database dir. 
/// /// By default this assumes the following datadir layout: From ccc14938482fc1100233d36e3019d0459dea5719 Mon Sep 17 00:00:00 2001 From: Amidamaru Date: Fri, 11 Jul 2025 03:00:01 +0700 Subject: [PATCH 128/305] chore: make `OpAddonsBuilder` generic over middleware (#17347) --- crates/optimism/node/src/node.rs | 50 +++++++++++++++++++++++++++----- 1 file changed, 42 insertions(+), 8 deletions(-) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 4d642548e12..07cd3866c13 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -295,6 +295,7 @@ impl Default OpEthApiBuilder, OpEngineValidatorBuilder, OpEngineApiBuilder, + Identity, > where N: FullNodeComponents, @@ -305,12 +306,13 @@ where } } -impl +impl OpAddOns< N, OpEthApiBuilder, OpEngineValidatorBuilder, OpEngineApiBuilder, + RpcMiddleware, > where N: FullNodeComponents, @@ -546,7 +548,8 @@ where } } -impl RethRpcAddOns for OpAddOns, EV, EB> +impl RethRpcAddOns + for OpAddOns, EV, EB, RpcMiddleware> where N: FullNodeComponents< Types: OpFullNodeTypes, @@ -559,6 +562,7 @@ where NetworkT: op_alloy_network::Network + Unpin, EV: EngineValidatorBuilder, EB: EngineApiBuilder, + RpcMiddleware: RethRpcMiddleware, { type EthApi = OpEthApi; @@ -567,12 +571,14 @@ where } } -impl EngineValidatorAddOn for OpAddOns, EV, EB> +impl EngineValidatorAddOn + for OpAddOns, EV, EB, RpcMiddleware> where N: FullNodeComponents, OpEthApiBuilder: EthApiBuilder, EV: EngineValidatorBuilder + Default, EB: EngineApiBuilder, + RpcMiddleware: Send, { type Validator = >::Validator; @@ -584,7 +590,7 @@ where /// A regular optimism evm and executor builder. #[derive(Debug, Clone)] #[non_exhaustive] -pub struct OpAddOnsBuilder { +pub struct OpAddOnsBuilder { /// Sequencer client, configured to forward submitted transactions to sequencer of given OP /// network. sequencer_url: Option, @@ -600,6 +606,8 @@ pub struct OpAddOnsBuilder { _nt: PhantomData, /// Minimum suggested priority fee (tip) min_suggested_priority_fee: u64, + /// RPC middleware to use + rpc_middleware: RpcMiddleware, } impl Default for OpAddOnsBuilder { @@ -612,11 +620,12 @@ impl Default for OpAddOnsBuilder { enable_tx_conditional: false, min_suggested_priority_fee: 1_000_000, _nt: PhantomData, + rpc_middleware: Identity::new(), } } } -impl OpAddOnsBuilder { +impl OpAddOnsBuilder { /// With a [`SequencerClient`]. pub fn with_sequencer(mut self, sequencer_client: Option) -> Self { self.sequencer_url = sequencer_client; @@ -652,11 +661,35 @@ impl OpAddOnsBuilder { self.historical_rpc = historical_rpc; self } + + /// Configure the RPC middleware to use + pub fn with_rpc_middleware(self, rpc_middleware: T) -> OpAddOnsBuilder { + let Self { + sequencer_url, + sequencer_headers, + historical_rpc, + da_config, + enable_tx_conditional, + min_suggested_priority_fee, + _nt, + .. + } = self; + OpAddOnsBuilder { + sequencer_url, + sequencer_headers, + historical_rpc, + da_config, + enable_tx_conditional, + min_suggested_priority_fee, + _nt, + rpc_middleware, + } + } } -impl OpAddOnsBuilder { +impl OpAddOnsBuilder { /// Builds an instance of [`OpAddOns`]. - pub fn build(self) -> OpAddOns, EV, EB> + pub fn build(self) -> OpAddOns, EV, EB, RpcMiddleware> where N: FullNodeComponents, OpEthApiBuilder: EthApiBuilder, @@ -670,6 +703,7 @@ impl OpAddOnsBuilder { enable_tx_conditional, min_suggested_priority_fee, historical_rpc, + rpc_middleware, .. 
} = self; @@ -681,7 +715,7 @@ impl OpAddOnsBuilder { .with_min_suggested_priority_fee(min_suggested_priority_fee), EV::default(), EB::default(), - Default::default(), + rpc_middleware, ), da_config: da_config.unwrap_or_default(), sequencer_url, From 4668614f41150c1bc3eccf2363bf82b38012b3db Mon Sep 17 00:00:00 2001 From: "fuder.eth" Date: Thu, 10 Jul 2025 23:24:38 +0300 Subject: [PATCH 129/305] fix: Typographical Errors in Comments (#17333) --- crates/rpc/rpc/src/eth/helpers/receipt.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 2018ba38aca..714815a551a 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -33,7 +33,7 @@ where let blob_params = self.provider().chain_spec().blob_params_at_timestamp(meta.timestamp); Ok(EthReceiptBuilder::new( - // Note: we assume this transaction is valid, because it's mined and therefor valid + // Note: we assume this transaction is valid, because it's mined and therefore valid tx.try_into_recovered_unchecked()?.as_recovered_ref(), meta, &receipt, From 2bf4646e2dbb450f99efd51f64df9505de5b9bce Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 10 Jul 2025 16:31:43 -0400 Subject: [PATCH 130/305] chore(trie): add Either type for SparseTrieInterface (#17267) --- Cargo.lock | 1 + Cargo.toml | 1 + crates/trie/sparse/Cargo.toml | 2 + crates/trie/sparse/src/lib.rs | 3 + crates/trie/sparse/src/traits.rs | 128 +++++++++++++++++++++++++++++++ 5 files changed, 135 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index d091d564a4d..7e0b4a9e5e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10625,6 +10625,7 @@ dependencies = [ "assert_matches", "auto_impl", "codspeed-criterion-compat", + "either", "itertools 0.14.0", "metrics", "pretty_assertions", diff --git a/Cargo.toml b/Cargo.toml index 63a6c3a458b..a800852600d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -517,6 +517,7 @@ op-alloy-rpc-jsonrpsee = { version = "0.18.7", default-features = false } op-alloy-flz = { version = "0.13.1", default-features = false } # misc +either = { version = "1.15.0", default-features = false } aquamarine = "0.6" auto_impl = "1" backon = { version = "1.2", default-features = false, features = ["std-blocking-sleep", "tokio-sleep"] } diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 8b40a72da2a..1a12608e15c 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -26,6 +26,7 @@ alloy-rlp.workspace = true # misc auto_impl.workspace = true smallvec = { workspace = true, features = ["const_new"] } +either.workspace = true # metrics reth-metrics = { workspace = true, optional = true } @@ -62,6 +63,7 @@ std = [ "reth-storage-api/std", "reth-trie-common/std", "tracing/std", + "either/std", ] metrics = ["dep:reth-metrics", "dep:metrics", "std"] test-utils = [ diff --git a/crates/trie/sparse/src/lib.rs b/crates/trie/sparse/src/lib.rs index 220a712d8c8..20884efb233 100644 --- a/crates/trie/sparse/src/lib.rs +++ b/crates/trie/sparse/src/lib.rs @@ -16,6 +16,9 @@ pub use traits::*; pub mod blinded; +// Re-export `Either` because it implements `SparseTrieInterface`. 
+pub use either::Either; + #[cfg(feature = "metrics")] mod metrics; diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 2fe1838d777..304052ad7ec 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -8,6 +8,7 @@ use alloy_primitives::{ B256, }; use alloy_trie::{BranchNodeCompact, TrieMask}; +use either::Either; use reth_execution_errors::SparseTrieResult; use reth_trie_common::{Nibbles, TrieNode}; @@ -291,3 +292,130 @@ pub enum LeafLookup { /// Leaf does not exist (exclusion proof found). NonExistent, } + +impl SparseTrieInterface for Either +where + A: SparseTrieInterface, + B: SparseTrieInterface, +{ + fn with_root( + self, + root: TrieNode, + masks: TrieMasks, + retain_updates: bool, + ) -> SparseTrieResult { + match self { + Self::Left(trie) => trie.with_root(root, masks, retain_updates).map(Self::Left), + Self::Right(trie) => trie.with_root(root, masks, retain_updates).map(Self::Right), + } + } + + fn with_updates(self, retain_updates: bool) -> Self { + match self { + Self::Left(trie) => Self::Left(trie.with_updates(retain_updates)), + Self::Right(trie) => Self::Right(trie.with_updates(retain_updates)), + } + } + + fn reserve_nodes(&mut self, additional: usize) { + match self { + Self::Left(trie) => trie.reserve_nodes(additional), + Self::Right(trie) => trie.reserve_nodes(additional), + } + } + + fn reveal_node( + &mut self, + path: Nibbles, + node: TrieNode, + masks: TrieMasks, + ) -> SparseTrieResult<()> { + match self { + Self::Left(trie) => trie.reveal_node(path, node, masks), + Self::Right(trie) => trie.reveal_node(path, node, masks), + } + } + + fn update_leaf( + &mut self, + full_path: Nibbles, + value: Vec, + provider: P, + ) -> SparseTrieResult<()> { + match self { + Self::Left(trie) => trie.update_leaf(full_path, value, provider), + Self::Right(trie) => trie.update_leaf(full_path, value, provider), + } + } + + fn remove_leaf( + &mut self, + full_path: &Nibbles, + provider: P, + ) -> SparseTrieResult<()> { + match self { + Self::Left(trie) => trie.remove_leaf(full_path, provider), + Self::Right(trie) => trie.remove_leaf(full_path, provider), + } + } + + fn root(&mut self) -> B256 { + match self { + Self::Left(trie) => trie.root(), + Self::Right(trie) => trie.root(), + } + } + + fn update_subtrie_hashes(&mut self) { + match self { + Self::Left(trie) => trie.update_subtrie_hashes(), + Self::Right(trie) => trie.update_subtrie_hashes(), + } + } + + fn get_leaf_value(&self, full_path: &Nibbles) -> Option<&Vec> { + match self { + Self::Left(trie) => trie.get_leaf_value(full_path), + Self::Right(trie) => trie.get_leaf_value(full_path), + } + } + + fn find_leaf( + &self, + full_path: &Nibbles, + expected_value: Option<&Vec>, + ) -> Result { + match self { + Self::Left(trie) => trie.find_leaf(full_path, expected_value), + Self::Right(trie) => trie.find_leaf(full_path, expected_value), + } + } + + fn take_updates(&mut self) -> SparseTrieUpdates { + match self { + Self::Left(trie) => trie.take_updates(), + Self::Right(trie) => trie.take_updates(), + } + } + + fn wipe(&mut self) { + match self { + Self::Left(trie) => trie.wipe(), + Self::Right(trie) => trie.wipe(), + } + } + + fn clear(&mut self) { + match self { + Self::Left(trie) => trie.clear(), + Self::Right(trie) => trie.clear(), + } + } + + fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { + match self { + Self::Left(trie) => trie.updates_ref(), + Self::Right(trie) => trie.updates_ref(), + } + } +} From ee11b424fc67f21c18af85fbaa235dc9729337aa Mon Sep 17 
00:00:00 2001 From: Matthias Seitz Date: Thu, 10 Jul 2025 23:05:03 +0200 Subject: [PATCH 131/305] chore: add helper convert into error object (#17354) --- crates/rpc/rpc-eth-types/src/error/mod.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index dcec8482f3d..1191196e1a0 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -202,6 +202,11 @@ impl EthApiError { { err.into() } + + /// Converts this error into the rpc error object. + pub fn into_rpc_err(self) -> jsonrpsee_types::error::ErrorObject<'static> { + self.into() + } } impl From for jsonrpsee_types::error::ErrorObject<'static> { @@ -586,9 +591,7 @@ impl RpcInvalidTransactionError { pub fn other(err: E) -> Self { Self::Other(Box::new(err)) } -} -impl RpcInvalidTransactionError { /// Returns the rpc error code for this error. pub const fn error_code(&self) -> i32 { match self { @@ -627,6 +630,11 @@ impl RpcInvalidTransactionError { OutOfGasError::InvalidOperand => Self::InvalidOperandOutOfGas(gas_limit), } } + + /// Converts this error into the rpc error object. + pub fn into_rpc_err(self) -> jsonrpsee_types::error::ErrorObject<'static> { + self.into() + } } impl From for jsonrpsee_types::error::ErrorObject<'static> { From 5479e115f9d139279ab844333e985068b036c6ae Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 10 Jul 2025 23:43:21 +0200 Subject: [PATCH 132/305] chore: add helper to access invalid tx error (#17353) --- crates/rpc/rpc-eth-types/src/error/mod.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/crates/rpc/rpc-eth-types/src/error/mod.rs b/crates/rpc/rpc-eth-types/src/error/mod.rs index 1191196e1a0..fdb0ade248e 100644 --- a/crates/rpc/rpc-eth-types/src/error/mod.rs +++ b/crates/rpc/rpc-eth-types/src/error/mod.rs @@ -187,6 +187,14 @@ impl EthApiError { matches!(self, Self::InvalidTransaction(RpcInvalidTransactionError::GasTooLow)) } + /// Returns the [`RpcInvalidTransactionError`] if this is a [`EthApiError::InvalidTransaction`] + pub const fn as_invalid_transaction(&self) -> Option<&RpcInvalidTransactionError> { + match self { + Self::InvalidTransaction(e) => Some(e), + _ => None, + } + } + /// Converts the given [`StateOverrideError`] into a new [`EthApiError`] instance. 
pub fn from_state_overrides_err(err: StateOverrideError) -> Self where From e263daebce0d2f903521e1fdc8fa4407d18b11a7 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 11 Jul 2025 00:04:24 +0200 Subject: [PATCH 133/305] chore: broadcast raw tx for opethapi (#17342) --- Cargo.lock | 1 - crates/optimism/rpc/Cargo.toml | 1 - crates/optimism/rpc/src/eth/mod.rs | 9 --------- crates/optimism/rpc/src/eth/transaction.rs | 4 ++++ 4 files changed, 4 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e0b4a9e5e1..b9c9b7dc482 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9420,7 +9420,6 @@ dependencies = [ "op-revm", "parking_lot", "reqwest", - "reth-chain-state", "reth-chainspec", "reth-evm", "reth-metrics", diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index d31de8a0b43..954722b3fd4 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -16,7 +16,6 @@ workspace = true reth-evm.workspace = true reth-primitives-traits.workspace = true reth-storage-api.workspace = true -reth-chain-state.workspace = true reth-rpc-eth-api = { workspace = true, features = ["op"] } reth-rpc-eth-types.workspace = true reth-rpc-server-types.workspace = true diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 29384e3aa0b..6f2bc1b0b19 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -13,7 +13,6 @@ use alloy_primitives::U256; use eyre::WrapErr; use op_alloy_network::Optimism; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; -use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; @@ -83,19 +82,11 @@ impl OpEthApi { tx_resp_builder: RpcConverter::with_mapper(OpTxInfoMapper::new(inner)), } } -} -impl OpEthApi -where - N: OpNodeCore< - Provider: BlockReaderIdExt + ChainSpecProvider + CanonStateSubscriptions + Clone + 'static, - >, -{ /// Returns a reference to the [`EthApiNodeBackend`]. pub fn eth_api(&self) -> &EthApiNodeBackend { self.inner.eth_api() } - /// Returns the configured sequencer client, if any. pub fn sequencer_client(&self) -> Option<&SequencerClient> { self.inner.sequencer_client() diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 30422316ad9..106fe85b1f0 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -39,6 +39,10 @@ where /// Returns the hash of the transaction. async fn send_raw_transaction(&self, tx: Bytes) -> Result { let recovered = recover_raw_transaction(&tx)?; + + // broadcast raw transaction to subscribers if there is any. 
+ self.eth_api().broadcast_raw_transaction(tx.clone()); + let pool_transaction = ::Transaction::from_pooled(recovered); // On optimism, transactions are forwarded directly to the sequencer to be included in From 2b142fb1981e48562a0ca09b1d0cec0801809203 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 10 Jul 2025 18:57:00 -0400 Subject: [PATCH 134/305] feat(trie): add HashedPostState::clear (#17358) --- crates/trie/common/src/hashed_state.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index b6f60e2b2a1..374f36fdd44 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -334,6 +334,12 @@ impl HashedPostState { HashedPostStateSorted { accounts, storages } } + + /// Clears the account and storage maps of this `HashedPostState`. + pub fn clear(&mut self) { + self.accounts.clear(); + self.storages.clear(); + } } /// Representation of in-memory hashed storage. From a1dd69ee0e5e2b64f88cc019c474bd6d12c1d388 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 10 Jul 2025 18:57:06 -0400 Subject: [PATCH 135/305] feat(trie): add TrieUpdates::clear (#17359) --- crates/trie/common/src/updates.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index c296589f65e..be62f38b967 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -133,6 +133,13 @@ impl TrieUpdates { .collect(), } } + + /// Clears the nodes and storage trie maps in this `TrieUpdates`. + pub fn clear(&mut self) { + self.account_nodes.clear(); + self.removed_nodes.clear(); + self.storage_tries.clear(); + } } /// Trie updates for storage trie of a single account. 
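The two `clear` helpers introduced in the last two patches reset the maps in place instead of dropping them. A minimal sketch of the reuse pattern this enables, assuming the types come from `reth-trie-common` and that the elided fill/apply steps stand in for whatever produces and consumes the updates:

```rust
use reth_trie_common::{updates::TrieUpdates, HashedPostState};

// Hypothetical per-block loop; only `clear()` comes from the patches above.
fn process_blocks(block_count: u64) {
    let mut post_state = HashedPostState::default();
    let mut trie_updates = TrieUpdates::default();
    for _block in 0..block_count {
        // ... fill `post_state` and `trie_updates` for this block ...
        // ... hand them off / apply them ...

        // Reset both for the next iteration; clearing keeps the maps'
        // existing allocations, avoiding per-block reallocation.
        post_state.clear();
        trie_updates.clear();
    }
}
```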
From 4560ac4fe73b9437b74387665195692ae1bc607c Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 11 Jul 2025 02:43:32 +0300 Subject: [PATCH 136/305] feat: support isthmus in reth-bench (#17351) --- Cargo.lock | 1 + bin/reth-bench/Cargo.toml | 1 + bin/reth-bench/src/bench/context.rs | 11 +- bin/reth-bench/src/bench/new_payload_fcu.rs | 52 ++-- bin/reth-bench/src/bench/new_payload_only.rs | 42 +-- bin/reth-bench/src/valid_payload.rs | 275 ++++++------------- crates/payload/primitives/src/lib.rs | 11 + 7 files changed, 142 insertions(+), 251 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b9c9b7dc482..430f63f076c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7262,6 +7262,7 @@ dependencies = [ "futures", "humantime", "op-alloy-consensus", + "op-alloy-rpc-types-engine", "reqwest", "reth-cli-runner", "reth-cli-util", diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index 640c582b7f4..f677521567a 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -35,6 +35,7 @@ alloy-transport-ipc.workspace = true alloy-transport-ws.workspace = true alloy-transport.workspace = true op-alloy-consensus = { workspace = true, features = ["alloy-compat"] } +op-alloy-rpc-types-engine = { workspace = true, features = ["serde"] } # reqwest reqwest = { workspace = true, default-features = false, features = ["rustls-tls-native-roots"] } diff --git a/bin/reth-bench/src/bench/context.rs b/bin/reth-bench/src/bench/context.rs index e5b1b363449..197af246c19 100644 --- a/bin/reth-bench/src/bench/context.rs +++ b/bin/reth-bench/src/bench/context.rs @@ -3,6 +3,7 @@ use crate::{authenticated_transport::AuthenticatedTransportConnect, bench_mode::BenchMode}; use alloy_eips::BlockNumberOrTag; +use alloy_primitives::address; use alloy_provider::{network::AnyNetwork, Provider, RootProvider}; use alloy_rpc_client::ClientBuilder; use alloy_rpc_types_engine::JwtSecret; @@ -25,6 +26,8 @@ pub(crate) struct BenchContext { pub(crate) benchmark_mode: BenchMode, /// The next block to fetch. pub(crate) next_block: u64, + /// Whether the chain is an OP rollup. + pub(crate) is_optimism: bool, } impl BenchContext { @@ -44,6 +47,12 @@ impl BenchContext { let client = ClientBuilder::default().http(rpc_url.parse()?); let block_provider = RootProvider::::new(client); + // Check if this is an OP chain by checking code at a predeploy address. + let is_optimism = !block_provider + .get_code_at(address!("0x420000000000000000000000000000000000000F")) + .await? + .is_empty(); + // If neither `--from` nor `--to` are provided, we will run the benchmark continuously, // starting at the latest block. 
let mut benchmark_mode = BenchMode::new(bench_args.from, bench_args.to)?; @@ -94,6 +103,6 @@ impl BenchContext { }; let next_block = first_block.header.number + 1; - Ok(Self { auth_provider, block_provider, benchmark_mode, next_block }) + Ok(Self { auth_provider, block_provider, benchmark_mode, next_block, is_optimism }) } } diff --git a/bin/reth-bench/src/bench/new_payload_fcu.rs b/bin/reth-bench/src/bench/new_payload_fcu.rs index 76166197a73..ac0ab66a864 100644 --- a/bin/reth-bench/src/bench/new_payload_fcu.rs +++ b/bin/reth-bench/src/bench/new_payload_fcu.rs @@ -9,10 +9,10 @@ use crate::{ GAS_OUTPUT_SUFFIX, }, }, - valid_payload::{call_forkchoice_updated, call_new_payload}, + valid_payload::{block_to_new_payload, call_forkchoice_updated, call_new_payload}, }; use alloy_provider::Provider; -use alloy_rpc_types_engine::{ExecutionPayload, ForkchoiceState}; +use alloy_rpc_types_engine::ForkchoiceState; use clap::Parser; use csv::Writer; use humantime::parse_duration; @@ -39,32 +39,23 @@ pub struct Command { impl Command { /// Execute `benchmark new-payload-fcu` command pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { - let BenchContext { benchmark_mode, block_provider, auth_provider, mut next_block } = - BenchContext::new(&self.benchmark, self.rpc_url).await?; + let BenchContext { + benchmark_mode, + block_provider, + auth_provider, + mut next_block, + is_optimism, + } = BenchContext::new(&self.benchmark, self.rpc_url).await?; let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { let block_res = block_provider.get_block_by_number(next_block.into()).full().await; let block = block_res.unwrap().unwrap(); + let header = block.header.clone(); - let block = block - .into_inner() - .map_header(|header| header.map(|h| h.into_header_with_defaults())) - .try_map_transactions(|tx| { - // try to convert unknowns into op type so that we can also support optimism - tx.try_into_either::() - }) - .unwrap() - .into_consensus(); - - let blob_versioned_hashes = - block.body.blob_versioned_hashes_iter().copied().collect::>(); - - // Convert to execution payload - let (payload, sidecar) = ExecutionPayload::from_block_slow(&block); - let header = block.header; - let head_block_hash = payload.block_hash(); + let (version, params) = block_to_new_payload(block, is_optimism).unwrap(); + let head_block_hash = header.hash; let safe_block_hash = block_provider.get_block_by_number(header.number.saturating_sub(32).into()); @@ -81,9 +72,8 @@ impl Command { sender .send(( header, - blob_versioned_hashes, - payload, - sidecar, + version, + params, head_block_hash, safe_block_hash, finalized_block_hash, @@ -98,7 +88,7 @@ impl Command { let total_benchmark_duration = Instant::now(); let mut total_wait_time = Duration::ZERO; - while let Some((header, versioned_hashes, payload, sidecar, head, safe, finalized)) = { + while let Some((header, version, params, head, safe, finalized)) = { let wait_start = Instant::now(); let result = receiver.recv().await; total_wait_time += wait_start.elapsed(); @@ -118,19 +108,11 @@ impl Command { }; let start = Instant::now(); - let message_version = call_new_payload( - &auth_provider, - payload, - sidecar, - header.parent_beacon_block_root, - versioned_hashes, - ) - .await?; + call_new_payload(&auth_provider, version, params).await?; let new_payload_result = NewPayloadResult { gas_used, latency: start.elapsed() }; - call_forkchoice_updated(&auth_provider, message_version, forkchoice_state, 
None) - .await?; + call_forkchoice_updated(&auth_provider, version, forkchoice_state, None).await?; // calculate the total duration and the fcu latency, record let total_latency = start.elapsed(); diff --git a/bin/reth-bench/src/bench/new_payload_only.rs b/bin/reth-bench/src/bench/new_payload_only.rs index 099ef8112e1..8dda7df4ecd 100644 --- a/bin/reth-bench/src/bench/new_payload_only.rs +++ b/bin/reth-bench/src/bench/new_payload_only.rs @@ -8,10 +8,9 @@ use crate::{ NEW_PAYLOAD_OUTPUT_SUFFIX, }, }, - valid_payload::call_new_payload, + valid_payload::{block_to_new_payload, call_new_payload}, }; use alloy_provider::Provider; -use alloy_rpc_types_engine::ExecutionPayload; use clap::Parser; use csv::Writer; use reth_cli_runner::CliContext; @@ -33,29 +32,25 @@ pub struct Command { impl Command { /// Execute `benchmark new-payload-only` command pub async fn execute(self, _ctx: CliContext) -> eyre::Result<()> { - let BenchContext { benchmark_mode, block_provider, auth_provider, mut next_block } = - BenchContext::new(&self.benchmark, self.rpc_url).await?; + let BenchContext { + benchmark_mode, + block_provider, + auth_provider, + mut next_block, + is_optimism, + } = BenchContext::new(&self.benchmark, self.rpc_url).await?; let (sender, mut receiver) = tokio::sync::mpsc::channel(1000); tokio::task::spawn(async move { while benchmark_mode.contains(next_block) { let block_res = block_provider.get_block_by_number(next_block.into()).full().await; let block = block_res.unwrap().unwrap(); - let block = block - .into_inner() - .map_header(|header| header.map(|h| h.into_header_with_defaults())) - .try_map_transactions(|tx| { - tx.try_into_either::() - }) - .unwrap() - .into_consensus(); - - let blob_versioned_hashes = - block.body.blob_versioned_hashes_iter().copied().collect::>(); - let (payload, sidecar) = ExecutionPayload::from_block_slow(&block); + let header = block.header.clone(); + + let (version, params) = block_to_new_payload(block, is_optimism).unwrap(); next_block += 1; - sender.send((block.header, blob_versioned_hashes, payload, sidecar)).await.unwrap(); + sender.send((header, version, params)).await.unwrap(); } }); @@ -64,7 +59,7 @@ impl Command { let total_benchmark_duration = Instant::now(); let mut total_wait_time = Duration::ZERO; - while let Some((header, versioned_hashes, payload, sidecar)) = { + while let Some((header, version, params)) = { let wait_start = Instant::now(); let result = receiver.recv().await; total_wait_time += wait_start.elapsed(); @@ -73,7 +68,7 @@ impl Command { // just put gas used here let gas_used = header.gas_used; - let block_number = payload.block_number(); + let block_number = header.number; debug!( target: "reth-bench", @@ -82,14 +77,7 @@ impl Command { ); let start = Instant::now(); - call_new_payload( - &auth_provider, - payload, - sidecar, - header.parent_beacon_block_root, - versioned_hashes, - ) - .await?; + call_new_payload(&auth_provider, version, params).await?; let new_payload_result = NewPayloadResult { gas_used, latency: start.elapsed() }; info!(%new_payload_result); diff --git a/bin/reth-bench/src/valid_payload.rs b/bin/reth-bench/src/valid_payload.rs index e2f83a0ec25..d253506b22b 100644 --- a/bin/reth-bench/src/valid_payload.rs +++ b/bin/reth-bench/src/valid_payload.rs @@ -2,53 +2,20 @@ //! response. This is useful for benchmarking, as it allows us to wait for a payload to be valid //! before sending additional calls. 
-use alloy_eips::eip7685::RequestsOrHash; -use alloy_primitives::B256; -use alloy_provider::{ext::EngineApi, Network, Provider}; +use alloy_eips::eip7685::Requests; +use alloy_provider::{ext::EngineApi, network::AnyRpcBlock, Network, Provider}; use alloy_rpc_types_engine::{ - ExecutionPayload, ExecutionPayloadInputV2, ExecutionPayloadSidecar, ExecutionPayloadV1, - ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadAttributes, PayloadStatus, + ExecutionPayload, ExecutionPayloadInputV2, ForkchoiceState, ForkchoiceUpdated, + PayloadAttributes, PayloadStatus, }; use alloy_transport::TransportResult; +use op_alloy_rpc_types_engine::OpExecutionPayloadV4; use reth_node_api::EngineApiMessageVersion; use tracing::error; /// An extension trait for providers that implement the engine API, to wait for a VALID response. #[async_trait::async_trait] pub trait EngineApiValidWaitExt: Send + Sync { - /// Calls `engine_newPayloadV1` with the given [`ExecutionPayloadV1`], and waits until the - /// response is VALID. - async fn new_payload_v1_wait( - &self, - payload: ExecutionPayloadV1, - ) -> TransportResult; - - /// Calls `engine_newPayloadV2` with the given [`ExecutionPayloadInputV2`], and waits until the - /// response is VALID. - async fn new_payload_v2_wait( - &self, - payload: ExecutionPayloadInputV2, - ) -> TransportResult; - - /// Calls `engine_newPayloadV3` with the given [`ExecutionPayloadV3`], parent beacon block root, - /// and versioned hashes, and waits until the response is VALID. - async fn new_payload_v3_wait( - &self, - payload: ExecutionPayloadV3, - versioned_hashes: Vec, - parent_beacon_block_root: B256, - ) -> TransportResult; - - /// Calls `engine_newPayloadV4` with the given [`ExecutionPayloadV3`], parent beacon block root, - /// versioned hashes, and requests hash, and waits until the response is VALID. - async fn new_payload_v4_wait( - &self, - payload: ExecutionPayloadV3, - versioned_hashes: Vec, - parent_beacon_block_root: B256, - requests_hash: B256, - ) -> TransportResult; - /// Calls `engine_forkChoiceUpdatedV1` with the given [`ForkchoiceState`] and optional /// [`PayloadAttributes`], and waits until the response is VALID. 
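///
/// Retries the call until the node reports a VALID status, returning the final
/// [`ForkchoiceUpdated`].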
async fn fork_choice_updated_v1_wait( @@ -80,122 +47,6 @@ where N: Network, P: Provider + EngineApi, { - async fn new_payload_v1_wait( - &self, - payload: ExecutionPayloadV1, - ) -> TransportResult { - let mut status = self.new_payload_v1(payload.clone()).await?; - while !status.is_valid() { - if status.is_invalid() { - error!(?status, ?payload, "Invalid newPayloadV1",); - panic!("Invalid newPayloadV1: {status:?}"); - } - status = self.new_payload_v1(payload.clone()).await?; - } - Ok(status) - } - - async fn new_payload_v2_wait( - &self, - payload: ExecutionPayloadInputV2, - ) -> TransportResult { - let mut status = self.new_payload_v2(payload.clone()).await?; - while !status.is_valid() { - if status.is_invalid() { - error!(?status, ?payload, "Invalid newPayloadV2",); - panic!("Invalid newPayloadV2: {status:?}"); - } - status = self.new_payload_v2(payload.clone()).await?; - } - Ok(status) - } - - async fn new_payload_v3_wait( - &self, - payload: ExecutionPayloadV3, - versioned_hashes: Vec, - parent_beacon_block_root: B256, - ) -> TransportResult { - let mut status = self - .new_payload_v3(payload.clone(), versioned_hashes.clone(), parent_beacon_block_root) - .await?; - while !status.is_valid() { - if status.is_invalid() { - error!( - ?status, - ?payload, - ?versioned_hashes, - ?parent_beacon_block_root, - "Invalid newPayloadV3", - ); - panic!("Invalid newPayloadV3: {status:?}"); - } - if status.is_syncing() { - return Err(alloy_json_rpc::RpcError::UnsupportedFeature( - "invalid range: no canonical state found for parent of requested block", - )) - } - status = self - .new_payload_v3(payload.clone(), versioned_hashes.clone(), parent_beacon_block_root) - .await?; - } - Ok(status) - } - - async fn new_payload_v4_wait( - &self, - payload: ExecutionPayloadV3, - versioned_hashes: Vec, - parent_beacon_block_root: B256, - requests_hash: B256, - ) -> TransportResult { - // We cannot use `self.new_payload_v4` because it does not support sending - // `RequestsOrHash::Hash` - - let mut status: PayloadStatus = self - .client() - .request( - "engine_newPayloadV4", - ( - payload.clone(), - versioned_hashes.clone(), - parent_beacon_block_root, - RequestsOrHash::Hash(requests_hash), - ), - ) - .await?; - while !status.is_valid() { - if status.is_invalid() { - error!( - ?status, - ?payload, - ?versioned_hashes, - ?parent_beacon_block_root, - "Invalid newPayloadV4", - ); - panic!("Invalid newPayloadV4: {status:?}"); - } - if status.is_syncing() { - return Err(alloy_json_rpc::RpcError::UnsupportedFeature( - "invalid range: no canonical state found for parent of requested block", - )) - } - status = self - .client() - .request( - "engine_newPayloadV4", - ( - payload.clone(), - versioned_hashes.clone(), - parent_beacon_block_root, - RequestsOrHash::Hash(requests_hash), - ), - ) - .await?; - } - Ok(status) - } - async fn fork_choice_updated_v1_wait( &self, fork_choice_state: ForkchoiceState, @@ -282,39 +133,60 @@ where } } -/// Calls the correct `engine_newPayload` method depending on the given [`ExecutionPayload`] and its -/// versioned variant. Returns the [`EngineApiMessageVersion`] depending on the payload's version. -/// -/// # Panics -/// If the given payload is a V3 payload, but a parent beacon block root is provided as `None`. 
-pub(crate) async fn call_new_payload>( - provider: P, - payload: ExecutionPayload, - sidecar: ExecutionPayloadSidecar, - parent_beacon_block_root: Option, - versioned_hashes: Vec, -) -> TransportResult { - match payload { +pub(crate) fn block_to_new_payload( + block: AnyRpcBlock, + is_optimism: bool, +) -> eyre::Result<(EngineApiMessageVersion, serde_json::Value)> { + let block = block + .into_inner() + .map_header(|header| header.map(|h| h.into_header_with_defaults())) + .try_map_transactions(|tx| { + // try to convert unknowns into op type so that we can also support optimism + tx.try_into_either::() + })? + .into_consensus(); + + // Convert to execution payload + let (payload, sidecar) = ExecutionPayload::from_block_slow(&block); + + let (version, params) = match payload { ExecutionPayload::V3(payload) => { - // We expect the caller to provide `parent_beacon_block_root` for V3 payloads. - let parent_beacon_block_root = parent_beacon_block_root - .expect("parent_beacon_block_root is required for V3 payloads and higher"); + let cancun = sidecar.cancun().unwrap(); - if let Some(requests_hash) = sidecar.requests_hash() { + if let Some(prague) = sidecar.prague() { + if is_optimism { + ( + EngineApiMessageVersion::V4, + serde_json::to_value(( + OpExecutionPayloadV4 { + payload_inner: payload, + withdrawals_root: block.withdrawals_root.unwrap(), + }, + cancun.versioned_hashes.clone(), + cancun.parent_beacon_block_root, + Requests::default(), + ))?, + ) + } else { + ( + EngineApiMessageVersion::V4, + serde_json::to_value(( + payload, + cancun.versioned_hashes.clone(), + cancun.parent_beacon_block_root, + prague.requests.requests_hash(), + ))?, ) - .await?; - Ok(EngineApiMessageVersion::V4) + } } else { - provider - .new_payload_v3_wait(payload, versioned_hashes, parent_beacon_block_root) - .await?; - Ok(EngineApiMessageVersion::V3) + ( + EngineApiMessageVersion::V3, + serde_json::to_value(( + payload, + cancun.versioned_hashes.clone(), + cancun.parent_beacon_block_root, + ))?, + ) } } ExecutionPayload::V2(payload) => { @@ -323,16 +195,43 @@ pub(crate) async fn call_new_payload>( withdrawals: Some(payload.withdrawals), }; - provider.new_payload_v2_wait(input).await?; - - Ok(EngineApiMessageVersion::V2) + (EngineApiMessageVersion::V2, serde_json::to_value((input,))?) } ExecutionPayload::V1(payload) => { - provider.new_payload_v1_wait(payload).await?; + (EngineApiMessageVersion::V1, serde_json::to_value((payload,))?) + } + }; - Ok(EngineApiMessageVersion::V1) + Ok((version, params)) +} + +/// Calls the `engine_newPayload` method matching the given [`EngineApiMessageVersion`], passing +/// the pre-serialized params and retrying until the node returns a VALID payload status. +/// +/// # Panics +/// If the node responds with an INVALID payload status. 
+pub(crate) async fn call_new_payload>( + provider: P, + version: EngineApiMessageVersion, + params: serde_json::Value, +) -> TransportResult<()> { + let method = version.method_name(); + + let mut status: PayloadStatus = provider.client().request(method, ¶ms).await?; + + while !status.is_valid() { + if status.is_invalid() { + error!(?status, ?params, "Invalid {method}",); + panic!("Invalid {method}: {status:?}"); + } + if status.is_syncing() { + return Err(alloy_json_rpc::RpcError::UnsupportedFeature( + "invalid range: no canonical state found for parent of requested block", + )) } + status = provider.client().request(method, ¶ms).await?; } + Ok(()) } /// Calls the correct `engine_forkchoiceUpdated` method depending on the given diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index fb78cae16c7..5770c1381aa 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -413,6 +413,17 @@ impl EngineApiMessageVersion { pub const fn is_v5(&self) -> bool { matches!(self, Self::V5) } + + /// Returns the method name for the given version. + pub const fn method_name(&self) -> &'static str { + match self { + Self::V1 => "engine_newPayloadV1", + Self::V2 => "engine_newPayloadV2", + Self::V3 => "engine_newPayloadV3", + Self::V4 => "engine_newPayloadV4", + Self::V5 => "engine_newPayloadV5", + } + } } /// Determines how we should choose the payload to return. From 06a7d0564993485e168d5205e65aa4f7ecd72faa Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Thu, 10 Jul 2025 19:47:25 -0400 Subject: [PATCH 137/305] feat(cli): add enable-parallel-sparse-trie flag (#17357) Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> --- crates/node/core/src/args/engine.rs | 6 ++++++ docs/vocs/docs/pages/cli/reth/node.mdx | 3 +++ 2 files changed, 9 insertions(+) diff --git a/crates/node/core/src/args/engine.rs b/crates/node/core/src/args/engine.rs index 8c03e42d9f2..64829c4c064 100644 --- a/crates/node/core/src/args/engine.rs +++ b/crates/node/core/src/args/engine.rs @@ -34,6 +34,10 @@ pub struct EngineArgs { #[arg(long = "engine.disable-caching-and-prewarming")] pub caching_and_prewarming_disabled: bool, + /// Enable the parallel sparse trie in the engine. + #[arg(long = "engine.parallel-sparse-trie", default_value = "false")] + pub parallel_sparse_trie_enabled: bool, + /// Enable state provider latency metrics. This allows the engine to collect and report stats /// about how long state provider calls took during execution, but this does introduce slight /// overhead to state provider calls. 
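To make the new flag's plumbing concrete, a hedged sketch of setting it programmatically; the crate path and the `tree_config` conversion method are assumptions (the method whose builder chain the next hunk patches is not named in this diff):

```rust
use reth_node_core::args::EngineArgs; // path assumed from the diff location

fn engine_tree_config() {
    // Hypothetical equivalent of passing `--engine.parallel-sparse-trie` on the CLI.
    let args = EngineArgs { parallel_sparse_trie_enabled: true, ..Default::default() };
    // The flag flows into the tree config via `with_enable_parallel_sparse_trie`,
    // as shown in the hunk below; `tree_config` is the assumed converter name.
    let _config = args.tree_config();
}
```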
@@ -97,6 +101,7 @@ impl Default for EngineArgs { state_root_task_compare_updates: false, caching_and_prewarming_enabled: true, caching_and_prewarming_disabled: false, + parallel_sparse_trie_enabled: false, state_provider_metrics: false, cross_block_cache_size: DEFAULT_CROSS_BLOCK_CACHE_SIZE_MB, accept_execution_requests_hash: false, @@ -118,6 +123,7 @@ impl EngineArgs { .with_memory_block_buffer_target(self.memory_block_buffer_target) .with_legacy_state_root(self.legacy_state_root_task_enabled) .without_caching_and_prewarming(self.caching_and_prewarming_disabled) + .with_enable_parallel_sparse_trie(self.parallel_sparse_trie_enabled) .with_state_provider_metrics(self.state_provider_metrics) .with_always_compare_trie_updates(self.state_root_task_compare_updates) .with_cross_block_cache_size(self.cross_block_cache_size * 1024 * 1024) diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 638c8fe33c0..d6a5e3e544b 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -761,6 +761,9 @@ Engine: --engine.disable-caching-and-prewarming Disable cross-block caching and parallel prewarming + --engine.parallel-sparse-trie + Enable the parallel sparse trie in the engine + --engine.state-provider-metrics Enable state provider latency metrics. This allows the engine to collect and report stats about how long state provider calls took during execution, but this does introduce slight overhead to state provider calls From f148cb31990bf69ebfac353f21f0468b86254b82 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Fri, 11 Jul 2025 11:21:08 +0200 Subject: [PATCH 138/305] feat(rpc): specialise contiguous receipt queries for logs (#16441) Co-authored-by: Matthias Seitz --- Cargo.lock | 1 + crates/rpc/rpc/Cargo.toml | 1 + crates/rpc/rpc/src/eth/filter.rs | 884 +++++++++++++++++- .../storage/provider/src/test_utils/mock.rs | 18 +- 4 files changed, 853 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 430f63f076c..ecc7ddd1906 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9839,6 +9839,7 @@ dependencies = [ "reth-chain-state", "reth-chainspec", "reth-consensus", + "reth-db-api", "reth-engine-primitives", "reth-errors", "reth-ethereum-primitives", diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 389502a2c73..4e6ca6ae24b 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -96,6 +96,7 @@ reth-evm-ethereum.workspace = true reth-testing-utils.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } +reth-db-api.workspace = true alloy-consensus.workspace = true rand.workspace = true diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 59d07a06f8b..4eecdee6490 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -1,7 +1,7 @@ //! 
`eth_` `Filter` RPC handler implementation use alloy_consensus::BlockHeader; -use alloy_primitives::TxHash; +use alloy_primitives::{Sealable, TxHash}; use alloy_rpc_types_eth::{ BlockNumHash, Filter, FilterBlockOption, FilterChanges, FilterId, Log, PendingTransactionFilterKind, @@ -10,7 +10,7 @@ use async_trait::async_trait; use futures::future::TryFutureExt; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_errors::ProviderError; -use reth_primitives_traits::NodePrimitives; +use reth_primitives_traits::{NodePrimitives, SealedHeader}; use reth_rpc_eth_api::{ EngineEthFilter, EthApiTypes, EthFilterApiServer, FullEthApiTypes, QueryLimits, RpcConvert, RpcNodeCore, RpcNodeCoreExt, RpcTransaction, @@ -22,15 +22,15 @@ use reth_rpc_eth_types::{ use reth_rpc_server_types::{result::rpc_error_with_code, ToRpcResult}; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, HeaderProvider, ProviderBlock, - ProviderReceipt, TransactionsProvider, + ProviderReceipt, ReceiptProvider, TransactionsProvider, }; use reth_tasks::TaskSpawner; use reth_transaction_pool::{NewSubpoolTransactionStream, PoolTransaction, TransactionPool}; use std::{ - collections::HashMap, + collections::{HashMap, VecDeque}, fmt, future::Future, - iter::StepBy, + iter::{Peekable, StepBy}, ops::RangeInclusive, sync::Arc, time::{Duration, Instant}, @@ -39,7 +39,7 @@ use tokio::{ sync::{mpsc::Receiver, oneshot, Mutex}, time::MissedTickBehavior, }; -use tracing::{error, trace}; +use tracing::{debug, error, trace}; impl EngineEthFilter for EthFilter where @@ -56,6 +56,18 @@ where } } +/// Threshold for deciding between cached and range mode processing +const CACHED_MODE_BLOCK_THRESHOLD: u64 = 250; + +/// Threshold for bloom filter matches that triggers reduced caching +const HIGH_BLOOM_MATCH_THRESHOLD: usize = 20; + +/// Threshold for bloom filter matches that triggers moderately reduced caching +const MODERATE_BLOOM_MATCH_THRESHOLD: usize = 10; + +/// Minimum block count to apply bloom filter match adjustments +const BLOOM_ADJUSTMENT_MIN_BLOCKS: u64 = 100; + /// The maximum number of headers we read at once when handling a range filter. const MAX_HEADERS_RANGE: u64 = 1_000; // with ~530bytes per header this is ~500kb @@ -562,63 +574,93 @@ where /// Returns an error if: /// - underlying database error async fn get_logs_in_block_range_inner( - &self, + self: Arc, filter: &Filter, from_block: u64, to_block: u64, limits: QueryLimits, ) -> Result, EthFilterError> { let mut all_logs = Vec::new(); + let mut matching_headers = Vec::new(); + + // get current chain tip to determine processing mode + let chain_tip = self.provider().best_block_number()?; - // loop over the range of new blocks and check logs if the filter matches the log's bloom - // filter + // first collect all headers that match the bloom filter for cached mode decision for (from, to) in BlockRangeInclusiveIter::new(from_block..=to_block, self.max_headers_range) { let headers = self.provider().headers_range(from..=to)?; - for (idx, header) in headers - .iter() - .enumerate() - .filter(|(_, header)| filter.matches_bloom(header.logs_bloom())) - { - // these are consecutive headers, so we can use the parent hash of the next - // block to get the current header's hash - let block_hash = match headers.get(idx + 1) { - Some(child) => child.parent_hash(), - None => self - .provider() - .block_hash(header.number())? 
- .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?, - }; - let num_hash = BlockNumHash::new(header.number(), block_hash); - if let Some((receipts, maybe_block)) = - self.eth_cache().get_receipts_and_maybe_block(num_hash.hash).await? - { - append_matching_block_logs( - &mut all_logs, - maybe_block - .map(ProviderOrBlock::Block) - .unwrap_or_else(|| ProviderOrBlock::Provider(self.provider())), - filter, - num_hash, - &receipts, - false, - header.timestamp(), - )?; + let mut headers_iter = headers.into_iter().peekable(); + + while let Some(header) = headers_iter.next() { + if !filter.matches_bloom(header.logs_bloom()) { + continue + } + + let current_number = header.number(); - // size check but only if range is multiple blocks, so we always return all - // logs of a single block - let is_multi_block_range = from_block != to_block; - if let Some(max_logs_per_response) = limits.max_logs_per_response { - if is_multi_block_range && all_logs.len() > max_logs_per_response { - return Err(EthFilterError::QueryExceedsMaxResults { - max_logs: max_logs_per_response, - from_block, - to_block: num_hash.number.saturating_sub(1), - }); - } + let block_hash = match headers_iter.peek() { + Some(next_header) if next_header.number() == current_number + 1 => { + // Headers are consecutive, use the more efficient parent_hash + next_header.parent_hash() } + _ => { + // Headers not consecutive or last header, calculate hash + header.hash_slow() + } + }; + + matching_headers.push(SealedHeader::new(header, block_hash)); + } + } + + // initialize the appropriate range mode based on collected headers + let mut range_mode = RangeMode::new( + self.clone(), + matching_headers, + from_block, + to_block, + self.max_headers_range, + chain_tip, + ); + + // iterate through the range mode to get receipts and blocks + while let Some(ReceiptBlockResult { receipts, recovered_block, header }) = + range_mode.next().await? + { + let num_hash = header.num_hash(); + append_matching_block_logs( + &mut all_logs, + recovered_block + .map(ProviderOrBlock::Block) + .unwrap_or_else(|| ProviderOrBlock::Provider(self.provider())), + filter, + num_hash, + &receipts, + false, + header.timestamp(), + )?; + + // size check but only if range is multiple blocks, so we always return all + // logs of a single block + let is_multi_block_range = from_block != to_block; + if let Some(max_logs_per_response) = limits.max_logs_per_response { + if is_multi_block_range && all_logs.len() > max_logs_per_response { + debug!( + target: "rpc::eth::filter", + logs_found = all_logs.len(), + max_logs_per_response, + from_block, + to_block = num_hash.number.saturating_sub(1), + "Query exceeded max logs per response limit" + ); + return Err(EthFilterError::QueryExceedsMaxResults { + max_logs: max_logs_per_response, + from_block, + to_block: num_hash.number.saturating_sub(1), + }); } } } @@ -841,11 +883,218 @@ impl From for EthFilterError { } } +/// Helper type for the common pattern of returning receipts, block and the original header that is +/// a match for the filter. +struct ReceiptBlockResult
<P>
+where
+    P: ReceiptProvider + BlockReader,
+{
+    /// We always need the entire receipts for the matching block.
+    receipts: Arc<Vec<ProviderReceipt<P>>>,
+    /// Block can be optional and we can fetch it lazily when needed.
+    recovered_block: Option<Arc<RecoveredBlock<ProviderBlock<P>>>>,
+    /// The header of the block.
+    header: SealedHeader<<P as HeaderProvider>
::Header>, +} + +/// Represents different modes for processing block ranges when filtering logs +enum RangeMode< + Eth: RpcNodeCoreExt + EthApiTypes + 'static, +> { + /// Use cache-based processing for recent blocks + Cached(CachedMode), + /// Use range-based processing for older blocks + Range(RangeBlockMode), +} + +impl< + Eth: RpcNodeCoreExt + EthApiTypes + 'static, + > RangeMode +{ + /// Creates a new `RangeMode`. + fn new( + filter_inner: Arc>, + sealed_headers: Vec::Header>>, + from_block: u64, + to_block: u64, + max_headers_range: u64, + chain_tip: u64, + ) -> Self { + let block_count = to_block - from_block + 1; + let distance_from_tip = chain_tip.saturating_sub(to_block); + + // Determine if we should use cached mode based on range characteristics + let use_cached_mode = + Self::should_use_cached_mode(&sealed_headers, block_count, distance_from_tip); + + if use_cached_mode && !sealed_headers.is_empty() { + Self::Cached(CachedMode { filter_inner, headers_iter: sealed_headers.into_iter() }) + } else { + Self::Range(RangeBlockMode { + filter_inner, + iter: sealed_headers.into_iter().peekable(), + next: VecDeque::new(), + max_range: max_headers_range as usize, + }) + } + } + + /// Determines whether to use cached mode based on bloom filter matches and range size + const fn should_use_cached_mode( + headers: &[SealedHeader<::Header>], + block_count: u64, + distance_from_tip: u64, + ) -> bool { + // Headers are already filtered by bloom, so count equals length + let bloom_matches = headers.len(); + + // Calculate adjusted threshold based on bloom matches + let adjusted_threshold = Self::calculate_adjusted_threshold(block_count, bloom_matches); + + block_count <= adjusted_threshold && distance_from_tip <= adjusted_threshold + } + + /// Calculates the adjusted cache threshold based on bloom filter matches + const fn calculate_adjusted_threshold(block_count: u64, bloom_matches: usize) -> u64 { + // Only apply adjustments for larger ranges + if block_count <= BLOOM_ADJUSTMENT_MIN_BLOCKS { + return CACHED_MODE_BLOCK_THRESHOLD; + } + + match bloom_matches { + n if n > HIGH_BLOOM_MATCH_THRESHOLD => CACHED_MODE_BLOCK_THRESHOLD / 2, + n if n > MODERATE_BLOOM_MATCH_THRESHOLD => (CACHED_MODE_BLOCK_THRESHOLD * 3) / 4, + _ => CACHED_MODE_BLOCK_THRESHOLD, + } + } + + /// Gets the next (receipts, `maybe_block`, header, `block_hash`) tuple. + async fn next(&mut self) -> Result>, EthFilterError> { + match self { + Self::Cached(cached) => cached.next().await, + Self::Range(range) => range.next().await, + } + } +} + +/// Mode for processing blocks using cache optimization for recent blocks +struct CachedMode< + Eth: RpcNodeCoreExt + EthApiTypes + 'static, +> { + filter_inner: Arc>, + headers_iter: std::vec::IntoIter::Header>>, +} + +impl< + Eth: RpcNodeCoreExt + EthApiTypes + 'static, + > CachedMode +{ + async fn next(&mut self) -> Result>, EthFilterError> { + for header in self.headers_iter.by_ref() { + // Use get_receipts_and_maybe_block which has automatic fallback to provider + if let Some((receipts, maybe_block)) = + self.filter_inner.eth_cache().get_receipts_and_maybe_block(header.hash()).await? 
+ { + return Ok(Some(ReceiptBlockResult { + receipts, + recovered_block: maybe_block, + header, + })); + } + } + + Ok(None) // No more headers + } +} + +/// Mode for processing blocks using range queries for older blocks +struct RangeBlockMode< + Eth: RpcNodeCoreExt + EthApiTypes + 'static, +> { + filter_inner: Arc>, + iter: Peekable::Header>>>, + next: VecDeque>, + max_range: usize, +} + +impl< + Eth: RpcNodeCoreExt + EthApiTypes + 'static, + > RangeBlockMode +{ + async fn next(&mut self) -> Result>, EthFilterError> { + if let Some(result) = self.next.pop_front() { + return Ok(Some(result)); + } + + let Some(next_header) = self.iter.next() else { + return Ok(None); + }; + + let mut range_headers = Vec::with_capacity(self.max_range); + range_headers.push(next_header); + + // Collect consecutive blocks up to max_range size + while range_headers.len() < self.max_range { + let Some(peeked) = self.iter.peek() else { break }; + let Some(last_header) = range_headers.last() else { break }; + + let expected_next = last_header.header().number() + 1; + if peeked.header().number() != expected_next { + break; // Non-consecutive block, stop here + } + + let Some(next_header) = self.iter.next() else { break }; + range_headers.push(next_header); + } + + // Process each header individually to avoid queuing for all receipts + for header in range_headers { + // First check if already cached to avoid unnecessary provider calls + let (maybe_block, maybe_receipts) = self + .filter_inner + .eth_cache() + .maybe_cached_block_and_receipts(header.hash()) + .await?; + + let receipts = match maybe_receipts { + Some(receipts) => receipts, + None => { + // Not cached - fetch directly from provider without queuing + match self.filter_inner.provider().receipts_by_block(header.hash().into())? 
{ + Some(receipts) => Arc::new(receipts), + None => continue, // No receipts found + } + } + }; + + if !receipts.is_empty() { + self.next.push_back(ReceiptBlockResult { + receipts, + recovered_block: maybe_block, + header, + }); + } + } + + Ok(self.next.pop_front()) + } +} + #[cfg(test)] mod tests { use super::*; + use crate::{eth::EthApi, EthApiBuilder}; + use alloy_primitives::FixedBytes; use rand::Rng; + use reth_chainspec::ChainSpecProvider; + use reth_ethereum_primitives::TxType; + use reth_evm_ethereum::EthEvmConfig; + use reth_network_api::noop::NoopNetwork; + use reth_provider::test_utils::MockEthProvider; + use reth_tasks::TokioTaskExecutor; use reth_testing_utils::generators; + use reth_transaction_pool::test_utils::{testing_pool, TestPool}; + use std::{collections::VecDeque, sync::Arc}; #[test] fn test_block_range_iter() { @@ -868,4 +1117,541 @@ mod tests { assert_eq!(end, *range.end()); } + + // Helper function to create a test EthApi instance + fn build_test_eth_api( + provider: MockEthProvider, + ) -> EthApi { + EthApiBuilder::new( + provider.clone(), + testing_pool(), + NoopNetwork::default(), + EthEvmConfig::new(provider.chain_spec()), + ) + .build() + } + + #[tokio::test] + async fn test_range_block_mode_empty_range() { + let provider = MockEthProvider::default(); + let eth_api = build_test_eth_api(provider); + + let eth_filter = super::EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers = vec![]; + let max_range = 100; + + let mut range_mode = RangeBlockMode { + filter_inner, + iter: headers.into_iter().peekable(), + next: VecDeque::new(), + max_range, + }; + + let result = range_mode.next().await; + assert!(result.is_ok()); + assert!(result.unwrap().is_none()); + } + + #[tokio::test] + async fn test_range_block_mode_queued_results_priority() { + let provider = MockEthProvider::default(); + let eth_api = build_test_eth_api(provider); + + let eth_filter = super::EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers = vec![ + SealedHeader::new( + alloy_consensus::Header { number: 100, ..Default::default() }, + FixedBytes::random(), + ), + SealedHeader::new( + alloy_consensus::Header { number: 101, ..Default::default() }, + FixedBytes::random(), + ), + ]; + + // create specific mock results to test ordering + let expected_block_hash_1 = FixedBytes::from([1u8; 32]); + let expected_block_hash_2 = FixedBytes::from([2u8; 32]); + + // create mock receipts to test receipt handling + let mock_receipt_1 = reth_ethereum_primitives::Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 100_000, + logs: vec![], + success: true, + }; + let mock_receipt_2 = reth_ethereum_primitives::Receipt { + tx_type: TxType::Eip1559, + cumulative_gas_used: 200_000, + logs: vec![], + success: true, + }; + let mock_receipt_3 = reth_ethereum_primitives::Receipt { + tx_type: TxType::Eip2930, + cumulative_gas_used: 150_000, + logs: vec![], + success: false, // Different success status + }; + + let mock_result_1 = ReceiptBlockResult { + receipts: Arc::new(vec![mock_receipt_1.clone(), mock_receipt_2.clone()]), + recovered_block: None, + header: SealedHeader::new( + alloy_consensus::Header { number: 42, ..Default::default() }, + expected_block_hash_1, + ), + }; + + let mock_result_2 = ReceiptBlockResult { + receipts: Arc::new(vec![mock_receipt_3.clone()]), + recovered_block: None, + header: 
SealedHeader::new( + alloy_consensus::Header { number: 43, ..Default::default() }, + expected_block_hash_2, + ), + }; + + let mut range_mode = RangeBlockMode { + filter_inner, + iter: headers.into_iter().peekable(), + next: VecDeque::from([mock_result_1, mock_result_2]), // Queue two results + max_range: 100, + }; + + // first call should return the first queued result (FIFO order) + let result1 = range_mode.next().await; + assert!(result1.is_ok()); + let receipt_result1 = result1.unwrap().unwrap(); + assert_eq!(receipt_result1.header.hash(), expected_block_hash_1); + assert_eq!(receipt_result1.header.number, 42); + + // verify receipts + assert_eq!(receipt_result1.receipts.len(), 2); + assert_eq!(receipt_result1.receipts[0].tx_type, mock_receipt_1.tx_type); + assert_eq!( + receipt_result1.receipts[0].cumulative_gas_used, + mock_receipt_1.cumulative_gas_used + ); + assert_eq!(receipt_result1.receipts[0].success, mock_receipt_1.success); + assert_eq!(receipt_result1.receipts[1].tx_type, mock_receipt_2.tx_type); + assert_eq!( + receipt_result1.receipts[1].cumulative_gas_used, + mock_receipt_2.cumulative_gas_used + ); + assert_eq!(receipt_result1.receipts[1].success, mock_receipt_2.success); + + // second call should return the second queued result + let result2 = range_mode.next().await; + assert!(result2.is_ok()); + let receipt_result2 = result2.unwrap().unwrap(); + assert_eq!(receipt_result2.header.hash(), expected_block_hash_2); + assert_eq!(receipt_result2.header.number, 43); + + // verify receipts + assert_eq!(receipt_result2.receipts.len(), 1); + assert_eq!(receipt_result2.receipts[0].tx_type, mock_receipt_3.tx_type); + assert_eq!( + receipt_result2.receipts[0].cumulative_gas_used, + mock_receipt_3.cumulative_gas_used + ); + assert_eq!(receipt_result2.receipts[0].success, mock_receipt_3.success); + + // queue should now be empty + assert!(range_mode.next.is_empty()); + + let result3 = range_mode.next().await; + assert!(result3.is_ok()); + } + + #[tokio::test] + async fn test_range_block_mode_single_block_no_receipts() { + let provider = MockEthProvider::default(); + let eth_api = build_test_eth_api(provider); + + let eth_filter = super::EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers = vec![SealedHeader::new( + alloy_consensus::Header { number: 100, ..Default::default() }, + FixedBytes::random(), + )]; + + let mut range_mode = RangeBlockMode { + filter_inner, + iter: headers.into_iter().peekable(), + next: VecDeque::new(), + max_range: 100, + }; + + let result = range_mode.next().await; + assert!(result.is_ok()); + } + + #[tokio::test] + async fn test_range_block_mode_provider_receipts() { + let provider = MockEthProvider::default(); + + let header_1 = alloy_consensus::Header { number: 100, ..Default::default() }; + let header_2 = alloy_consensus::Header { number: 101, ..Default::default() }; + let header_3 = alloy_consensus::Header { number: 102, ..Default::default() }; + + let block_hash_1 = FixedBytes::random(); + let block_hash_2 = FixedBytes::random(); + let block_hash_3 = FixedBytes::random(); + + provider.add_header(block_hash_1, header_1.clone()); + provider.add_header(block_hash_2, header_2.clone()); + provider.add_header(block_hash_3, header_3.clone()); + + // create mock receipts to test provider fetching with mock logs + let mock_log = alloy_primitives::Log { + address: alloy_primitives::Address::ZERO, + data: alloy_primitives::LogData::new_unchecked(vec![], 
alloy_primitives::Bytes::new()), + }; + + let receipt_100_1 = reth_ethereum_primitives::Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 21_000, + logs: vec![mock_log.clone()], + success: true, + }; + let receipt_100_2 = reth_ethereum_primitives::Receipt { + tx_type: TxType::Eip1559, + cumulative_gas_used: 42_000, + logs: vec![mock_log.clone()], + success: true, + }; + let receipt_101_1 = reth_ethereum_primitives::Receipt { + tx_type: TxType::Eip2930, + cumulative_gas_used: 30_000, + logs: vec![mock_log.clone()], + success: false, + }; + + provider.add_receipts(100, vec![receipt_100_1.clone(), receipt_100_2.clone()]); + provider.add_receipts(101, vec![receipt_101_1.clone()]); + + let eth_api = build_test_eth_api(provider); + + let eth_filter = super::EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers = vec![ + SealedHeader::new(header_1, block_hash_1), + SealedHeader::new(header_2, block_hash_2), + SealedHeader::new(header_3, block_hash_3), + ]; + + let mut range_mode = RangeBlockMode { + filter_inner, + iter: headers.into_iter().peekable(), + next: VecDeque::new(), + max_range: 3, // include the 3 blocks in the first queried results + }; + + // first call should fetch receipts from provider and return first block with receipts + let result = range_mode.next().await; + assert!(result.is_ok()); + let receipt_result = result.unwrap().unwrap(); + + assert_eq!(receipt_result.header.hash(), block_hash_1); + assert_eq!(receipt_result.header.number, 100); + assert_eq!(receipt_result.receipts.len(), 2); + + // verify receipts + assert_eq!(receipt_result.receipts[0].tx_type, receipt_100_1.tx_type); + assert_eq!( + receipt_result.receipts[0].cumulative_gas_used, + receipt_100_1.cumulative_gas_used + ); + assert_eq!(receipt_result.receipts[0].success, receipt_100_1.success); + + assert_eq!(receipt_result.receipts[1].tx_type, receipt_100_2.tx_type); + assert_eq!( + receipt_result.receipts[1].cumulative_gas_used, + receipt_100_2.cumulative_gas_used + ); + assert_eq!(receipt_result.receipts[1].success, receipt_100_2.success); + + // second call should return the second block with receipts + let result2 = range_mode.next().await; + assert!(result2.is_ok()); + let receipt_result2 = result2.unwrap().unwrap(); + + assert_eq!(receipt_result2.header.hash(), block_hash_2); + assert_eq!(receipt_result2.header.number, 101); + assert_eq!(receipt_result2.receipts.len(), 1); + + // verify receipts + assert_eq!(receipt_result2.receipts[0].tx_type, receipt_101_1.tx_type); + assert_eq!( + receipt_result2.receipts[0].cumulative_gas_used, + receipt_101_1.cumulative_gas_used + ); + assert_eq!(receipt_result2.receipts[0].success, receipt_101_1.success); + + // third call should return None since no more blocks with receipts + let result3 = range_mode.next().await; + assert!(result3.is_ok()); + assert!(result3.unwrap().is_none()); + } + + #[tokio::test] + async fn test_range_block_mode_iterator_exhaustion() { + let provider = MockEthProvider::default(); + let eth_api = build_test_eth_api(provider); + + let eth_filter = super::EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers = vec![ + SealedHeader::new( + alloy_consensus::Header { number: 100, ..Default::default() }, + FixedBytes::random(), + ), + SealedHeader::new( + alloy_consensus::Header { number: 101, ..Default::default() }, + 
FixedBytes::random(), + ), + ]; + + let mut range_mode = RangeBlockMode { + filter_inner, + iter: headers.into_iter().peekable(), + next: VecDeque::new(), + max_range: 1, + }; + + let result1 = range_mode.next().await; + assert!(result1.is_ok()); + + assert!(range_mode.iter.peek().is_some()); + + let result2 = range_mode.next().await; + assert!(result2.is_ok()); + + // now iterator should be exhausted + assert!(range_mode.iter.peek().is_none()); + + // further calls should return None + let result3 = range_mode.next().await; + assert!(result3.is_ok()); + assert!(result3.unwrap().is_none()); + } + + #[tokio::test] + async fn test_cached_mode_with_mock_receipts() { + // create test data + let test_hash = FixedBytes::from([42u8; 32]); + let test_block_number = 100u64; + let test_header = SealedHeader::new( + alloy_consensus::Header { + number: test_block_number, + gas_used: 50_000, + ..Default::default() + }, + test_hash, + ); + + // add a mock receipt to the provider with a mock log + let mock_log = alloy_primitives::Log { + address: alloy_primitives::Address::ZERO, + data: alloy_primitives::LogData::new_unchecked(vec![], alloy_primitives::Bytes::new()), + }; + + let mock_receipt = reth_ethereum_primitives::Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 21_000, + logs: vec![mock_log], + success: true, + }; + + let provider = MockEthProvider::default(); + provider.add_header(test_hash, test_header.header().clone()); + provider.add_receipts(test_block_number, vec![mock_receipt.clone()]); + + let eth_api = build_test_eth_api(provider); + let eth_filter = super::EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers = vec![test_header.clone()]; + + let mut cached_mode = CachedMode { filter_inner, headers_iter: headers.into_iter() }; + + // should find the receipt from provider fallback (cache will be empty) + let result = cached_mode.next().await.expect("next should succeed"); + let receipt_block_result = result.expect("should have receipt result"); + assert_eq!(receipt_block_result.header.hash(), test_hash); + assert_eq!(receipt_block_result.header.number, test_block_number); + assert_eq!(receipt_block_result.receipts.len(), 1); + assert_eq!(receipt_block_result.receipts[0].tx_type, mock_receipt.tx_type); + assert_eq!( + receipt_block_result.receipts[0].cumulative_gas_used, + mock_receipt.cumulative_gas_used + ); + assert_eq!(receipt_block_result.receipts[0].success, mock_receipt.success); + + // iterator should be exhausted + let result2 = cached_mode.next().await; + assert!(result2.is_ok()); + assert!(result2.unwrap().is_none()); + } + + #[tokio::test] + async fn test_cached_mode_empty_headers() { + let provider = MockEthProvider::default(); + let eth_api = build_test_eth_api(provider); + + let eth_filter = super::EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + let filter_inner = eth_filter.inner; + + let headers: Vec> = vec![]; + + let mut cached_mode = CachedMode { filter_inner, headers_iter: headers.into_iter() }; + + // should immediately return None for empty headers + let result = cached_mode.next().await.expect("next should succeed"); + assert!(result.is_none()); + } + + #[tokio::test] + async fn test_non_consecutive_headers_after_bloom_filter() { + let provider = MockEthProvider::default(); + + // Create 4 headers where only blocks 100 and 102 will match bloom filter + let mut expected_hashes = vec![]; + let mut 
prev_hash = alloy_primitives::B256::default(); + + // Create a transaction for blocks that will have receipts + use alloy_consensus::TxLegacy; + use reth_ethereum_primitives::{TransactionSigned, TxType}; + + let tx_inner = TxLegacy { + chain_id: Some(1), + nonce: 0, + gas_price: 21_000, + gas_limit: 21_000, + to: alloy_primitives::TxKind::Call(alloy_primitives::Address::ZERO), + value: alloy_primitives::U256::ZERO, + input: alloy_primitives::Bytes::new(), + }; + let signature = alloy_primitives::Signature::test_signature(); + let tx = TransactionSigned::new_unhashed(tx_inner.into(), signature); + + for i in 100u64..=103 { + let header = alloy_consensus::Header { + number: i, + parent_hash: prev_hash, + // Set bloom to match filter only for blocks 100 and 102 + logs_bloom: if i == 100 || i == 102 { + alloy_primitives::Bloom::from([1u8; 256]) + } else { + alloy_primitives::Bloom::default() + }, + ..Default::default() + }; + + let hash = header.hash_slow(); + expected_hashes.push(hash); + prev_hash = hash; + + // Add transaction to blocks that will have receipts (100 and 102) + let transactions = if i == 100 || i == 102 { vec![tx.clone()] } else { vec![] }; + + let block = reth_ethereum_primitives::Block { + header, + body: reth_ethereum_primitives::BlockBody { transactions, ..Default::default() }, + }; + provider.add_block(hash, block); + } + + // Add receipts with logs only to blocks that match bloom + let mock_log = alloy_primitives::Log { + address: alloy_primitives::Address::ZERO, + data: alloy_primitives::LogData::new_unchecked(vec![], alloy_primitives::Bytes::new()), + }; + + let receipt = reth_ethereum_primitives::Receipt { + tx_type: TxType::Legacy, + cumulative_gas_used: 21_000, + logs: vec![mock_log], + success: true, + }; + + provider.add_receipts(100, vec![receipt.clone()]); + provider.add_receipts(101, vec![]); + provider.add_receipts(102, vec![receipt.clone()]); + provider.add_receipts(103, vec![]); + + // Add block body indices for each block so receipts can be fetched + use reth_db_api::models::StoredBlockBodyIndices; + provider + .add_block_body_indices(100, StoredBlockBodyIndices { first_tx_num: 0, tx_count: 1 }); + provider + .add_block_body_indices(101, StoredBlockBodyIndices { first_tx_num: 1, tx_count: 0 }); + provider + .add_block_body_indices(102, StoredBlockBodyIndices { first_tx_num: 1, tx_count: 1 }); + provider + .add_block_body_indices(103, StoredBlockBodyIndices { first_tx_num: 2, tx_count: 0 }); + + let eth_api = build_test_eth_api(provider); + let eth_filter = EthFilter::new( + eth_api, + EthFilterConfig::default(), + Box::new(TokioTaskExecutor::default()), + ); + + // Use default filter which will match any non-empty bloom + let filter = Filter::default(); + + // Get logs in the range - this will trigger the bloom filtering + let logs = eth_filter + .inner + .clone() + .get_logs_in_block_range(filter, 100, 103, QueryLimits::default()) + .await + .expect("should succeed"); + + // We should get logs from blocks 100 and 102 only (bloom filtered) + assert_eq!(logs.len(), 2); + + assert_eq!(logs[0].block_number, Some(100)); + assert_eq!(logs[1].block_number, Some(102)); + + // Each block hash should be the hash of its own header, not derived from any other header + assert_eq!(logs[0].block_hash, Some(expected_hashes[0])); // block 100 + assert_eq!(logs[1].block_hash, Some(expected_hashes[2])); // block 102 + } } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 889712259c7..68f8c38e59d 100644 --- 
a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -64,6 +64,8 @@ pub struct MockEthProvider< pub chain_spec: Arc, /// Local state roots pub state_roots: Arc>>, + /// Local block body indices store + pub block_body_indices: Arc>>, tx: TxMock, prune_modes: Arc, } @@ -80,6 +82,7 @@ where accounts: self.accounts.clone(), chain_spec: self.chain_spec.clone(), state_roots: self.state_roots.clone(), + block_body_indices: self.block_body_indices.clone(), tx: self.tx.clone(), prune_modes: self.prune_modes.clone(), } @@ -96,6 +99,7 @@ impl MockEthProvider { accounts: Default::default(), chain_spec: Arc::new(reth_chainspec::ChainSpecBuilder::mainnet().build()), state_roots: Default::default(), + block_body_indices: Default::default(), tx: Default::default(), prune_modes: Default::default(), } @@ -156,6 +160,15 @@ impl MockEthProvider MockEthProvider StatePr impl BlockBodyIndicesProvider for MockEthProvider { - fn block_body_indices(&self, _num: u64) -> ProviderResult> { - Ok(None) + fn block_body_indices(&self, num: u64) -> ProviderResult> { + Ok(self.block_body_indices.lock().get(&num).copied()) } fn block_body_indices_range( &self, From 98c68c1f8a4c934cf6b57570213090e4d74bee28 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Fri, 11 Jul 2025 11:28:45 +0200 Subject: [PATCH 139/305] perf(trie): reuse update action buffers in parallel sparse trie processing (#17352) --- crates/trie/sparse-parallel/src/trie.rs | 89 ++++++++++++++++++------- 1 file changed, 64 insertions(+), 25 deletions(-) diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 9f9c251deba..4c0a02d0102 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -49,6 +49,9 @@ pub struct ParallelSparseTrie { branch_node_tree_masks: HashMap, /// When a bit is set, the corresponding child is stored as a hash in the database. branch_node_hash_masks: HashMap, + /// Reusable buffer pool used for collecting [`SparseTrieUpdatesAction`]s during hash + /// computations. 
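+    /// Buffers are popped from this pool when a subtrie hash computation starts and pushed
+    /// back once their actions have been drained, so the allocations are reused across runs.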
+ update_actions_buffers: Vec>, } impl Default for ParallelSparseTrie { @@ -63,6 +66,7 @@ impl Default for ParallelSparseTrie { updates: None, branch_node_tree_masks: HashMap::default(), branch_node_hash_masks: HashMap::default(), + update_actions_buffers: Vec::default(), } } } @@ -590,15 +594,16 @@ impl SparseTrieInterface for ParallelSparseTrie { #[cfg(not(feature = "std"))] // Update subtrie hashes serially if nostd - for ChangedSubtrie { index, mut subtrie, mut prefix_set } in subtries { - let mut update_actions = self.updates_enabled().then(|| Vec::new()); + for ChangedSubtrie { index, mut subtrie, mut prefix_set, mut update_actions_buf } in + subtries + { subtrie.update_hashes( &mut prefix_set, - &mut update_actions, + &mut update_actions_buf, &self.branch_node_tree_masks, &self.branch_node_hash_masks, ); - tx.send((index, subtrie, update_actions)).unwrap(); + tx.send((index, subtrie, update_actions_buf)).unwrap(); } #[cfg(feature = "std")] @@ -609,16 +614,22 @@ impl SparseTrieInterface for ParallelSparseTrie { let branch_node_hash_masks = &self.branch_node_hash_masks; subtries .into_par_iter() - .map(|ChangedSubtrie { index, mut subtrie, mut prefix_set }| { - let mut update_actions = self.updates_enabled().then(Vec::new); - subtrie.update_hashes( - &mut prefix_set, - &mut update_actions, - branch_node_tree_masks, - branch_node_hash_masks, - ); - (index, subtrie, update_actions) - }) + .map( + |ChangedSubtrie { + index, + mut subtrie, + mut prefix_set, + mut update_actions_buf, + }| { + subtrie.update_hashes( + &mut prefix_set, + &mut update_actions_buf, + branch_node_tree_masks, + branch_node_hash_masks, + ); + (index, subtrie, update_actions_buf) + }, + ) .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap()); } @@ -626,8 +637,15 @@ impl SparseTrieInterface for ParallelSparseTrie { // Return updated subtries back to the trie after executing any actions required on the // top-level `SparseTrieUpdates`. - for (index, subtrie, update_actions) in rx { - self.apply_subtrie_update_actions(update_actions); + for (index, subtrie, update_actions_buf) in rx { + if let Some(mut update_actions_buf) = update_actions_buf { + self.apply_subtrie_update_actions( + #[allow(clippy::iter_with_drain)] + update_actions_buf.drain(..), + ); + self.update_actions_buffers.push(update_actions_buf); + } + self.lower_subtries[index] = LowerSparseSubtrie::Revealed(subtrie); } } @@ -658,6 +676,8 @@ impl SparseTrieInterface for ParallelSparseTrie { } self.prefix_set.clear(); self.updates = None; + // `update_actions_buffers` doesn't need to be cleared; we want to reuse the Vecs it has + // buffered, and all of those are already inherently cleared when they get used. } fn find_leaf( @@ -1036,9 +1056,9 @@ impl ParallelSparseTrie { /// the given `updates` set. If the given set is None then this is a no-op. 
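+    /// With the iterator-based signature, the drained actions are simply dropped when update
+    /// retention is disabled.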
fn apply_subtrie_update_actions( &mut self, - update_actions: Option>, + update_actions: impl Iterator, ) { - if let (Some(updates), Some(update_actions)) = (self.updates.as_mut(), update_actions) { + if let Some(updates) = self.updates.as_mut() { for action in update_actions { match action { SparseTrieUpdatesAction::InsertRemoved(path) => { @@ -1067,7 +1087,9 @@ impl ParallelSparseTrie { is_in_prefix_set: None, }); - let mut update_actions = self.updates_enabled().then(Vec::new); + let mut update_actions_buf = + self.updates_enabled().then(|| self.update_actions_buffers.pop().unwrap_or_default()); + while let Some(stack_item) = self.upper_subtrie.inner.buffers.path_stack.pop() { let path = stack_item.path; let node = if path.len() < UPPER_TRIE_MAX_DEPTH { @@ -1092,7 +1114,7 @@ impl ParallelSparseTrie { // Calculate the RLP node for the current node using upper subtrie self.upper_subtrie.inner.rlp_node( prefix_set, - &mut update_actions, + &mut update_actions_buf, stack_item, node, &self.branch_node_tree_masks, @@ -1102,7 +1124,13 @@ impl ParallelSparseTrie { // If there were any branch node updates as a result of calculating the RLP node for the // upper trie then apply them to the top-level set. - self.apply_subtrie_update_actions(update_actions); + if let Some(mut update_actions_buf) = update_actions_buf { + self.apply_subtrie_update_actions( + #[allow(clippy::iter_with_drain)] + update_actions_buf.drain(..), + ); + self.update_actions_buffers.push(update_actions_buf); + } debug_assert_eq!(self.upper_subtrie.inner.buffers.rlp_node_stack.len(), 1); self.upper_subtrie.inner.buffers.rlp_node_stack.pop().unwrap().rlp_node @@ -1127,6 +1155,7 @@ impl ParallelSparseTrie { let mut prefix_set_iter = prefix_set_clone.into_iter().copied().peekable(); let mut changed_subtries = Vec::new(); let mut unchanged_prefix_set = PrefixSetMut::default(); + let updates_enabled = self.updates_enabled(); for (index, subtrie) in self.lower_subtries.iter_mut().enumerate() { if let Some(subtrie) = @@ -1173,7 +1202,15 @@ impl ParallelSparseTrie { _ => {} } - changed_subtries.push(ChangedSubtrie { index, subtrie, prefix_set }); + let update_actions_buf = + updates_enabled.then(|| self.update_actions_buffers.pop().unwrap_or_default()); + + changed_subtries.push(ChangedSubtrie { + index, + subtrie, + prefix_set, + update_actions_buf, + }); } } @@ -2168,8 +2205,10 @@ struct ChangedSubtrie { /// Changed subtrie subtrie: Box, /// Prefix set of keys that belong to the subtrie. - #[allow(unused)] prefix_set: PrefixSet, + /// Reusable buffer for collecting [`SparseTrieUpdatesAction`]s during computations. Will be + /// None if update retention is disabled. + update_actions_buf: Option>, } /// Convert first [`UPPER_TRIE_MAX_DEPTH`] nibbles of the path into a lower subtrie index in the @@ -2698,7 +2737,7 @@ mod tests { assert_eq!( subtries .into_iter() - .map(|ChangedSubtrie { index, subtrie, prefix_set }| { + .map(|ChangedSubtrie { index, subtrie, prefix_set, .. }| { (index, subtrie, prefix_set.iter().copied().collect::>()) }) .collect::>(), @@ -2742,7 +2781,7 @@ mod tests { assert_eq!( subtries .into_iter() - .map(|ChangedSubtrie { index, subtrie, prefix_set }| { + .map(|ChangedSubtrie { index, subtrie, prefix_set, .. 
}| { (index, subtrie, prefix_set.all()) }) .collect::>(), From ea35ebfda285a848239248bd3ffe58a568507d42 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Fri, 11 Jul 2025 13:07:38 +0300 Subject: [PATCH 140/305] feat: make ethereum Cli generic over node and remove debug commands (#17363) --- Cargo.lock | 38 -- bin/reth/Cargo.toml | 1 - crates/chainspec/src/api.rs | 3 +- crates/cli/commands/src/common.rs | 6 +- crates/cli/commands/src/launcher.rs | 23 +- crates/ethereum/cli/Cargo.toml | 58 +-- .../ethereum/cli/src/debug_cmd/build_block.rs | 274 --------------- .../ethereum/cli/src/debug_cmd/execution.rs | 253 ------------- .../cli/src/debug_cmd/in_memory_merkle.rs | 243 ------------- crates/ethereum/cli/src/debug_cmd/merkle.rs | 314 ----------------- crates/ethereum/cli/src/debug_cmd/mod.rs | 68 ---- crates/ethereum/cli/src/interface.rs | 116 +++--- crates/ethereum/cli/src/lib.rs | 1 - crates/optimism/cli/src/lib.rs | 4 +- docs/vocs/docs/pages/cli/SUMMARY.mdx | 5 - docs/vocs/docs/pages/cli/reth.mdx | 1 - docs/vocs/docs/pages/cli/reth/debug.mdx | 2 - .../docs/pages/cli/reth/debug/build-block.mdx | 164 --------- .../docs/pages/cli/reth/debug/execution.mdx | 328 ----------------- .../pages/cli/reth/debug/in-memory-merkle.mdx | 328 ----------------- .../vocs/docs/pages/cli/reth/debug/merkle.mdx | 331 ------------------ 21 files changed, 102 insertions(+), 2459 deletions(-) delete mode 100644 crates/ethereum/cli/src/debug_cmd/build_block.rs delete mode 100644 crates/ethereum/cli/src/debug_cmd/execution.rs delete mode 100644 crates/ethereum/cli/src/debug_cmd/in_memory_merkle.rs delete mode 100644 crates/ethereum/cli/src/debug_cmd/merkle.rs delete mode 100644 crates/ethereum/cli/src/debug_cmd/mod.rs delete mode 100644 docs/vocs/docs/pages/cli/reth/debug/build-block.mdx delete mode 100644 docs/vocs/docs/pages/cli/reth/debug/execution.mdx delete mode 100644 docs/vocs/docs/pages/cli/reth/debug/in-memory-merkle.mdx delete mode 100644 docs/vocs/docs/pages/cli/reth/debug/merkle.mdx diff --git a/Cargo.lock b/Cargo.lock index ecc7ddd1906..75d1991ce08 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7182,7 +7182,6 @@ dependencies = [ "reth-tasks", "reth-tokio-util", "reth-transaction-pool", - "similar-asserts", "tempfile", "tokio", "tracing", @@ -8230,57 +8229,20 @@ name = "reth-ethereum-cli" version = "1.5.1" dependencies = [ "alloy-consensus", - "alloy-eips", - "alloy-primitives", - "alloy-rlp", - "alloy-rpc-types", - "backon", "clap", "eyre", - "futures", - "reth-basic-payload-builder", "reth-chainspec", "reth-cli", "reth-cli-commands", "reth-cli-runner", - "reth-cli-util", - "reth-config", - "reth-consensus", "reth-db", - "reth-db-api", - "reth-downloaders", - "reth-errors", - "reth-ethereum-payload-builder", - "reth-ethereum-primitives", - "reth-evm", - "reth-execution-types", - "reth-exex", - "reth-fs-util", - "reth-network", - "reth-network-api", - "reth-network-p2p", "reth-node-api", "reth-node-builder", "reth-node-core", "reth-node-ethereum", - "reth-node-events", "reth-node-metrics", - "reth-payload-builder", - "reth-primitives-traits", - "reth-provider", - "reth-prune", - "reth-revm", - "reth-stages", - "reth-static-file", - "reth-tasks", "reth-tracing", - "reth-transaction-pool", - "reth-trie", - "reth-trie-db", - "serde_json", - "similar-asserts", "tempfile", - "tokio", "tracing", ] diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index fb940250033..ab78bc9cb12 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -64,7 +64,6 @@ eyre.workspace = true [dev-dependencies] 
backon.workspace = true -similar-asserts.workspace = true tempfile.workspace = true [features] diff --git a/crates/chainspec/src/api.rs b/crates/chainspec/src/api.rs index e22ebc721cb..cb5b47bc245 100644 --- a/crates/chainspec/src/api.rs +++ b/crates/chainspec/src/api.rs @@ -1,13 +1,14 @@ use crate::{ChainSpec, DepositContract}; use alloc::{boxed::Box, vec::Vec}; use alloy_chains::Chain; -use alloy_consensus::{BlockHeader, Header}; +use alloy_consensus::Header; use alloy_eips::{calc_next_block_base_fee, eip1559::BaseFeeParams, eip7840::BlobParams}; use alloy_genesis::Genesis; use alloy_primitives::{B256, U256}; use core::fmt::{Debug, Display}; use reth_ethereum_forks::EthereumHardforks; use reth_network_peers::NodeRecord; +use reth_primitives_traits::{AlloyBlockHeader, BlockHeader}; /// Trait representing type configuring a chain spec. #[auto_impl::auto_impl(&, Arc)] diff --git a/crates/cli/commands/src/common.rs b/crates/cli/commands/src/common.rs index 340dbf8e760..3249fc98113 100644 --- a/crates/cli/commands/src/common.rs +++ b/crates/cli/commands/src/common.rs @@ -232,7 +232,7 @@ where } /// Helper trait aggregating components required for the CLI. -pub trait CliNodeComponents { +pub trait CliNodeComponents: Send + Sync + 'static { /// Evm to use. type Evm: ConfigureEvm + 'static; /// Consensus implementation. @@ -263,14 +263,14 @@ where /// Helper trait alias for an [`FnOnce`] producing [`CliNodeComponents`]. pub trait CliComponentsBuilder: - FnOnce(Arc) -> Self::Components + FnOnce(Arc) -> Self::Components + Send + Sync + 'static { type Components: CliNodeComponents; } impl CliComponentsBuilder for F where - F: FnOnce(Arc) -> Comp, + F: FnOnce(Arc) -> Comp + Send + Sync + 'static, Comp: CliNodeComponents, { type Components = Comp; diff --git a/crates/cli/commands/src/launcher.rs b/crates/cli/commands/src/launcher.rs index e5e35f97aac..86cc8d33dc3 100644 --- a/crates/cli/commands/src/launcher.rs +++ b/crates/cli/commands/src/launcher.rs @@ -2,7 +2,7 @@ use futures::Future; use reth_cli::chainspec::ChainSpecParser; use reth_db::DatabaseEnv; use reth_node_builder::{NodeBuilder, WithLaunchContext}; -use std::{fmt, marker::PhantomData, sync::Arc}; +use std::{fmt, sync::Arc}; /// A trait for launching a reth node with custom configuration strategies. /// @@ -40,14 +40,12 @@ where /// This struct adapts existing closures to work with the new [`Launcher`] trait, /// maintaining backward compatibility with current node implementations while /// enabling the transition to the more flexible trait-based approach. -pub struct FnLauncher { +pub struct FnLauncher { /// The function to execute when launching the node func: F, - /// Phantom data to track the future type - _result: PhantomData, } -impl FnLauncher { +impl FnLauncher { /// Creates a new function launcher adapter. 
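+    /// With `AsyncFnOnce` the closure's future type is implied, so the adapter no longer
+    /// needs a separate `Fut` parameter or `PhantomData` to carry it.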
/// /// Type parameters `C` and `Ext` help the compiler infer correct types @@ -59,18 +57,23 @@ impl FnLauncher { pub fn new(func: F) -> Self where C: ChainSpecParser, - F: FnOnce(WithLaunchContext, C::ChainSpec>>, Ext) -> Fut, + F: AsyncFnOnce( + WithLaunchContext, C::ChainSpec>>, + Ext, + ) -> eyre::Result<()>, { - Self { func, _result: PhantomData } + Self { func } } } -impl Launcher for FnLauncher +impl Launcher for FnLauncher where C: ChainSpecParser, Ext: clap::Args + fmt::Debug, - F: FnOnce(WithLaunchContext, C::ChainSpec>>, Ext) -> Fut, - Fut: Future>, + F: AsyncFnOnce( + WithLaunchContext, C::ChainSpec>>, + Ext, + ) -> eyre::Result<()>, { fn entrypoint( self, diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index 78080bcbc42..77cca65d016 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -17,60 +17,15 @@ reth-cli-commands.workspace = true reth-cli-runner.workspace = true reth-chainspec.workspace = true reth-db.workspace = true -reth-ethereum-primitives.workspace = true -reth-network.workspace = true reth-node-builder.workspace = true reth-node-core.workspace = true reth-node-ethereum.workspace = true reth-node-metrics.workspace = true reth-tracing.workspace = true -reth-db-api.workspace = true -reth-consensus.workspace = true -reth-errors.workspace = true -reth-ethereum-payload-builder.workspace = true -reth-evm.workspace = true -reth-execution-types.workspace = true -reth-fs-util.workspace = true reth-node-api.workspace = true -reth-basic-payload-builder.workspace = true -reth-primitives-traits.workspace = true -reth-provider.workspace = true -reth-revm.workspace = true -reth-stages.workspace = true -reth-transaction-pool.workspace = true -reth-trie.workspace = true -reth-trie-db.workspace = true -reth-cli-util.workspace = true -reth-config.workspace = true -reth-downloaders.workspace = true -reth-exex.workspace = true -reth-network-api.workspace = true -reth-network-p2p.workspace = true -reth-node-events.workspace = true -reth-prune.workspace = true -reth-static-file.workspace = true -reth-tasks.workspace = true -reth-payload-builder.workspace = true - -# serde -serde_json.workspace = true - -# backoff -backon.workspace = true - -# test -similar-asserts.workspace = true - -# async -tokio.workspace = true -futures.workspace = true # alloy -alloy-eips = { workspace = true, features = ["kzg"] } -alloy-rlp.workspace = true -alloy-rpc-types = { workspace = true, features = ["engine"] } alloy-consensus.workspace = true -alloy-primitives.workspace = true # misc clap.workspace = true @@ -85,31 +40,28 @@ reth-cli-commands.workspace = true tempfile.workspace = true [features] -default = ["jemalloc", "reth-revm/portable"] +default = ["jemalloc"] dev = ["reth-cli-commands/arbitrary"] asm-keccak = [ "reth-node-core/asm-keccak", - "alloy-primitives/asm-keccak", ] jemalloc = [ - "reth-cli-util/jemalloc", "reth-node-core/jemalloc", "reth-node-metrics/jemalloc", ] jemalloc-prof = [ - "reth-cli-util/jemalloc", - "reth-cli-util/jemalloc-prof", + "reth-node-core/jemalloc", ] -tracy-allocator = ["reth-cli-util/tracy-allocator"] +tracy-allocator = [] # Because jemalloc is default and preferred over snmalloc when both features are # enabled, `--no-default-features` should be used when enabling snmalloc or # snmalloc-native. 
-snmalloc = ["reth-cli-util/snmalloc"] -snmalloc-native = ["reth-cli-util/snmalloc-native"] +snmalloc = [] +snmalloc-native = [] min-error-logs = ["tracing/release_max_level_error"] min-warn-logs = ["tracing/release_max_level_warn"] diff --git a/crates/ethereum/cli/src/debug_cmd/build_block.rs b/crates/ethereum/cli/src/debug_cmd/build_block.rs deleted file mode 100644 index 098de0ce323..00000000000 --- a/crates/ethereum/cli/src/debug_cmd/build_block.rs +++ /dev/null @@ -1,274 +0,0 @@ -//! Command for debugging block building. -use alloy_consensus::BlockHeader; -use alloy_eips::{ - eip2718::Encodable2718, eip4844::env_settings::EnvKzgSettings, - eip7594::BlobTransactionSidecarVariant, -}; -use alloy_primitives::{Address, Bytes, B256}; -use alloy_rlp::Decodable; -use alloy_rpc_types::engine::{BlobsBundleV1, PayloadAttributes}; -use clap::Parser; -use eyre::Context; -use reth_basic_payload_builder::{BuildArguments, BuildOutcome, PayloadBuilder, PayloadConfig}; -use reth_chainspec::{ChainSpec, EthereumHardforks}; -use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; -use reth_cli_runner::CliContext; -use reth_consensus::{Consensus, FullConsensus}; -use reth_errors::{ConsensusError, RethResult}; -use reth_ethereum_payload_builder::EthereumBuilderConfig; -use reth_ethereum_primitives::{EthPrimitives, TransactionSigned}; -use reth_evm::{execute::Executor, ConfigureEvm}; -use reth_execution_types::ExecutionOutcome; -use reth_fs_util as fs; -use reth_node_api::{BlockTy, EngineApiMessageVersion, PayloadBuilderAttributes}; -use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig}; -use reth_primitives_traits::{Block as _, SealedBlock, SealedHeader, SignedTransaction}; -use reth_provider::{ - providers::{BlockchainProvider, ProviderNodeTypes}, - BlockHashReader, BlockReader, BlockWriter, ChainSpecProvider, ProviderFactory, - StageCheckpointReader, StateProviderFactory, -}; -use reth_revm::{cached::CachedReads, cancelled::CancelOnDrop, database::StateProviderDatabase}; -use reth_stages::StageId; -use reth_transaction_pool::{ - blobstore::InMemoryBlobStore, BlobStore, EthPooledTransaction, PoolConfig, TransactionOrigin, - TransactionPool, TransactionValidationTaskExecutor, -}; -use reth_trie::StateRoot; -use reth_trie_db::DatabaseStateRoot; -use std::{path::PathBuf, str::FromStr, sync::Arc}; -use tracing::*; - -/// `reth debug build-block` command -/// This debug routine requires that the node is positioned at the block before the target. -/// The script will then parse the block and attempt to build a similar one. -#[derive(Debug, Parser)] -pub struct Command { - #[command(flatten)] - env: EnvironmentArgs, - - #[arg(long)] - parent_beacon_block_root: Option, - - #[arg(long)] - prev_randao: B256, - - #[arg(long)] - timestamp: u64, - - #[arg(long)] - suggested_fee_recipient: Address, - - /// Array of transactions. - /// NOTE: 4844 transactions must be provided in the same order as they appear in the blobs - /// bundle. - #[arg(long, value_delimiter = ',')] - transactions: Vec, - - /// Path to the file that contains a corresponding blobs bundle. - #[arg(long)] - blobs_bundle_path: Option, -} - -impl> Command { - /// Fetches the best block from the database. - /// - /// If the database is empty, returns the genesis block. 
- fn lookup_best_block>( - &self, - factory: ProviderFactory, - ) -> RethResult>>> { - let provider = factory.provider()?; - - let best_number = - provider.get_stage_checkpoint(StageId::Finish)?.unwrap_or_default().block_number; - let best_hash = provider - .block_hash(best_number)? - .expect("the hash for the latest block is missing, database is corrupt"); - - Ok(Arc::new( - provider - .block(best_number.into())? - .expect("the header for the latest block is missing, database is corrupt") - .seal_unchecked(best_hash), - )) - } - - /// Returns the default KZG settings - const fn kzg_settings(&self) -> eyre::Result { - Ok(EnvKzgSettings::Default) - } - - /// Execute `debug build-block` command - pub async fn execute>( - self, - ctx: CliContext, - ) -> eyre::Result<()> { - let Environment { provider_factory, .. } = self.env.init::(AccessRights::RW)?; - - let consensus: Arc> = - Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - - // fetch the best block from the database - let best_block = self - .lookup_best_block(provider_factory.clone()) - .wrap_err("the head block is missing")?; - - let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; - let blob_store = InMemoryBlobStore::default(); - - let validator = TransactionValidationTaskExecutor::eth_builder(blockchain_db.clone()) - .with_head_timestamp(best_block.timestamp) - .kzg_settings(self.kzg_settings()?) - .with_additional_tasks(1) - .build_with_tasks(ctx.task_executor.clone(), blob_store.clone()); - - let transaction_pool = reth_transaction_pool::Pool::eth_pool( - validator, - blob_store.clone(), - PoolConfig::default(), - ); - info!(target: "reth::cli", "Transaction pool initialized"); - - let mut blobs_bundle = self - .blobs_bundle_path - .map(|path| -> eyre::Result { - let contents = fs::read_to_string(&path) - .wrap_err(format!("could not read {}", path.display()))?; - serde_json::from_str(&contents).wrap_err("failed to deserialize blobs bundle") - }) - .transpose()?; - - for tx_bytes in &self.transactions { - debug!(target: "reth::cli", bytes = ?tx_bytes, "Decoding transaction"); - let transaction = TransactionSigned::decode(&mut &Bytes::from_str(tx_bytes)?[..])? - .try_into_recovered() - .map_err(|tx| eyre::eyre!("failed to recover tx: {}", tx.tx_hash()))?; - - let encoded_length = match transaction.inner() { - TransactionSigned::Eip4844(tx) => { - let blobs_bundle = blobs_bundle.as_mut().ok_or_else(|| { - eyre::eyre!("encountered a blob tx. 
`--blobs-bundle-path` must be provided") - })?; - - let sidecar: BlobTransactionSidecarVariant = - BlobTransactionSidecarVariant::Eip4844( - blobs_bundle.pop_sidecar(tx.tx().blob_versioned_hashes.len()), - ); - - let pooled = transaction - .clone() - .into_inner() - .try_into_pooled_eip4844(sidecar.clone()) - .expect("should not fail to convert blob tx if it is already eip4844"); - let encoded_length = pooled.encode_2718_len(); - - // insert the blob into the store - blob_store.insert(*transaction.tx_hash(), sidecar)?; - - encoded_length - } - _ => transaction.encode_2718_len(), - }; - - debug!(target: "reth::cli", ?transaction, "Adding transaction to the pool"); - transaction_pool - .add_transaction( - TransactionOrigin::External, - EthPooledTransaction::new(transaction, encoded_length), - ) - .await?; - } - - let payload_attrs = PayloadAttributes { - parent_beacon_block_root: self.parent_beacon_block_root, - prev_randao: self.prev_randao, - timestamp: self.timestamp, - suggested_fee_recipient: self.suggested_fee_recipient, - // Set empty withdrawals vector if Shanghai is active, None otherwise - withdrawals: provider_factory - .chain_spec() - .is_shanghai_active_at_timestamp(self.timestamp) - .then(Vec::new), - }; - let payload_config = PayloadConfig::new( - Arc::new(SealedHeader::new(best_block.header().clone(), best_block.hash())), - reth_payload_builder::EthPayloadBuilderAttributes::try_new( - best_block.hash(), - payload_attrs, - EngineApiMessageVersion::default() as u8, - )?, - ); - - let args = BuildArguments::new( - CachedReads::default(), - payload_config, - CancelOnDrop::default(), - None, - ); - - let payload_builder = reth_ethereum_payload_builder::EthereumPayloadBuilder::new( - blockchain_db.clone(), - transaction_pool, - EthEvmConfig::new(provider_factory.chain_spec()), - EthereumBuilderConfig::new(), - ); - - match payload_builder.try_build(args)? { - BuildOutcome::Better { payload, .. } => { - let block = payload.block(); - debug!(target: "reth::cli", ?block, "Built new payload"); - - consensus.validate_header(block.sealed_header())?; - consensus.validate_block_pre_execution(block)?; - - let block_with_senders = block.clone().try_recover().unwrap(); - - let state_provider = blockchain_db.latest()?; - let db = StateProviderDatabase::new(&state_provider); - let evm_config = EthEvmConfig::ethereum(provider_factory.chain_spec()); - let executor = evm_config.batch_executor(db); - - let block_execution_output = executor.execute(&block_with_senders)?; - let execution_outcome = - ExecutionOutcome::from((block_execution_output, block.number)); - debug!(target: "reth::cli", ?execution_outcome, "Executed block"); - - let hashed_post_state = state_provider.hashed_post_state(execution_outcome.state()); - let (state_root, trie_updates) = StateRoot::overlay_root_with_updates( - provider_factory.provider()?.tx_ref(), - hashed_post_state.clone(), - )?; - - if state_root != block_with_senders.state_root() { - eyre::bail!( - "state root mismatch. expected: {}. 
got: {}", - block_with_senders.state_root, - state_root - ); - } - - // Attempt to insert new block without committing - let provider_rw = provider_factory.provider_rw()?; - provider_rw.append_blocks_with_state( - Vec::from([block_with_senders]), - &execution_outcome, - hashed_post_state.into_sorted(), - trie_updates, - )?; - info!(target: "reth::cli", "Successfully appended built block"); - } - _ => unreachable!("other outcomes are unreachable"), - }; - - Ok(()) - } -} - -impl Command { - /// Returns the underlying chain being used to run this command - pub const fn chain_spec(&self) -> Option<&Arc> { - Some(&self.env.chain) - } -} diff --git a/crates/ethereum/cli/src/debug_cmd/execution.rs b/crates/ethereum/cli/src/debug_cmd/execution.rs deleted file mode 100644 index 63a9cc3a80e..00000000000 --- a/crates/ethereum/cli/src/debug_cmd/execution.rs +++ /dev/null @@ -1,253 +0,0 @@ -//! Command for debugging execution. - -use alloy_eips::BlockHashOrNumber; -use alloy_primitives::{BlockNumber, B256}; -use clap::Parser; -use futures::StreamExt; -use reth_chainspec::ChainSpec; -use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; -use reth_cli_runner::CliContext; -use reth_cli_util::get_secret_key; -use reth_config::Config; -use reth_consensus::FullConsensus; -use reth_db::DatabaseEnv; -use reth_downloaders::{ - bodies::bodies::BodiesDownloaderBuilder, - headers::reverse_headers::ReverseHeadersDownloaderBuilder, -}; -use reth_errors::ConsensusError; -use reth_ethereum_primitives::EthPrimitives; -use reth_exex::ExExManagerHandle; -use reth_network::{BlockDownloaderProvider, NetworkHandle}; -use reth_network_api::NetworkInfo; -use reth_network_p2p::{headers::client::HeadersClient, EthBlockClient}; -use reth_node_api::NodeTypesWithDBAdapter; -use reth_node_core::{args::NetworkArgs, utils::get_single_header}; -use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig}; -use reth_node_events::node::NodeEvent; -use reth_provider::{ - providers::ProviderNodeTypes, ChainSpecProvider, ProviderFactory, StageCheckpointReader, -}; -use reth_prune::PruneModes; -use reth_stages::{ - sets::DefaultStages, stages::ExecutionStage, ExecutionStageThresholds, Pipeline, StageId, - StageSet, -}; -use reth_static_file::StaticFileProducer; -use reth_tasks::TaskExecutor; -use std::{path::PathBuf, sync::Arc}; -use tokio::sync::watch; -use tracing::*; - -/// `reth debug execution` command -#[derive(Debug, Parser)] -pub struct Command { - #[command(flatten)] - env: EnvironmentArgs, - - #[command(flatten)] - network: NetworkArgs, - - /// The maximum block height. - #[arg(long)] - pub to: u64, - - /// The block interval for sync and unwind. - /// Defaults to `1000`. 
- #[arg(long, default_value = "1000")] - pub interval: u64, -} - -impl> Command { - fn build_pipeline( - &self, - config: &Config, - client: Client, - consensus: Arc>, - provider_factory: ProviderFactory, - task_executor: &TaskExecutor, - static_file_producer: StaticFileProducer>, - ) -> eyre::Result> - where - N: ProviderNodeTypes, - Client: EthBlockClient + 'static, - { - // building network downloaders using the fetch client - let header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers) - .build(client.clone(), consensus.clone()) - .into_task_with(task_executor); - - let body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies) - .build(client, consensus.clone(), provider_factory.clone()) - .into_task_with(task_executor); - - let stage_conf = &config.stages; - let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default(); - - let (tip_tx, tip_rx) = watch::channel(B256::ZERO); - let executor = EthEvmConfig::ethereum(provider_factory.chain_spec()); - - let pipeline = Pipeline::::builder() - .with_tip_sender(tip_tx) - .add_stages( - DefaultStages::new( - provider_factory.clone(), - tip_rx, - consensus.clone(), - header_downloader, - body_downloader, - executor.clone(), - stage_conf.clone(), - prune_modes, - None, - ) - .set(ExecutionStage::new( - executor, - consensus.clone(), - ExecutionStageThresholds { - max_blocks: None, - max_changes: None, - max_cumulative_gas: None, - max_duration: None, - }, - stage_conf.execution_external_clean_threshold(), - ExExManagerHandle::empty(), - )), - ) - .build(provider_factory, static_file_producer); - - Ok(pipeline) - } - - async fn build_network< - N: CliNodeTypes, - >( - &self, - config: &Config, - task_executor: TaskExecutor, - provider_factory: ProviderFactory>>, - network_secret_path: PathBuf, - default_peers_path: PathBuf, - ) -> eyre::Result { - let secret_key = get_secret_key(&network_secret_path)?; - let network = self - .network - .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) - .with_task_executor(Box::new(task_executor)) - .build(provider_factory) - .start_network() - .await?; - info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network"); - debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID"); - Ok(network) - } - - async fn fetch_block_hash( - &self, - client: Client, - block: BlockNumber, - ) -> eyre::Result - where - Client: HeadersClient, - { - info!(target: "reth::cli", ?block, "Fetching block from the network."); - loop { - match get_single_header(&client, BlockHashOrNumber::Number(block)).await { - Ok(tip_header) => { - info!(target: "reth::cli", ?block, "Successfully fetched block"); - return Ok(tip_header.hash()) - } - Err(error) => { - error!(target: "reth::cli", ?block, %error, "Failed to fetch the block. 
Retrying..."); - } - } - } - } - - /// Execute `execution-debug` command - pub async fn execute>( - self, - ctx: CliContext, - ) -> eyre::Result<()> { - let Environment { provider_factory, config, data_dir } = - self.env.init::(AccessRights::RW)?; - - let consensus: Arc> = - Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - - // Configure and build network - let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); - let network = self - .build_network( - &config, - ctx.task_executor.clone(), - provider_factory.clone(), - network_secret_path, - data_dir.known_peers(), - ) - .await?; - - let static_file_producer = - StaticFileProducer::new(provider_factory.clone(), PruneModes::default()); - - // Configure the pipeline - let fetch_client = network.fetch_client().await?; - let mut pipeline = self.build_pipeline( - &config, - fetch_client.clone(), - consensus.clone(), - provider_factory.clone(), - &ctx.task_executor, - static_file_producer, - )?; - - let provider = provider_factory.provider()?; - - let latest_block_number = - provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); - if latest_block_number.unwrap_or_default() >= self.to { - info!(target: "reth::cli", latest = latest_block_number, "Nothing to run"); - return Ok(()) - } - - ctx.task_executor.spawn_critical( - "events task", - reth_node_events::node::handle_events( - Some(Box::new(network)), - latest_block_number, - pipeline.events().map(Into::>::into), - ), - ); - - let mut current_max_block = latest_block_number.unwrap_or_default(); - while current_max_block < self.to { - let next_block = current_max_block + 1; - let target_block = self.to.min(current_max_block + self.interval); - let target_block_hash = - self.fetch_block_hash(fetch_client.clone(), target_block).await?; - - // Run the pipeline - info!(target: "reth::cli", from = next_block, to = target_block, tip = ?target_block_hash, "Starting pipeline"); - pipeline.set_tip(target_block_hash); - let result = pipeline.run_loop().await?; - trace!(target: "reth::cli", from = next_block, to = target_block, tip = ?target_block_hash, ?result, "Pipeline finished"); - - // Unwind the pipeline without committing. - provider_factory.provider_rw()?.unwind_trie_state_range(next_block..=target_block)?; - - // Update latest block - current_max_block = target_block; - } - - Ok(()) - } -} - -impl Command { - /// Returns the underlying chain being used to run this command - pub const fn chain_spec(&self) -> Option<&Arc> { - Some(&self.env.chain) - } -} diff --git a/crates/ethereum/cli/src/debug_cmd/in_memory_merkle.rs b/crates/ethereum/cli/src/debug_cmd/in_memory_merkle.rs deleted file mode 100644 index b45e712da29..00000000000 --- a/crates/ethereum/cli/src/debug_cmd/in_memory_merkle.rs +++ /dev/null @@ -1,243 +0,0 @@ -//! Command for debugging in-memory merkle trie calculation. 
- -use alloy_consensus::BlockHeader; -use alloy_eips::BlockHashOrNumber; -use backon::{ConstantBuilder, Retryable}; -use clap::Parser; -use reth_chainspec::ChainSpec; -use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; -use reth_cli_runner::CliContext; -use reth_cli_util::get_secret_key; -use reth_config::Config; -use reth_ethereum_primitives::EthPrimitives; -use reth_evm::{execute::Executor, ConfigureEvm}; -use reth_execution_types::ExecutionOutcome; -use reth_network::{BlockDownloaderProvider, NetworkHandle}; -use reth_network_api::NetworkInfo; -use reth_node_api::{BlockTy, NodePrimitives}; -use reth_node_core::{ - args::NetworkArgs, - utils::{get_single_body, get_single_header}, -}; -use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig}; -use reth_primitives_traits::SealedBlock; -use reth_provider::{ - providers::ProviderNodeTypes, AccountExtReader, ChainSpecProvider, DatabaseProviderFactory, - HashedPostStateProvider, HashingWriter, LatestStateProviderRef, OriginalValuesKnown, - ProviderFactory, StageCheckpointReader, StateWriter, StorageLocation, StorageReader, -}; -use reth_revm::database::StateProviderDatabase; -use reth_stages::StageId; -use reth_tasks::TaskExecutor; -use reth_trie::StateRoot; -use reth_trie_db::DatabaseStateRoot; -use std::{path::PathBuf, sync::Arc}; -use tracing::*; - -/// `reth debug in-memory-merkle` command -/// This debug routine requires that the node is positioned at the block before the target. -/// The script will then download the block from p2p network and attempt to calculate and verify -/// merkle root for it. -#[derive(Debug, Parser)] -pub struct Command { - #[command(flatten)] - env: EnvironmentArgs, - - #[command(flatten)] - network: NetworkArgs, - - /// The number of retries per request - #[arg(long, default_value = "5")] - retries: usize, - - /// The depth after which we should start comparing branch nodes - #[arg(long)] - skip_node_depth: Option, -} - -impl> Command { - async fn build_network< - N: ProviderNodeTypes< - ChainSpec = C::ChainSpec, - Primitives: NodePrimitives< - Block = reth_ethereum_primitives::Block, - Receipt = reth_ethereum_primitives::Receipt, - BlockHeader = alloy_consensus::Header, - >, - >, - >( - &self, - config: &Config, - task_executor: TaskExecutor, - provider_factory: ProviderFactory, - network_secret_path: PathBuf, - default_peers_path: PathBuf, - ) -> eyre::Result { - let secret_key = get_secret_key(&network_secret_path)?; - let network = self - .network - .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) - .with_task_executor(Box::new(task_executor)) - .build(provider_factory) - .start_network() - .await?; - info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network"); - debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID"); - Ok(network) - } - - /// Execute `debug in-memory-merkle` command - pub async fn execute>( - self, - ctx: CliContext, - ) -> eyre::Result<()> { - let Environment { provider_factory, config, data_dir } = - self.env.init::(AccessRights::RW)?; - - let provider = provider_factory.provider()?; - - // Look up merkle checkpoint - let merkle_checkpoint = provider - .get_stage_checkpoint(StageId::MerkleExecute)? 
- .expect("merkle checkpoint exists"); - - let merkle_block_number = merkle_checkpoint.block_number; - - // Configure and build network - let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); - let network = self - .build_network( - &config, - ctx.task_executor.clone(), - provider_factory.clone(), - network_secret_path, - data_dir.known_peers(), - ) - .await?; - - let target_block_number = merkle_block_number + 1; - - info!(target: "reth::cli", target_block_number, "Downloading full block"); - let fetch_client = network.fetch_client().await?; - - let retries = self.retries.max(1); - let backoff = ConstantBuilder::default().with_max_times(retries); - - let client = fetch_client.clone(); - let header = (move || { - get_single_header(client.clone(), BlockHashOrNumber::Number(target_block_number)) - }) - .retry(backoff) - .notify(|err, _| warn!(target: "reth::cli", "Error requesting header: {err}. Retrying...")) - .await?; - - let client = fetch_client.clone(); - let chain = provider_factory.chain_spec(); - let consensus = Arc::new(EthBeaconConsensus::new(chain.clone())); - let block: SealedBlock> = (move || { - get_single_body(client.clone(), header.clone(), consensus.clone()) - }) - .retry(backoff) - .notify(|err, _| warn!(target: "reth::cli", "Error requesting body: {err}. Retrying...")) - .await?; - - let state_provider = LatestStateProviderRef::new(&provider); - let db = StateProviderDatabase::new(&state_provider); - - let evm_config = EthEvmConfig::ethereum(provider_factory.chain_spec()); - let executor = evm_config.batch_executor(db); - let block_execution_output = executor.execute(&block.clone().try_recover()?)?; - let execution_outcome = ExecutionOutcome::from((block_execution_output, block.number())); - - // Unpacked `BundleState::state_root_slow` function - let (in_memory_state_root, in_memory_updates) = StateRoot::overlay_root_with_updates( - provider.tx_ref(), - state_provider.hashed_post_state(execution_outcome.state()), - )?; - - if in_memory_state_root == block.state_root() { - info!(target: "reth::cli", state_root = ?in_memory_state_root, "Computed in-memory state root matches"); - return Ok(()) - } - - let provider_rw = provider_factory.database_provider_rw()?; - - // Insert block, state and hashes - provider_rw.insert_historical_block(block.clone().try_recover()?)?; - provider_rw.write_state( - &execution_outcome, - OriginalValuesKnown::No, - StorageLocation::Database, - )?; - let storage_lists = - provider_rw.changed_storages_with_range(block.number..=block.number())?; - let storages = provider_rw.plain_state_storages(storage_lists)?; - provider_rw.insert_storage_for_hashing(storages)?; - let account_lists = - provider_rw.changed_accounts_with_range(block.number..=block.number())?; - let accounts = provider_rw.basic_accounts(account_lists)?; - provider_rw.insert_account_for_hashing(accounts)?; - - let (state_root, incremental_trie_updates) = StateRoot::incremental_root_with_updates( - provider_rw.tx_ref(), - block.number..=block.number(), - )?; - if state_root != block.state_root() { - eyre::bail!( - "Computed incremental state root mismatch. Expected: {:?}. 
Got: {:?}", - block.state_root, - state_root - ); - } - - // Compare updates - let mut in_mem_mismatched = Vec::new(); - let mut incremental_mismatched = Vec::new(); - let mut in_mem_updates_iter = in_memory_updates.account_nodes_ref().iter().peekable(); - let mut incremental_updates_iter = - incremental_trie_updates.account_nodes_ref().iter().peekable(); - - while in_mem_updates_iter.peek().is_some() || incremental_updates_iter.peek().is_some() { - match (in_mem_updates_iter.next(), incremental_updates_iter.next()) { - (Some(in_mem), Some(incr)) => { - similar_asserts::assert_eq!(in_mem.0, incr.0, "Nibbles don't match"); - if in_mem.1 != incr.1 && - in_mem.0.len() > self.skip_node_depth.unwrap_or_default() - { - in_mem_mismatched.push(in_mem); - incremental_mismatched.push(incr); - } - } - (Some(in_mem), None) => { - warn!(target: "reth::cli", next = ?in_mem, "In-memory trie updates have more entries"); - } - (None, Some(incr)) => { - tracing::warn!(target: "reth::cli", next = ?incr, "Incremental trie updates have more entries"); - } - (None, None) => { - tracing::info!(target: "reth::cli", "Exhausted all trie updates entries"); - } - } - } - - similar_asserts::assert_eq!( - incremental_mismatched, - in_mem_mismatched, - "Mismatched trie updates" - ); - - // Drop without committing. - drop(provider_rw); - - Ok(()) - } -} - -impl Command { - /// Returns the underlying chain being used to run this command - pub const fn chain_spec(&self) -> Option<&Arc> { - Some(&self.env.chain) - } -} diff --git a/crates/ethereum/cli/src/debug_cmd/merkle.rs b/crates/ethereum/cli/src/debug_cmd/merkle.rs deleted file mode 100644 index 63c18f9d2dc..00000000000 --- a/crates/ethereum/cli/src/debug_cmd/merkle.rs +++ /dev/null @@ -1,314 +0,0 @@ -//! Command for debugging merkle tree calculation. 
-use alloy_eips::BlockHashOrNumber; -use backon::{ConstantBuilder, Retryable}; -use clap::Parser; -use reth_chainspec::ChainSpec; -use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; -use reth_cli_runner::CliContext; -use reth_cli_util::get_secret_key; -use reth_config::Config; -use reth_consensus::{Consensus, ConsensusError}; -use reth_db_api::{cursor::DbCursorRO, tables, transaction::DbTx}; -use reth_ethereum_primitives::EthPrimitives; -use reth_evm::{execute::Executor, ConfigureEvm}; -use reth_execution_types::ExecutionOutcome; -use reth_network::{BlockDownloaderProvider, NetworkHandle}; -use reth_network_api::NetworkInfo; -use reth_network_p2p::full_block::FullBlockClient; -use reth_node_api::{BlockTy, NodePrimitives}; -use reth_node_core::{args::NetworkArgs, utils::get_single_header}; -use reth_node_ethereum::{consensus::EthBeaconConsensus, EthEvmConfig}; -use reth_provider::{ - providers::ProviderNodeTypes, BlockNumReader, BlockWriter, ChainSpecProvider, - DatabaseProviderFactory, LatestStateProviderRef, OriginalValuesKnown, ProviderFactory, - StateWriter, StorageLocation, -}; -use reth_revm::database::StateProviderDatabase; -use reth_stages::{ - stages::{AccountHashingStage, MerkleStage, StorageHashingStage}, - ExecInput, Stage, StageCheckpoint, -}; -use reth_tasks::TaskExecutor; -use std::{path::PathBuf, sync::Arc}; -use tracing::*; - -/// `reth debug merkle` command -#[derive(Debug, Parser)] -pub struct Command { - #[command(flatten)] - env: EnvironmentArgs, - - #[command(flatten)] - network: NetworkArgs, - - /// The number of retries per request - #[arg(long, default_value = "5")] - retries: usize, - - /// The height to finish at - #[arg(long)] - to: u64, - - /// The depth after which we should start comparing branch nodes - #[arg(long)] - skip_node_depth: Option, -} - -impl> Command { - async fn build_network< - N: ProviderNodeTypes< - ChainSpec = C::ChainSpec, - Primitives: NodePrimitives< - Block = reth_ethereum_primitives::Block, - Receipt = reth_ethereum_primitives::Receipt, - BlockHeader = alloy_consensus::Header, - >, - >, - >( - &self, - config: &Config, - task_executor: TaskExecutor, - provider_factory: ProviderFactory, - network_secret_path: PathBuf, - default_peers_path: PathBuf, - ) -> eyre::Result { - let secret_key = get_secret_key(&network_secret_path)?; - let network = self - .network - .network_config(config, provider_factory.chain_spec(), secret_key, default_peers_path) - .with_task_executor(Box::new(task_executor)) - .build(provider_factory) - .start_network() - .await?; - info!(target: "reth::cli", peer_id = %network.peer_id(), local_addr = %network.local_addr(), "Connected to P2P network"); - debug!(target: "reth::cli", peer_id = ?network.peer_id(), "Full peer ID"); - Ok(network) - } - - /// Execute `merkle-debug` command - pub async fn execute>( - self, - ctx: CliContext, - ) -> eyre::Result<()> { - let Environment { provider_factory, config, data_dir } = - self.env.init::(AccessRights::RW)?; - - let provider_rw = provider_factory.database_provider_rw()?; - - // Configure and build network - let network_secret_path = - self.network.p2p_secret_key.clone().unwrap_or_else(|| data_dir.p2p_secret()); - let network = self - .build_network( - &config, - ctx.task_executor.clone(), - provider_factory.clone(), - network_secret_path, - data_dir.known_peers(), - ) - .await?; - - let executor_provider = EthEvmConfig::ethereum(provider_factory.chain_spec()); - - // Initialize the fetch 
client - info!(target: "reth::cli", target_block_number = self.to, "Downloading tip of block range"); - let fetch_client = network.fetch_client().await?; - - // fetch the header at `self.to` - let retries = self.retries.max(1); - let backoff = ConstantBuilder::default().with_max_times(retries); - let client = fetch_client.clone(); - let to_header = (move || { - get_single_header(client.clone(), BlockHashOrNumber::Number(self.to)) - }) - .retry(backoff) - .notify(|err, _| warn!(target: "reth::cli", "Error requesting header: {err}. Retrying...")) - .await?; - info!(target: "reth::cli", target_block_number=self.to, "Finished downloading tip of block range"); - - // build the full block client - let consensus: Arc, Error = ConsensusError>> = - Arc::new(EthBeaconConsensus::new(provider_factory.chain_spec())); - let block_range_client = FullBlockClient::new(fetch_client, consensus); - - // get best block number - let best_block_number = provider_rw.best_block_number()?; - assert!(best_block_number < self.to, "Nothing to run"); - - // get the block range from the network - let block_range = best_block_number + 1..=self.to; - info!(target: "reth::cli", ?block_range, "Downloading range of blocks"); - let blocks = block_range_client - .get_full_block_range(to_header.hash_slow(), self.to - best_block_number) - .await; - - let mut account_hashing_stage = AccountHashingStage::default(); - let mut storage_hashing_stage = StorageHashingStage::default(); - let mut merkle_stage = MerkleStage::default_execution(); - - for block in blocks.into_iter().rev() { - let block_number = block.number; - let sealed_block = - block.try_recover().map_err(|_| eyre::eyre!("Error sealing block with senders"))?; - trace!(target: "reth::cli", block_number, "Executing block"); - - provider_rw.insert_block(sealed_block.clone(), StorageLocation::Database)?; - - let executor = executor_provider.batch_executor(StateProviderDatabase::new( - LatestStateProviderRef::new(&provider_rw), - )); - let output = executor.execute(&sealed_block)?; - - provider_rw.write_state( - &ExecutionOutcome::single(block_number, output), - OriginalValuesKnown::Yes, - StorageLocation::Database, - )?; - - let checkpoint = Some(StageCheckpoint::new( - block_number - .checked_sub(1) - .ok_or_else(|| eyre::eyre!("GenesisBlockHasNoParent"))?, - )); - - let mut account_hashing_done = false; - while !account_hashing_done { - let output = account_hashing_stage - .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint })?; - account_hashing_done = output.done; - } - - let mut storage_hashing_done = false; - while !storage_hashing_done { - let output = storage_hashing_stage - .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint })?; - storage_hashing_done = output.done; - } - - let incremental_result = merkle_stage - .execute(&provider_rw, ExecInput { target: Some(block_number), checkpoint }); - - if incremental_result.is_ok() { - debug!(target: "reth::cli", block_number, "Successfully computed incremental root"); - continue - } - - warn!(target: "reth::cli", block_number, "Incremental calculation failed, retrying from scratch"); - let incremental_account_trie = provider_rw - .tx_ref() - .cursor_read::()? - .walk_range(..)? - .collect::, _>>()?; - let incremental_storage_trie = provider_rw - .tx_ref() - .cursor_dup_read::()? - .walk_range(..)? 
- .collect::, _>>()?; - - let clean_input = ExecInput { target: Some(sealed_block.number), checkpoint: None }; - loop { - let clean_result = merkle_stage - .execute(&provider_rw, clean_input) - .map_err(|e| eyre::eyre!("Clean state root calculation failed: {}", e))?; - if clean_result.done { - break; - } - } - - let clean_account_trie = provider_rw - .tx_ref() - .cursor_read::()? - .walk_range(..)? - .collect::, _>>()?; - let clean_storage_trie = provider_rw - .tx_ref() - .cursor_dup_read::()? - .walk_range(..)? - .collect::, _>>()?; - - info!(target: "reth::cli", block_number, "Comparing incremental trie vs clean trie"); - - // Account trie - let mut incremental_account_mismatched = Vec::new(); - let mut clean_account_mismatched = Vec::new(); - let mut incremental_account_trie_iter = incremental_account_trie.into_iter().peekable(); - let mut clean_account_trie_iter = clean_account_trie.into_iter().peekable(); - while incremental_account_trie_iter.peek().is_some() || - clean_account_trie_iter.peek().is_some() - { - match (incremental_account_trie_iter.next(), clean_account_trie_iter.next()) { - (Some(incremental), Some(clean)) => { - similar_asserts::assert_eq!(incremental.0, clean.0, "Nibbles don't match"); - if incremental.1 != clean.1 && - clean.0 .0.len() > self.skip_node_depth.unwrap_or_default() - { - incremental_account_mismatched.push(incremental); - clean_account_mismatched.push(clean); - } - } - (Some(incremental), None) => { - warn!(target: "reth::cli", next = ?incremental, "Incremental account trie has more entries"); - } - (None, Some(clean)) => { - warn!(target: "reth::cli", next = ?clean, "Clean account trie has more entries"); - } - (None, None) => { - info!(target: "reth::cli", "Exhausted all account trie entries"); - } - } - } - - // Storage trie - let mut first_mismatched_storage = None; - let mut incremental_storage_trie_iter = incremental_storage_trie.into_iter().peekable(); - let mut clean_storage_trie_iter = clean_storage_trie.into_iter().peekable(); - while incremental_storage_trie_iter.peek().is_some() || - clean_storage_trie_iter.peek().is_some() - { - match (incremental_storage_trie_iter.next(), clean_storage_trie_iter.next()) { - (Some(incremental), Some(clean)) => { - if incremental != clean && - clean.1.nibbles.len() > self.skip_node_depth.unwrap_or_default() - { - first_mismatched_storage = Some((incremental, clean)); - break - } - } - (Some(incremental), None) => { - warn!(target: "reth::cli", next = ?incremental, "Incremental storage trie has more entries"); - } - (None, Some(clean)) => { - warn!(target: "reth::cli", next = ?clean, "Clean storage trie has more entries") - } - (None, None) => { - info!(target: "reth::cli", "Exhausted all storage trie entries.") - } - } - } - - similar_asserts::assert_eq!( - ( - incremental_account_mismatched, - first_mismatched_storage.as_ref().map(|(incremental, _)| incremental) - ), - ( - clean_account_mismatched, - first_mismatched_storage.as_ref().map(|(_, clean)| clean) - ), - "Mismatched trie nodes" - ); - } - - info!(target: "reth::cli", ?block_range, "Successfully validated incremental roots"); - - Ok(()) - } -} - -impl Command { - /// Returns the underlying chain being used to run this command - pub const fn chain_spec(&self) -> Option<&Arc> { - Some(&self.env.chain) - } -} diff --git a/crates/ethereum/cli/src/debug_cmd/mod.rs b/crates/ethereum/cli/src/debug_cmd/mod.rs deleted file mode 100644 index 1a7bd5ed0cc..00000000000 --- a/crates/ethereum/cli/src/debug_cmd/mod.rs +++ /dev/null @@ -1,68 +0,0 @@ -//! 
`reth debug` command. Collection of various debugging routines. - -use clap::{Parser, Subcommand}; -use reth_chainspec::ChainSpec; -use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::common::CliNodeTypes; -use reth_cli_runner::CliContext; -use reth_ethereum_primitives::EthPrimitives; -use reth_node_ethereum::EthEngineTypes; -use std::sync::Arc; - -mod build_block; -mod execution; -mod in_memory_merkle; -mod merkle; - -/// `reth debug` command -#[derive(Debug, Parser)] -pub struct Command { - #[command(subcommand)] - command: Subcommands, -} - -/// `reth debug` subcommands -#[derive(Subcommand, Debug)] -pub enum Subcommands { - /// Debug the roundtrip execution of blocks as well as the generated data. - Execution(execution::Command), - /// Debug the clean & incremental state root calculations. - Merkle(merkle::Command), - /// Debug in-memory state root calculation. - InMemoryMerkle(in_memory_merkle::Command), - /// Debug block building. - BuildBlock(build_block::Command), -} - -impl> Command { - /// Execute `debug` command - pub async fn execute< - N: CliNodeTypes< - Payload = EthEngineTypes, - Primitives = EthPrimitives, - ChainSpec = C::ChainSpec, - >, - >( - self, - ctx: CliContext, - ) -> eyre::Result<()> { - match self.command { - Subcommands::Execution(command) => command.execute::(ctx).await, - Subcommands::Merkle(command) => command.execute::(ctx).await, - Subcommands::InMemoryMerkle(command) => command.execute::(ctx).await, - Subcommands::BuildBlock(command) => command.execute::(ctx).await, - } - } -} - -impl Command { - /// Returns the underlying chain being used to run this command - pub const fn chain_spec(&self) -> Option<&Arc> { - match &self.command { - Subcommands::Execution(command) => command.chain_spec(), - Subcommands::Merkle(command) => command.chain_spec(), - Subcommands::InMemoryMerkle(command) => command.chain_spec(), - Subcommands::BuildBlock(command) => command.chain_spec(), - } - } -} diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index f4920eff4b5..f1bace672bd 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -1,10 +1,11 @@ //! CLI definition and entrypoint to executable -use crate::{chainspec::EthereumChainSpecParser, debug_cmd}; +use crate::chainspec::EthereumChainSpecParser; use clap::{Parser, Subcommand}; -use reth_chainspec::ChainSpec; +use reth_chainspec::{ChainSpec, EthChainSpec, Hardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_cli_commands::{ + common::{CliComponentsBuilder, CliNodeTypes}, config_cmd, db, download, dump_genesis, export_era, import, import_era, init_cmd, init_state, launcher::FnLauncher, node::{self, NoArgs}, @@ -12,6 +13,7 @@ use reth_cli_commands::{ }; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; +use reth_node_api::NodePrimitives; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ args::LogArgs, @@ -55,7 +57,7 @@ impl Cli { } } -impl, Ext: clap::Args + fmt::Debug> Cli { +impl Cli { /// Execute the configured cli command. /// /// This accepts a closure that is used to launch the node via the @@ -102,10 +104,35 @@ impl, Ext: clap::Args + fmt::Debug> Cl where L: FnOnce(WithLaunchContext, C::ChainSpec>>, Ext) -> Fut, Fut: Future>, + C: ChainSpecParser, { self.with_runner(CliRunner::try_default_runtime()?, launcher) } + /// Execute the configured cli command with the provided [`CliComponentsBuilder`]. 
+ /// + /// This accepts a closure that is used to launch the node via the + /// [`NodeCommand`](node::NodeCommand). + /// + /// This command will be run on the [default tokio runtime](reth_cli_runner::tokio_runtime). + pub fn run_with_components( + self, + components: impl CliComponentsBuilder, + launcher: impl AsyncFnOnce( + WithLaunchContext, C::ChainSpec>>, + Ext, + ) -> eyre::Result<()>, + ) -> eyre::Result<()> + where + N: CliNodeTypes< + Primitives: NodePrimitives, + ChainSpec: Hardforks, + >, + C: ChainSpecParser, + { + self.with_runner_and_components(CliRunner::try_default_runtime()?, components, launcher) + } + /// Execute the configured cli command with the provided [`CliRunner`]. /// /// @@ -116,13 +143,7 @@ impl, Ext: clap::Args + fmt::Debug> Cl /// use reth_ethereum_cli::interface::Cli; /// use reth_node_ethereum::EthereumNode; /// - /// let runtime = tokio::runtime::Builder::new_multi_thread() - /// .worker_threads(4) - /// .max_blocking_threads(256) - /// .enable_all() - /// .build() - /// .unwrap(); - /// let runner = CliRunner::from_runtime(runtime); + /// let runner = CliRunner::try_default_runtime().unwrap(); /// /// Cli::parse_args() /// .with_runner(runner, |builder, _| async move { @@ -131,15 +152,45 @@ impl, Ext: clap::Args + fmt::Debug> Cl /// }) /// .unwrap(); /// ``` - pub fn with_runner(mut self, runner: CliRunner, launcher: L) -> eyre::Result<()> + pub fn with_runner(self, runner: CliRunner, launcher: L) -> eyre::Result<()> where L: FnOnce(WithLaunchContext, C::ChainSpec>>, Ext) -> Fut, Fut: Future>, + C: ChainSpecParser, + { + let components = |spec: Arc| { + (EthEvmConfig::ethereum(spec.clone()), EthBeaconConsensus::new(spec)) + }; + + self.with_runner_and_components::( + runner, + components, + async move |builder, ext| launcher(builder, ext).await, + ) + } + + /// Execute the configured cli command with the provided [`CliRunner`] and + /// [`CliComponentsBuilder`]. 
+ pub fn with_runner_and_components( + mut self, + runner: CliRunner, + components: impl CliComponentsBuilder, + launcher: impl AsyncFnOnce( + WithLaunchContext, C::ChainSpec>>, + Ext, + ) -> eyre::Result<()>, + ) -> eyre::Result<()> + where + N: CliNodeTypes< + Primitives: NodePrimitives, + ChainSpec: Hardforks, + >, + C: ChainSpecParser, { // Add network name if available to the logs dir if let Some(chain_spec) = self.command.chain_spec() { self.logs.log_file_directory = - self.logs.log_file_directory.join(chain_spec.chain.to_string()); + self.logs.log_file_directory.join(chain_spec.chain().to_string()); } let _guard = self.init_tracing()?; info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); @@ -147,50 +198,39 @@ impl, Ext: clap::Args + fmt::Debug> Cl // Install the prometheus recorder to be sure to record all metrics let _ = install_prometheus_recorder(); - let components = |spec: Arc| { - (EthEvmConfig::ethereum(spec.clone()), EthBeaconConsensus::new(spec)) - }; match self.command { Commands::Node(command) => runner.run_command_until_exit(|ctx| { command.execute(ctx, FnLauncher::new::(launcher)) }), - Commands::Init(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) - } + Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute::()), Commands::InitState(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::Import(command) => { - runner.run_blocking_until_ctrl_c(command.execute::(components)) + runner.run_blocking_until_ctrl_c(command.execute::(components)) } Commands::ImportEra(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::ExportEra(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + runner.run_blocking_until_ctrl_c(command.execute::()) } Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Db(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) - } - Commands::Download(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) + Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::()), + Commands::Download(command) => runner.run_blocking_until_ctrl_c(command.execute::()), + Commands::Stage(command) => { + runner.run_command_until_exit(|ctx| command.execute::(ctx, components)) } - Commands::Stage(command) => runner - .run_command_until_exit(|ctx| command.execute::(ctx, components)), - Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::()), + Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::()), #[cfg(feature = "dev")] Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), - Commands::Debug(command) => { - runner.run_command_until_exit(|ctx| command.execute::(ctx)) - } Commands::Recover(command) => { - runner.run_command_until_exit(|ctx| command.execute::(ctx)) + runner.run_command_until_exit(|ctx| command.execute::(ctx)) } - Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), + Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), Commands::ReExecute(command) => { - runner.run_until_ctrl_c(command.execute::(components)) + runner.run_until_ctrl_c(command.execute::(components)) } } } @@ -248,9 +288,6 @@ pub enum Commands { /// Write config to 
stdout #[command(name = "config")] Config(config_cmd::Command), - /// Various debug routines - #[command(name = "debug")] - Debug(Box>), /// Scripts for node recovery #[command(name = "recover")] Recover(recover::Command), @@ -280,7 +317,6 @@ impl Commands { #[cfg(feature = "dev")] Self::TestVectors(_) => None, Self::Config(_) => None, - Self::Debug(cmd) => cmd.chain_spec(), Self::Recover(cmd) => cmd.chain_spec(), Self::Prune(cmd) => cmd.chain_spec(), Self::ReExecute(cmd) => cmd.chain_spec(), diff --git a/crates/ethereum/cli/src/lib.rs b/crates/ethereum/cli/src/lib.rs index a9d0e355bac..067d49d1682 100644 --- a/crates/ethereum/cli/src/lib.rs +++ b/crates/ethereum/cli/src/lib.rs @@ -10,7 +10,6 @@ /// Chain specification parser. pub mod chainspec; -pub mod debug_cmd; pub mod interface; pub use interface::Cli; diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index 68e58cef81e..4fca639fb28 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -122,7 +122,9 @@ where { let mut this = self.configure(); this.set_runner(runner); - this.run(FnLauncher::new::(launcher)) + this.run(FnLauncher::new::(async move |builder, chain_spec| { + launcher(builder, chain_spec).await + })) } } diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx index fff16ea5821..970814b73eb 100644 --- a/docs/vocs/docs/pages/cli/SUMMARY.mdx +++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx @@ -39,11 +39,6 @@ - [`reth p2p rlpx ping`](/cli/reth/p2p/rlpx/ping) - [`reth p2p bootnode`](/cli/reth/p2p/bootnode) - [`reth config`](/cli/reth/config) - - [`reth debug`](/cli/reth/debug) - - [`reth debug execution`](/cli/reth/debug/execution) - - [`reth debug merkle`](/cli/reth/debug/merkle) - - [`reth debug in-memory-merkle`](/cli/reth/debug/in-memory-merkle) - - [`reth debug build-block`](/cli/reth/debug/build-block) - [`reth recover`](/cli/reth/recover) - [`reth recover storage-tries`](/cli/reth/recover/storage-tries) - [`reth prune`](/cli/reth/prune) diff --git a/docs/vocs/docs/pages/cli/reth.mdx b/docs/vocs/docs/pages/cli/reth.mdx index 04775950b2e..0d2a4355c84 100644 --- a/docs/vocs/docs/pages/cli/reth.mdx +++ b/docs/vocs/docs/pages/cli/reth.mdx @@ -21,7 +21,6 @@ Commands: stage Manipulate individual stages p2p P2P Debugging utilities config Write config to stdout - debug Various debug routines recover Scripts for node recovery prune Prune according to the configuration without any limits re-execute Re-execute blocks in parallel to verify historical sync correctness diff --git a/docs/vocs/docs/pages/cli/reth/debug.mdx b/docs/vocs/docs/pages/cli/reth/debug.mdx index 0f616236a67..f56a60aa941 100644 --- a/docs/vocs/docs/pages/cli/reth/debug.mdx +++ b/docs/vocs/docs/pages/cli/reth/debug.mdx @@ -9,10 +9,8 @@ $ reth debug --help Usage: reth debug [OPTIONS] Commands: - execution Debug the roundtrip execution of blocks as well as the generated data merkle Debug the clean & incremental state root calculations in-memory-merkle Debug in-memory state root calculation - build-block Debug block building help Print this message or the help of the given subcommand(s) Options: diff --git a/docs/vocs/docs/pages/cli/reth/debug/build-block.mdx b/docs/vocs/docs/pages/cli/reth/debug/build-block.mdx deleted file mode 100644 index ac8ab6d3214..00000000000 --- a/docs/vocs/docs/pages/cli/reth/debug/build-block.mdx +++ /dev/null @@ -1,164 +0,0 @@ -# reth debug build-block - -Debug block building - -```bash -$ reth debug build-block --help -``` -```txt -Usage: reth debug 
build-block [OPTIONS] --prev-randao --timestamp --suggested-fee-recipient - -Options: - -h, --help - Print help (see a summary with '-h') - -Datadir: - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --datadir.static-files - The absolute path to store static files in. - - --config - The path to the configuration file to use - - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, holesky, hoodi, dev - - [default: mainnet] - -Database: - --db.log-level - Database logging level. Levels higher than "notice" require a debug build - - Possible values: - - fatal: Enables logging for critical conditions, i.e. assertion failures - - error: Enables logging for error conditions - - warn: Enables logging for warning conditions - - notice: Enables logging for normal but significant condition - - verbose: Enables logging for verbose informational - - debug: Enables logging for debug-level messages - - trace: Enables logging for trace debug-level messages - - extra: Enables logging for extra debug-level messages - - --db.exclusive - Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - - [possible values: true, false] - - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - - --db.read-transaction-timeout - Read transaction timeout in seconds, 0 means no timeout - - --parent-beacon-block-root - - - --prev-randao - - - --timestamp - - - --suggested-fee-recipient - - - --transactions - Array of transactions. NOTE: 4844 transactions must be provided in the same order as they appear in the blobs bundle - - --blobs-bundle-path - Path to the file that contains a corresponding blobs bundle - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) - - -q, --quiet - Silence all log output -``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/debug/execution.mdx b/docs/vocs/docs/pages/cli/reth/debug/execution.mdx deleted file mode 100644 index ef7069f8173..00000000000 --- a/docs/vocs/docs/pages/cli/reth/debug/execution.mdx +++ /dev/null @@ -1,328 +0,0 @@ -# reth debug execution - -Debug the roundtrip execution of blocks as well as the generated data - -```bash -$ reth debug execution --help -``` -```txt -Usage: reth debug execution [OPTIONS] --to - -Options: - -h, --help - Print help (see a summary with '-h') - -Datadir: - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --datadir.static-files - The absolute path to store static files in. - - --config - The path to the configuration file to use - - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, holesky, hoodi, dev - - [default: mainnet] - -Database: - --db.log-level - Database logging level. Levels higher than "notice" require a debug build - - Possible values: - - fatal: Enables logging for critical conditions, i.e. assertion failures - - error: Enables logging for error conditions - - warn: Enables logging for warning conditions - - notice: Enables logging for normal but significant condition - - verbose: Enables logging for verbose informational - - debug: Enables logging for debug-level messages - - trace: Enables logging for trace debug-level messages - - extra: Enables logging for extra debug-level messages - - --db.exclusive - Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - - [possible values: true, false] - - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - - --db.read-transaction-timeout - Read transaction timeout in seconds, 0 means no timeout - -Networking: - -d, --disable-discovery - Disable the discovery service - - --disable-dns-discovery - Disable the DNS discovery - - --disable-discv4-discovery - Disable Discv4 discovery - - --enable-discv5-discovery - Enable Discv5 discovery - - --disable-nat - Disable Nat discovery - - --discovery.addr - The UDP address to use for devp2p peer discovery version 4 - - [default: 0.0.0.0] - - --discovery.port - The UDP port to use for devp2p peer discovery version 4 - - [default: 30303] - - --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 - - --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. 
Overwritten by `RLPx` address, if it's also IPv6 - - --discovery.v5.port - The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set - - [default: 9200] - - --discovery.v5.port.ipv6 - The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set - - [default: 9200] - - --discovery.v5.lookup-interval - The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - - [default: 20] - - --discovery.v5.bootstrap.lookup-interval - The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - - [default: 5] - - --discovery.v5.bootstrap.lookup-countdown - The number of times to carry out boost lookup queries at bootstrap - - [default: 200] - - --trusted-peers - Comma separated enode URLs of trusted peers for P2P connections. - - --trusted-peers enode://abcd@192.168.0.1:30303 - - --trusted-only - Connect to or accept from trusted peers only - - --bootnodes - Comma separated enode URLs for P2P discovery bootstrap. - - Will fall back to a network-specific default if not specified. - - --dns-retries - Amount of DNS resolution requests retries to perform when peering - - [default: 0] - - --peers-file - The path to the known peers file. Connected peers are dumped to this file on nodes - shutdown, and read on startup. Cannot be used with `--no-persist-peers`. - - --identity - Custom node identity - - [default: reth/-/] - - --p2p-secret-key - Secret key to use for this node. - - This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. - - --no-persist-peers - Do not persist peers. - - --nat - NAT resolution method (any|none|upnp|publicip|extip:\) - - [default: any] - - --addr - Network listening address - - [default: 0.0.0.0] - - --port - Network listening port - - [default: 30303] - - --max-outbound-peers - Maximum number of outbound requests. default: 100 - - --max-inbound-peers - Maximum number of inbound requests. default: 30 - - --max-tx-reqs - Max concurrent `GetPooledTransactions` requests. - - [default: 130] - - --max-tx-reqs-peer - Max concurrent `GetPooledTransactions` requests per peer. - - [default: 1] - - --max-seen-tx-history - Max number of seen transactions to remember per peer. - - Default is 320 transaction hashes. - - [default: 320] - - --max-pending-imports - Max number of transactions to import concurrently. - - [default: 4096] - - --pooled-tx-response-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions - to pack in one response. - Spec'd at 2MiB. - - [default: 2097152] - - --pooled-tx-pack-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions to - request in one request. - - Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see `RLPx` specs). This allows a node to request a specific size - response. - - By default, nodes request only 128 KiB worth of transactions, but should a peer request - more, up to 2 MiB, a node will answer with more than 128 KiB. - - Default is 128 KiB. - - [default: 131072] - - --max-tx-pending-fetch - Max capacity of cache of hashes for transactions pending fetch. - - [default: 25600] - - --net-if.experimental - Name of network interface used to communicate with peers. 
- - If flag is set, but no value is passed, the default interface for docker `eth0` is tried. - - --tx-propagation-policy - Transaction Propagation Policy - - The policy determines which peers transactions are gossiped to. - - [default: All] - - --to - The maximum block height - - --interval - The block interval for sync and unwind. Defaults to `1000` - - [default: 1000] - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) - - -q, --quiet - Silence all log output -``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/debug/in-memory-merkle.mdx b/docs/vocs/docs/pages/cli/reth/debug/in-memory-merkle.mdx deleted file mode 100644 index 7db3b2d2ba8..00000000000 --- a/docs/vocs/docs/pages/cli/reth/debug/in-memory-merkle.mdx +++ /dev/null @@ -1,328 +0,0 @@ -# reth debug in-memory-merkle - -Debug in-memory state root calculation - -```bash -$ reth debug in-memory-merkle --help -``` -```txt -Usage: reth debug in-memory-merkle [OPTIONS] - -Options: - -h, --help - Print help (see a summary with '-h') - -Datadir: - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --datadir.static-files - The absolute path to store static files in. - - --config - The path to the configuration file to use - - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. 
- - Built-in chains: - mainnet, sepolia, holesky, hoodi, dev - - [default: mainnet] - -Database: - --db.log-level - Database logging level. Levels higher than "notice" require a debug build - - Possible values: - - fatal: Enables logging for critical conditions, i.e. assertion failures - - error: Enables logging for error conditions - - warn: Enables logging for warning conditions - - notice: Enables logging for normal but significant condition - - verbose: Enables logging for verbose informational - - debug: Enables logging for debug-level messages - - trace: Enables logging for trace debug-level messages - - extra: Enables logging for extra debug-level messages - - --db.exclusive - Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - - [possible values: true, false] - - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - - --db.read-transaction-timeout - Read transaction timeout in seconds, 0 means no timeout - -Networking: - -d, --disable-discovery - Disable the discovery service - - --disable-dns-discovery - Disable the DNS discovery - - --disable-discv4-discovery - Disable Discv4 discovery - - --enable-discv5-discovery - Enable Discv5 discovery - - --disable-nat - Disable Nat discovery - - --discovery.addr - The UDP address to use for devp2p peer discovery version 4 - - [default: 0.0.0.0] - - --discovery.port - The UDP port to use for devp2p peer discovery version 4 - - [default: 30303] - - --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 - - --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6 - - --discovery.v5.port - The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set - - [default: 9200] - - --discovery.v5.port.ipv6 - The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set - - [default: 9200] - - --discovery.v5.lookup-interval - The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - - [default: 20] - - --discovery.v5.bootstrap.lookup-interval - The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - - [default: 5] - - --discovery.v5.bootstrap.lookup-countdown - The number of times to carry out boost lookup queries at bootstrap - - [default: 200] - - --trusted-peers - Comma separated enode URLs of trusted peers for P2P connections. - - --trusted-peers enode://abcd@192.168.0.1:30303 - - --trusted-only - Connect to or accept from trusted peers only - - --bootnodes - Comma separated enode URLs for P2P discovery bootstrap. - - Will fall back to a network-specific default if not specified. - - --dns-retries - Amount of DNS resolution requests retries to perform when peering - - [default: 0] - - --peers-file - The path to the known peers file. Connected peers are dumped to this file on nodes - shutdown, and read on startup. Cannot be used with `--no-persist-peers`. - - --identity - Custom node identity - - [default: reth/-/] - - --p2p-secret-key - Secret key to use for this node. - - This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. 
- - --no-persist-peers - Do not persist peers. - - --nat - NAT resolution method (any|none|upnp|publicip|extip:\) - - [default: any] - - --addr - Network listening address - - [default: 0.0.0.0] - - --port - Network listening port - - [default: 30303] - - --max-outbound-peers - Maximum number of outbound requests. default: 100 - - --max-inbound-peers - Maximum number of inbound requests. default: 30 - - --max-tx-reqs - Max concurrent `GetPooledTransactions` requests. - - [default: 130] - - --max-tx-reqs-peer - Max concurrent `GetPooledTransactions` requests per peer. - - [default: 1] - - --max-seen-tx-history - Max number of seen transactions to remember per peer. - - Default is 320 transaction hashes. - - [default: 320] - - --max-pending-imports - Max number of transactions to import concurrently. - - [default: 4096] - - --pooled-tx-response-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions - to pack in one response. - Spec'd at 2MiB. - - [default: 2097152] - - --pooled-tx-pack-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions to - request in one request. - - Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see `RLPx` specs). This allows a node to request a specific size - response. - - By default, nodes request only 128 KiB worth of transactions, but should a peer request - more, up to 2 MiB, a node will answer with more than 128 KiB. - - Default is 128 KiB. - - [default: 131072] - - --max-tx-pending-fetch - Max capacity of cache of hashes for transactions pending fetch. - - [default: 25600] - - --net-if.experimental - Name of network interface used to communicate with peers. - - If flag is set, but no value is passed, the default interface for docker `eth0` is tried. - - --tx-propagation-policy - Transaction Propagation Policy - - The policy determines which peers transactions are gossiped to. - - [default: All] - - --retries - The number of retries per request - - [default: 5] - - --skip-node-depth - The depth after which we should start comparing branch nodes - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. 
If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) - - -q, --quiet - Silence all log output -``` \ No newline at end of file diff --git a/docs/vocs/docs/pages/cli/reth/debug/merkle.mdx b/docs/vocs/docs/pages/cli/reth/debug/merkle.mdx deleted file mode 100644 index 03b16a35e38..00000000000 --- a/docs/vocs/docs/pages/cli/reth/debug/merkle.mdx +++ /dev/null @@ -1,331 +0,0 @@ -# reth debug merkle - -Debug the clean & incremental state root calculations - -```bash -$ reth debug merkle --help -``` -```txt -Usage: reth debug merkle [OPTIONS] --to - -Options: - -h, --help - Print help (see a summary with '-h') - -Datadir: - --datadir - The path to the data dir for all reth files and subdirectories. - - Defaults to the OS-specific data directory: - - - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/` - - Windows: `{FOLDERID_RoamingAppData}/reth/` - - macOS: `$HOME/Library/Application Support/reth/` - - [default: default] - - --datadir.static-files - The absolute path to store static files in. - - --config - The path to the configuration file to use - - --chain - The chain this node is running. - Possible values are either a built-in chain or the path to a chain specification file. - - Built-in chains: - mainnet, sepolia, holesky, hoodi, dev - - [default: mainnet] - -Database: - --db.log-level - Database logging level. Levels higher than "notice" require a debug build - - Possible values: - - fatal: Enables logging for critical conditions, i.e. assertion failures - - error: Enables logging for error conditions - - warn: Enables logging for warning conditions - - notice: Enables logging for normal but significant condition - - verbose: Enables logging for verbose informational - - debug: Enables logging for debug-level messages - - trace: Enables logging for trace debug-level messages - - extra: Enables logging for extra debug-level messages - - --db.exclusive - Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume - - [possible values: true, false] - - --db.max-size - Maximum database size (e.g., 4TB, 8MB) - - --db.growth-step - Database growth step (e.g., 4GB, 4KB) - - --db.read-transaction-timeout - Read transaction timeout in seconds, 0 means no timeout - -Networking: - -d, --disable-discovery - Disable the discovery service - - --disable-dns-discovery - Disable the DNS discovery - - --disable-discv4-discovery - Disable Discv4 discovery - - --enable-discv5-discovery - Enable Discv5 discovery - - --disable-nat - Disable Nat discovery - - --discovery.addr - The UDP address to use for devp2p peer discovery version 4 - - [default: 0.0.0.0] - - --discovery.port - The UDP port to use for devp2p peer discovery version 4 - - [default: 30303] - - --discovery.v5.addr - The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4 - - --discovery.v5.addr.ipv6 - The UDP IPv6 address to use for devp2p peer discovery version 5. 
Overwritten by `RLPx` address, if it's also IPv6 - - --discovery.v5.port - The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set - - [default: 9200] - - --discovery.v5.port.ipv6 - The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set - - [default: 9200] - - --discovery.v5.lookup-interval - The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program - - [default: 20] - - --discovery.v5.bootstrap.lookup-interval - The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap - - [default: 5] - - --discovery.v5.bootstrap.lookup-countdown - The number of times to carry out boost lookup queries at bootstrap - - [default: 200] - - --trusted-peers - Comma separated enode URLs of trusted peers for P2P connections. - - --trusted-peers enode://abcd@192.168.0.1:30303 - - --trusted-only - Connect to or accept from trusted peers only - - --bootnodes - Comma separated enode URLs for P2P discovery bootstrap. - - Will fall back to a network-specific default if not specified. - - --dns-retries - Amount of DNS resolution requests retries to perform when peering - - [default: 0] - - --peers-file - The path to the known peers file. Connected peers are dumped to this file on nodes - shutdown, and read on startup. Cannot be used with `--no-persist-peers`. - - --identity - Custom node identity - - [default: reth/-/] - - --p2p-secret-key - Secret key to use for this node. - - This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used. - - --no-persist-peers - Do not persist peers. - - --nat - NAT resolution method (any|none|upnp|publicip|extip:\) - - [default: any] - - --addr - Network listening address - - [default: 0.0.0.0] - - --port - Network listening port - - [default: 30303] - - --max-outbound-peers - Maximum number of outbound requests. default: 100 - - --max-inbound-peers - Maximum number of inbound requests. default: 30 - - --max-tx-reqs - Max concurrent `GetPooledTransactions` requests. - - [default: 130] - - --max-tx-reqs-peer - Max concurrent `GetPooledTransactions` requests per peer. - - [default: 1] - - --max-seen-tx-history - Max number of seen transactions to remember per peer. - - Default is 320 transaction hashes. - - [default: 320] - - --max-pending-imports - Max number of transactions to import concurrently. - - [default: 4096] - - --pooled-tx-response-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions - to pack in one response. - Spec'd at 2MiB. - - [default: 2097152] - - --pooled-tx-pack-soft-limit - Experimental, for usage in research. Sets the max accumulated byte size of transactions to - request in one request. - - Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a - transaction announcement (see `RLPx` specs). This allows a node to request a specific size - response. - - By default, nodes request only 128 KiB worth of transactions, but should a peer request - more, up to 2 MiB, a node will answer with more than 128 KiB. - - Default is 128 KiB. - - [default: 131072] - - --max-tx-pending-fetch - Max capacity of cache of hashes for transactions pending fetch. - - [default: 25600] - - --net-if.experimental - Name of network interface used to communicate with peers. 
- - If flag is set, but no value is passed, the default interface for docker `eth0` is tried. - - --tx-propagation-policy - Transaction Propagation Policy - - The policy determines which peers transactions are gossiped to. - - [default: All] - - --retries - The number of retries per request - - [default: 5] - - --to - The height to finish at - - --skip-node-depth - The depth after which we should start comparing branch nodes - -Logging: - --log.stdout.format - The format to use for logs written to stdout - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.stdout.filter - The filter to use for logs written to stdout - - [default: ] - - --log.file.format - The format to use for logs written to the log file - - [default: terminal] - - Possible values: - - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging - - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications - - terminal: Represents terminal-friendly formatting for logs - - --log.file.filter - The filter to use for logs written to the log file - - [default: debug] - - --log.file.directory - The path to put log files in - - [default: /logs] - - --log.file.max-size - The maximum size (in MB) of one log file - - [default: 200] - - --log.file.max-files - The maximum amount of log files that will be stored. If set to 0, background file logging is disabled - - [default: 5] - - --log.journald - Write logs to journald - - --log.journald.filter - The filter to use for logs written to journald - - [default: error] - - --color - Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting - - [default: always] - - Possible values: - - always: Colors on - - auto: Colors on - - never: Colors off - -Display: - -v, --verbosity... - Set the minimum log level. - - -v Errors - -vv Warnings - -vvv Info - -vvvv Debug - -vvvvv Traces (warning: very verbose!) 
- - -q, --quiet - Silence all log output -``` \ No newline at end of file From cbf2ceb3449d1092ef58831f9d90b12a6ac78949 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 11 Jul 2025 06:27:58 -0400 Subject: [PATCH 141/305] chore(consensus): remove outdated comment from validate_block_pre_execution (#17360) --- crates/consensus/common/src/validation.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index a682bc2f910..450817f2705 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -132,7 +132,6 @@ where /// - Compares the ommer hash in the block header to the block body /// - Compares the transactions root in the block header to the block body /// - Pre-execution transaction validation -/// - (Optionally) Compares the receipts root in the block header to the block body pub fn validate_block_pre_execution( block: &SealedBlock, chain_spec: &ChainSpec, From 88ce599f658e599e8a0e27959d8f271789662205 Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Fri, 11 Jul 2025 16:35:51 +0530 Subject: [PATCH 142/305] fix(`docs`): update-book-cli job (#17365) --- docs/cli/help.rs | 59 ++++-------------- docs/vocs/docs/pages/cli/SUMMARY.mdx | 90 ++++++++++++++-------------- 2 files changed, 57 insertions(+), 92 deletions(-) diff --git a/docs/cli/help.rs b/docs/cli/help.rs index e97d0bbfc46..c6e73318e08 100755 --- a/docs/cli/help.rs +++ b/docs/cli/help.rs @@ -5,26 +5,20 @@ edition = "2021" [dependencies] clap = { version = "4", features = ["derive"] } -pathdiff = "0.2" regex = "1" --- use clap::Parser; use regex::Regex; use std::{ borrow::Cow, - fmt, - fs::{self, File}, - io::{self, Write}, + fmt, fs, io, iter::once, path::{Path, PathBuf}, - process, process::{Command, Stdio}, str, sync::LazyLock, }; -const SECTION_START: &str = "{/* CLI_REFERENCE START */}"; -const SECTION_END: &str = "{/* CLI_REFERENCE END */"; const README: &str = r#"import Summary from './SUMMARY.mdx'; # CLI Reference @@ -124,10 +118,11 @@ fn main() -> io::Result<()> { // Generate SUMMARY.mdx. let summary: String = output .iter() - .map(|(cmd, _)| cmd_summary(None, cmd, 0)) + .map(|(cmd, _)| cmd_summary(cmd, 0)) .chain(once("\n".to_string())) .collect(); + println!("Writing SUMMARY.mdx to \"{}\"", out_dir.to_string_lossy()); write_file(&out_dir.clone().join("SUMMARY.mdx"), &summary)?; // Generate README.md. @@ -143,10 +138,7 @@ fn main() -> io::Result<()> { if args.root_summary { let root_summary: String = output .iter() - .map(|(cmd, _)| { - let root_path = pathdiff::diff_paths(&out_dir, &args.root_dir); - cmd_summary(root_path, cmd, args.root_indentation) - }) + .map(|(cmd, _)| cmd_summary(cmd, args.root_indentation)) .collect(); let path = Path::new(args.root_dir.as_str()); @@ -154,7 +146,7 @@ fn main() -> io::Result<()> { println!("Updating root summary in \"{}\"", path.to_string_lossy()); } // TODO: This is where we update the cli reference sidebar.ts - // update_root_summary(path, &root_summary)?; + update_root_summary(path, &root_summary)?; } Ok(()) @@ -244,47 +236,20 @@ fn parse_description(s: &str) -> (&str, &str) { } /// Returns the summary for a command and its subcommands. 
-fn cmd_summary(md_root: Option, cmd: &Cmd, indent: usize) -> String { +fn cmd_summary(cmd: &Cmd, indent: usize) -> String { let cmd_s = cmd.to_string(); let cmd_path = cmd_s.replace(" ", "/"); - let full_cmd_path = match md_root { - None => cmd_path, - Some(md_root) => format!("{}/{}", md_root.to_string_lossy(), cmd_path), - }; let indent_string = " ".repeat(indent + (cmd.subcommands.len() * 2)); - format!("{}- [`{}`](/cli/{})\n", indent_string, cmd_s, full_cmd_path) + format!("{}- [`{}`](/cli/{})\n", indent_string, cmd_s, cmd_path) } -/// Replaces the CLI_REFERENCE section in the root SUMMARY.mdx file. +/// Overwrites the root SUMMARY.mdx file with the generated content. fn update_root_summary(root_dir: &Path, root_summary: &str) -> io::Result<()> { - let summary_file = root_dir.join("SUMMARY.mdx"); - let original_summary_content = fs::read_to_string(&summary_file)?; - - let section_re = regex!(&format!(r"(?s)\s*{SECTION_START}.*?{SECTION_END}")); - if !section_re.is_match(&original_summary_content) { - eprintln!( - "Could not find CLI_REFERENCE section in {}. Please add the following section to the file:\n{}\n... CLI Reference goes here ...\n\n{}", - summary_file.display(), - SECTION_START, - SECTION_END - ); - process::exit(1); - } - - let section_end_re = regex!(&format!(r".*{SECTION_END}")); - let last_line = section_end_re - .find(&original_summary_content) - .map(|m| m.as_str().to_string()) - .expect("Could not extract last line of CLI_REFERENCE section"); - - let root_summary_s = root_summary.trim_end().replace("\n\n", "\n"); - let replace_with = format!(" {}\n{}\n{}", SECTION_START, root_summary_s, last_line); - - let new_root_summary = - section_re.replace(&original_summary_content, replace_with.as_str()).to_string(); + let summary_file = root_dir.join("vocs/docs/pages/cli/SUMMARY.mdx"); + println!("Overwriting {}", summary_file.display()); - let mut root_summary_file = File::create(&summary_file)?; - root_summary_file.write_all(new_root_summary.as_bytes()) + // Simply write the root summary content to the file + write_file(&summary_file, root_summary) } /// Preprocesses the help output of a command. 
diff --git a/docs/vocs/docs/pages/cli/SUMMARY.mdx b/docs/vocs/docs/pages/cli/SUMMARY.mdx index 970814b73eb..d7582ab64c5 100644 --- a/docs/vocs/docs/pages/cli/SUMMARY.mdx +++ b/docs/vocs/docs/pages/cli/SUMMARY.mdx @@ -1,45 +1,45 @@ -- [`reth`](/cli/reth) - - [`reth node`](/cli/reth/node) - - [`reth init`](/cli/reth/init) - - [`reth init-state`](/cli/reth/init-state) - - [`reth import`](/cli/reth/import) - - [`reth import-era`](/cli/reth/import-era) - - [`reth export-era`](/cli/reth/export-era) - - [`reth dump-genesis`](/cli/reth/dump-genesis) - - [`reth db`](/cli/reth/db) - - [`reth db stats`](/cli/reth/db/stats) - - [`reth db list`](/cli/reth/db/list) - - [`reth db checksum`](/cli/reth/db/checksum) - - [`reth db diff`](/cli/reth/db/diff) - - [`reth db get`](/cli/reth/db/get) - - [`reth db get mdbx`](/cli/reth/db/get/mdbx) - - [`reth db get static-file`](/cli/reth/db/get/static-file) - - [`reth db drop`](/cli/reth/db/drop) - - [`reth db clear`](/cli/reth/db/clear) - - [`reth db clear mdbx`](/cli/reth/db/clear/mdbx) - - [`reth db clear static-file`](/cli/reth/db/clear/static-file) - - [`reth db version`](/cli/reth/db/version) - - [`reth db path`](/cli/reth/db/path) - - [`reth download`](/cli/reth/download) - - [`reth stage`](/cli/reth/stage) - - [`reth stage run`](/cli/reth/stage/run) - - [`reth stage drop`](/cli/reth/stage/drop) - - [`reth stage dump`](/cli/reth/stage/dump) - - [`reth stage dump execution`](/cli/reth/stage/dump/execution) - - [`reth stage dump storage-hashing`](/cli/reth/stage/dump/storage-hashing) - - [`reth stage dump account-hashing`](/cli/reth/stage/dump/account-hashing) - - [`reth stage dump merkle`](/cli/reth/stage/dump/merkle) - - [`reth stage unwind`](/cli/reth/stage/unwind) - - [`reth stage unwind to-block`](/cli/reth/stage/unwind/to-block) - - [`reth stage unwind num-blocks`](/cli/reth/stage/unwind/num-blocks) - - [`reth p2p`](/cli/reth/p2p) - - [`reth p2p header`](/cli/reth/p2p/header) - - [`reth p2p body`](/cli/reth/p2p/body) - - [`reth p2p rlpx`](/cli/reth/p2p/rlpx) - - [`reth p2p rlpx ping`](/cli/reth/p2p/rlpx/ping) - - [`reth p2p bootnode`](/cli/reth/p2p/bootnode) - - [`reth config`](/cli/reth/config) - - [`reth recover`](/cli/reth/recover) - - [`reth recover storage-tries`](/cli/reth/recover/storage-tries) - - [`reth prune`](/cli/reth/prune) - - [`reth re-execute`](/cli/reth/re-execute) + - [`reth`](/cli/reth) + - [`reth node`](/cli/reth/node) + - [`reth init`](/cli/reth/init) + - [`reth init-state`](/cli/reth/init-state) + - [`reth import`](/cli/reth/import) + - [`reth import-era`](/cli/reth/import-era) + - [`reth export-era`](/cli/reth/export-era) + - [`reth dump-genesis`](/cli/reth/dump-genesis) + - [`reth db`](/cli/reth/db) + - [`reth db stats`](/cli/reth/db/stats) + - [`reth db list`](/cli/reth/db/list) + - [`reth db checksum`](/cli/reth/db/checksum) + - [`reth db diff`](/cli/reth/db/diff) + - [`reth db get`](/cli/reth/db/get) + - [`reth db get mdbx`](/cli/reth/db/get/mdbx) + - [`reth db get static-file`](/cli/reth/db/get/static-file) + - [`reth db drop`](/cli/reth/db/drop) + - [`reth db clear`](/cli/reth/db/clear) + - [`reth db clear mdbx`](/cli/reth/db/clear/mdbx) + - [`reth db clear static-file`](/cli/reth/db/clear/static-file) + - [`reth db version`](/cli/reth/db/version) + - [`reth db path`](/cli/reth/db/path) + - [`reth download`](/cli/reth/download) + - [`reth stage`](/cli/reth/stage) + - [`reth stage run`](/cli/reth/stage/run) + - [`reth stage drop`](/cli/reth/stage/drop) + - [`reth stage dump`](/cli/reth/stage/dump) + - [`reth stage dump 
execution`](/cli/reth/stage/dump/execution) + - [`reth stage dump storage-hashing`](/cli/reth/stage/dump/storage-hashing) + - [`reth stage dump account-hashing`](/cli/reth/stage/dump/account-hashing) + - [`reth stage dump merkle`](/cli/reth/stage/dump/merkle) + - [`reth stage unwind`](/cli/reth/stage/unwind) + - [`reth stage unwind to-block`](/cli/reth/stage/unwind/to-block) + - [`reth stage unwind num-blocks`](/cli/reth/stage/unwind/num-blocks) + - [`reth p2p`](/cli/reth/p2p) + - [`reth p2p header`](/cli/reth/p2p/header) + - [`reth p2p body`](/cli/reth/p2p/body) + - [`reth p2p rlpx`](/cli/reth/p2p/rlpx) + - [`reth p2p rlpx ping`](/cli/reth/p2p/rlpx/ping) + - [`reth p2p bootnode`](/cli/reth/p2p/bootnode) + - [`reth config`](/cli/reth/config) + - [`reth recover`](/cli/reth/recover) + - [`reth recover storage-tries`](/cli/reth/recover/storage-tries) + - [`reth prune`](/cli/reth/prune) + - [`reth re-execute`](/cli/reth/re-execute) \ No newline at end of file From 00d117dd3e1045610aef58d857f74eb5bb945493 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 11 Jul 2025 08:05:03 -0400 Subject: [PATCH 143/305] chore(trie): impl TrieUpdates::drain_into_sorted (#17361) --- crates/trie/common/src/updates.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index be62f38b967..f9bb0d21e92 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -118,6 +118,30 @@ impl TrieUpdates { TrieUpdatesSorted { removed_nodes: self.removed_nodes, account_nodes, storage_tries } } + /// Converts trie updates into [`TrieUpdatesSorted`], but keeping the maps allocated by + /// draining. + /// + /// This effectively clears all the fields in the [`TrieUpdatesSorted`]. + /// + /// This allows us to re-use the allocated space. This allocates new space for the sorted + /// updates, like `into_sorted`. + pub fn drain_into_sorted(&mut self) -> TrieUpdatesSorted { + let mut account_nodes = self.account_nodes.drain().collect::>(); + account_nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + + let storage_tries = self + .storage_tries + .drain() + .map(|(hashed_address, updates)| (hashed_address, updates.into_sorted())) + .collect(); + + TrieUpdatesSorted { + removed_nodes: self.removed_nodes.clone(), + account_nodes, + storage_tries, + } + } + /// Converts trie updates into [`TrieUpdatesSortedRef`]. pub fn into_sorted_ref<'a>(&'a self) -> TrieUpdatesSortedRef<'a> { let mut account_nodes = self.account_nodes.iter().collect::>(); From bcc9ed461e85bc596dd88320fdffd432b1f86538 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 11 Jul 2025 08:05:15 -0400 Subject: [PATCH 144/305] chore(trie): impl HashedPostState::drain_into_sorted (#17362) --- crates/trie/common/src/hashed_state.rs | 29 ++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index 374f36fdd44..eb0f4e653d7 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -335,6 +335,35 @@ impl HashedPostState { HashedPostStateSorted { accounts, storages } } + /// Converts hashed post state into [`HashedPostStateSorted`], but keeping the maps allocated by + /// draining. + /// + /// This effectively clears all the fields in the [`HashedPostStateSorted`]. + /// + /// This allows us to re-use the allocated space. 
This allocates new space for the sorted hashed + /// post state, like `into_sorted`. + pub fn drain_into_sorted(&mut self) -> HashedPostStateSorted { + let mut updated_accounts = Vec::new(); + let mut destroyed_accounts = HashSet::default(); + for (hashed_address, info) in self.accounts.drain() { + if let Some(info) = info { + updated_accounts.push((hashed_address, info)); + } else { + destroyed_accounts.insert(hashed_address); + } + } + updated_accounts.sort_unstable_by_key(|(address, _)| *address); + let accounts = HashedAccountsSorted { accounts: updated_accounts, destroyed_accounts }; + + let storages = self + .storages + .drain() + .map(|(hashed_address, storage)| (hashed_address, storage.into_sorted())) + .collect(); + + HashedPostStateSorted { accounts, storages } + } + /// Clears the account and storage maps of this `HashedPostState`. pub fn clear(&mut self) { self.accounts.clear(); From 2060813af5456464f24fe7fb847b8e5cf5545925 Mon Sep 17 00:00:00 2001 From: Tomass <155266802+zeroprooff@users.noreply.github.com> Date: Fri, 11 Jul 2025 15:41:34 +0300 Subject: [PATCH 145/305] docs:fix spelling error in flowchart (#17346) --- crates/engine/tree/docs/mermaid/state-root-task.mmd | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/engine/tree/docs/mermaid/state-root-task.mmd b/crates/engine/tree/docs/mermaid/state-root-task.mmd index 011196d9e0d..d1993035f21 100644 --- a/crates/engine/tree/docs/mermaid/state-root-task.mmd +++ b/crates/engine/tree/docs/mermaid/state-root-task.mmd @@ -4,7 +4,7 @@ flowchart TD StateRootMessage::PrefetchProofs StateRootMessage::EmptyProof StateRootMessage::ProofCalculated - StataRootMessage::FinishedStateUpdates + StateRootMessage::FinishedStateUpdates end subgraph StateRootTask[State Root Task thread] @@ -40,5 +40,5 @@ flowchart TD StateRootMessage::ProofCalculated --> NewProof NewProof ---> MultiProofCompletion ProofSequencerCondition -->|Yes, send multiproof and state update| SparseTrieUpdate - StataRootMessage::FinishedStateUpdates --> EndCondition1 + StateRootMessage::FinishedStateUpdates --> EndCondition1 EndCondition3 -->|Close SparseTrieUpdate channel| SparseTrieUpdate From 96f8faf8f084834b70f74a5b36f4759755499911 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Fri, 11 Jul 2025 09:26:22 -0400 Subject: [PATCH 146/305] feat(trie): wire parallel trie config to PayloadProcessor (#17355) --- Cargo.lock | 2 +- crates/engine/tree/Cargo.toml | 1 + .../configured_sparse_trie.rs | 162 +++++++++++++++ .../tree/src/tree/payload_processor/mod.rs | 184 +++++++++++++++--- crates/trie/sparse/Cargo.toml | 2 - crates/trie/sparse/src/lib.rs | 3 - crates/trie/sparse/src/traits.rs | 128 ------------ crates/trie/sparse/src/trie.rs | 39 ++-- 8 files changed, 349 insertions(+), 172 deletions(-) create mode 100644 crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs diff --git a/Cargo.lock b/Cargo.lock index 75d1991ce08..7a55f03052a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8008,6 +8008,7 @@ dependencies = [ "reth-trie-db", "reth-trie-parallel", "reth-trie-sparse", + "reth-trie-sparse-parallel", "revm", "revm-primitives", "revm-state", @@ -10588,7 +10589,6 @@ dependencies = [ "assert_matches", "auto_impl", "codspeed-criterion-compat", - "either", "itertools 0.14.0", "metrics", "pretty_assertions", diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index c8a4b730cbe..550895798dd 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -32,6 
+32,7 @@ reth-tasks.workspace = true reth-trie-db.workspace = true reth-trie-parallel.workspace = true reth-trie-sparse = { workspace = true, features = ["std", "metrics"] } +reth-trie-sparse-parallel = { workspace = true, features = ["std"] } reth-trie.workspace = true # alloy diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs new file mode 100644 index 00000000000..55cd203c0b9 --- /dev/null +++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs @@ -0,0 +1,162 @@ +//! Configured sparse trie enum for switching between serial and parallel implementations. + +use alloy_primitives::B256; +use reth_trie::{Nibbles, TrieNode}; +use reth_trie_sparse::{ + blinded::BlindedProvider, errors::SparseTrieResult, LeafLookup, LeafLookupError, + SerialSparseTrie, SparseTrieInterface, SparseTrieUpdates, TrieMasks, +}; +use reth_trie_sparse_parallel::ParallelSparseTrie; +use std::borrow::Cow; + +/// Enum for switching between serial and parallel sparse trie implementations. +/// +/// This type allows runtime selection between different sparse trie implementations, +/// providing flexibility in choosing the appropriate implementation based on workload +/// characteristics. +#[derive(Debug)] +pub enum ConfiguredSparseTrie { + /// Serial implementation of the sparse trie. + Serial(Box), + /// Parallel implementation of the sparse trie. + Parallel(Box), +} + +impl From for ConfiguredSparseTrie { + fn from(trie: SerialSparseTrie) -> Self { + Self::Serial(Box::new(trie)) + } +} + +impl From for ConfiguredSparseTrie { + fn from(trie: ParallelSparseTrie) -> Self { + Self::Parallel(Box::new(trie)) + } +} + +impl SparseTrieInterface for ConfiguredSparseTrie { + fn with_root( + self, + root: TrieNode, + masks: TrieMasks, + retain_updates: bool, + ) -> SparseTrieResult { + match self { + Self::Serial(trie) => { + trie.with_root(root, masks, retain_updates).map(|t| Self::Serial(Box::new(t))) + } + Self::Parallel(trie) => { + trie.with_root(root, masks, retain_updates).map(|t| Self::Parallel(Box::new(t))) + } + } + } + + fn with_updates(self, retain_updates: bool) -> Self { + match self { + Self::Serial(trie) => Self::Serial(Box::new(trie.with_updates(retain_updates))), + Self::Parallel(trie) => Self::Parallel(Box::new(trie.with_updates(retain_updates))), + } + } + + fn reserve_nodes(&mut self, additional: usize) { + match self { + Self::Serial(trie) => trie.reserve_nodes(additional), + Self::Parallel(trie) => trie.reserve_nodes(additional), + } + } + + fn reveal_node( + &mut self, + path: Nibbles, + node: TrieNode, + masks: TrieMasks, + ) -> SparseTrieResult<()> { + match self { + Self::Serial(trie) => trie.reveal_node(path, node, masks), + Self::Parallel(trie) => trie.reveal_node(path, node, masks), + } + } + + fn update_leaf( + &mut self, + full_path: Nibbles, + value: Vec, + provider: P, + ) -> SparseTrieResult<()> { + match self { + Self::Serial(trie) => trie.update_leaf(full_path, value, provider), + Self::Parallel(trie) => trie.update_leaf(full_path, value, provider), + } + } + + fn remove_leaf( + &mut self, + full_path: &Nibbles, + provider: P, + ) -> SparseTrieResult<()> { + match self { + Self::Serial(trie) => trie.remove_leaf(full_path, provider), + Self::Parallel(trie) => trie.remove_leaf(full_path, provider), + } + } + + fn root(&mut self) -> B256 { + match self { + Self::Serial(trie) => trie.root(), + Self::Parallel(trie) => trie.root(), + } + } + + fn update_subtrie_hashes(&mut 
self) { + match self { + Self::Serial(trie) => trie.update_subtrie_hashes(), + Self::Parallel(trie) => trie.update_subtrie_hashes(), + } + } + + fn get_leaf_value(&self, full_path: &Nibbles) -> Option<&Vec> { + match self { + Self::Serial(trie) => trie.get_leaf_value(full_path), + Self::Parallel(trie) => trie.get_leaf_value(full_path), + } + } + + fn find_leaf( + &self, + full_path: &Nibbles, + expected_value: Option<&Vec>, + ) -> Result { + match self { + Self::Serial(trie) => trie.find_leaf(full_path, expected_value), + Self::Parallel(trie) => trie.find_leaf(full_path, expected_value), + } + } + + fn take_updates(&mut self) -> SparseTrieUpdates { + match self { + Self::Serial(trie) => trie.take_updates(), + Self::Parallel(trie) => trie.take_updates(), + } + } + + fn wipe(&mut self) { + match self { + Self::Serial(trie) => trie.wipe(), + Self::Parallel(trie) => trie.wipe(), + } + } + + fn clear(&mut self) { + match self { + Self::Serial(trie) => trie.clear(), + Self::Parallel(trie) => trie.clear(), + } + } + + fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { + match self { + Self::Serial(trie) => trie.updates_ref(), + Self::Parallel(trie) => trie.updates_ref(), + } + } +} diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 165b0ed1c2c..d50ef6f1da2 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -13,7 +13,7 @@ use alloy_consensus::{transaction::Recovered, BlockHeader}; use alloy_evm::block::StateChangeSource; use alloy_primitives::B256; use executor::WorkloadExecutor; -use multiproof::*; +use multiproof::{SparseTrieUpdate, *}; use parking_lot::RwLock; use prewarm::PrewarmMetrics; use reth_evm::{ConfigureEvm, OnStateHook, SpecFor}; @@ -28,7 +28,11 @@ use reth_trie_parallel::{ proof_task::{ProofTaskCtx, ProofTaskManager}, root::ParallelStateRootError, }; -use reth_trie_sparse::{SerialSparseTrie, SparseTrie}; +use reth_trie_sparse::{ + blinded::{BlindedProvider, BlindedProviderFactory}, + SerialSparseTrie, SparseTrie, SparseTrieInterface, +}; +use reth_trie_sparse_parallel::ParallelSparseTrie; use std::{ collections::VecDeque, sync::{ @@ -40,11 +44,14 @@ use std::{ use super::precompile_cache::PrecompileCacheMap; +mod configured_sparse_trie; pub mod executor; pub mod multiproof; pub mod prewarm; pub mod sparse_trie; +use configured_sparse_trie::ConfiguredSparseTrie; + /// Entrypoint for executing the payload. #[derive(Debug)] pub struct PayloadProcessor @@ -70,7 +77,9 @@ where precompile_cache_map: PrecompileCacheMap>, /// A cleared sparse trie, kept around to be reused for the state root computation so that /// allocations can be minimized. - sparse_trie: Option, + sparse_trie: Option>, + /// Whether to use the parallel sparse trie. 
+ use_parallel_sparse_trie: bool, _marker: std::marker::PhantomData, } @@ -96,6 +105,7 @@ where precompile_cache_disabled: config.precompile_cache_disabled(), precompile_cache_map, sparse_trie: None, + use_parallel_sparse_trie: config.enable_parallel_sparse_trie(), _marker: Default::default(), } } @@ -196,24 +206,20 @@ where multi_proof_task.run(); }); - // take the sparse trie if it was set - let sparse_trie = self.sparse_trie.take(); - - let mut sparse_trie_task = - SparseTrieTask::<_, SerialSparseTrie, SerialSparseTrie>::new_with_stored_trie( - self.executor.clone(), - sparse_trie_rx, - proof_task.handle(), - self.trie_metrics.clone(), - sparse_trie, - ); - // wire the sparse trie to the state root response receiver let (state_root_tx, state_root_rx) = channel(); - self.executor.spawn_blocking(move || { - let res = sparse_trie_task.run(); - let _ = state_root_tx.send(res); - }); + + // Take the stored sparse trie + let stored_trie = self.sparse_trie.take(); + + // Spawn the sparse trie task using any stored trie and parallel trie configuration. + self.spawn_sparse_trie_task( + sparse_trie_rx, + proof_task.handle(), + state_root_tx, + stored_trie, + self.use_parallel_sparse_trie, + ); // spawn the proof task self.executor.spawn_blocking(move || { @@ -252,7 +258,7 @@ where } /// Sets the sparse trie to be kept around for the state root computation. - pub(super) fn set_sparse_trie(&mut self, sparse_trie: SparseTrie) { + pub(super) fn set_sparse_trie(&mut self, sparse_trie: SparseTrie) { self.sparse_trie = Some(sparse_trie); } @@ -318,6 +324,134 @@ where SavedCache::new(parent_hash, cache, CachedStateMetrics::zeroed()) }) } + + /// Generic function to spawn a sparse trie task for any trie type that can be converted to + /// `ConfiguredSparseTrie`. + fn spawn_trie_task( + &self, + sparse_trie_rx: mpsc::Receiver, + proof_task_handle: BPF, + state_root_tx: mpsc::Sender< + Result, ParallelStateRootError>, + >, + sparse_trie: Option>, + ) where + BPF: BlindedProviderFactory + Clone + Send + Sync + 'static, + BPF::AccountNodeProvider: BlindedProvider + Send + Sync, + BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + A: SparseTrieInterface + Send + Sync + Default + 'static, + ConfiguredSparseTrie: From, + { + let mut task = SparseTrieTask::<_, A, SerialSparseTrie>::new_with_stored_trie( + self.executor.clone(), + sparse_trie_rx, + proof_task_handle, + self.trie_metrics.clone(), + sparse_trie, + ); + + self.executor.spawn_blocking(move || { + let res = task.run(); + let converted = res.map(|outcome| StateRootComputeOutcome { + state_root: outcome.state_root, + trie_updates: outcome.trie_updates, + trie: match outcome.trie { + SparseTrie::Blind(opt) => { + SparseTrie::Blind(opt.map(|t| Box::new(ConfiguredSparseTrie::from(*t)))) + } + SparseTrie::Revealed(t) => { + SparseTrie::Revealed(Box::new(ConfiguredSparseTrie::from(*t))) + } + }, + }); + let _ = state_root_tx.send(converted); + }); + } + + /// Helper to dispatch trie spawn based on the `ConfiguredSparseTrie` variant + fn dispatch_trie_spawn( + &self, + configured_trie: ConfiguredSparseTrie, + sparse_trie_rx: mpsc::Receiver, + proof_task_handle: BPF, + state_root_tx: mpsc::Sender< + Result, ParallelStateRootError>, + >, + is_revealed: bool, + ) where + BPF: BlindedProviderFactory + Clone + Send + Sync + 'static, + BPF::AccountNodeProvider: BlindedProvider + Send + Sync, + BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + { + match configured_trie { + ConfiguredSparseTrie::Serial(boxed_serial) => { + let trie = if is_revealed { 
+ Some(SparseTrie::Revealed(boxed_serial)) + } else { + Some(SparseTrie::Blind(Some(boxed_serial))) + }; + self.spawn_trie_task(sparse_trie_rx, proof_task_handle, state_root_tx, trie); + } + ConfiguredSparseTrie::Parallel(boxed_parallel) => { + let trie = if is_revealed { + Some(SparseTrie::Revealed(boxed_parallel)) + } else { + Some(SparseTrie::Blind(Some(boxed_parallel))) + }; + self.spawn_trie_task(sparse_trie_rx, proof_task_handle, state_root_tx, trie); + } + } + } + + /// Helper method that handles sparse trie task spawning. + /// + /// If we have a stored trie, we will re-use it for spawning. If we do not have a stored trie, + /// we will create a new trie based on the configured trie type (parallel or serial). + fn spawn_sparse_trie_task( + &self, + sparse_trie_rx: mpsc::Receiver, + proof_task_handle: BPF, + state_root_tx: mpsc::Sender< + Result, ParallelStateRootError>, + >, + stored_trie: Option>, + use_parallel_for_new: bool, + ) where + BPF: BlindedProviderFactory + Clone + Send + Sync + 'static, + BPF::AccountNodeProvider: BlindedProvider + Send + Sync, + BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + { + let is_revealed = stored_trie.as_ref().is_some_and(|trie| trie.is_revealed()); + match stored_trie { + Some(SparseTrie::Revealed(boxed) | SparseTrie::Blind(Some(boxed))) => { + self.dispatch_trie_spawn( + *boxed, + sparse_trie_rx, + proof_task_handle, + state_root_tx, + is_revealed, + ); + } + _ => { + // No stored trie, create new based on config + if use_parallel_for_new { + self.spawn_trie_task::<_, ParallelSparseTrie>( + sparse_trie_rx, + proof_task_handle, + state_root_tx, + None, + ); + } else { + self.spawn_trie_task::<_, SerialSparseTrie>( + sparse_trie_rx, + proof_task_handle, + state_root_tx, + None, + ); + } + } + } + } } /// Handle to all the spawned tasks. @@ -328,7 +462,11 @@ pub struct PayloadHandle { // must include the receiver of the state root wired to the sparse trie prewarm_handle: CacheTaskHandle, /// Receiver for the state root - state_root: Option>>, + state_root: Option< + mpsc::Receiver< + Result, ParallelStateRootError>, + >, + >, } impl PayloadHandle { @@ -337,7 +475,9 @@ impl PayloadHandle { /// # Panics /// /// If payload processing was started without background tasks. - pub fn state_root(&mut self) -> Result { + pub fn state_root( + &mut self, + ) -> Result, ParallelStateRootError> { self.state_root .take() .expect("state_root is None") diff --git a/crates/trie/sparse/Cargo.toml b/crates/trie/sparse/Cargo.toml index 1a12608e15c..8b40a72da2a 100644 --- a/crates/trie/sparse/Cargo.toml +++ b/crates/trie/sparse/Cargo.toml @@ -26,7 +26,6 @@ alloy-rlp.workspace = true # misc auto_impl.workspace = true smallvec = { workspace = true, features = ["const_new"] } -either.workspace = true # metrics reth-metrics = { workspace = true, optional = true } @@ -63,7 +62,6 @@ std = [ "reth-storage-api/std", "reth-trie-common/std", "tracing/std", - "either/std", ] metrics = ["dep:reth-metrics", "dep:metrics", "std"] test-utils = [ diff --git a/crates/trie/sparse/src/lib.rs b/crates/trie/sparse/src/lib.rs index 20884efb233..220a712d8c8 100644 --- a/crates/trie/sparse/src/lib.rs +++ b/crates/trie/sparse/src/lib.rs @@ -16,9 +16,6 @@ pub use traits::*; pub mod blinded; -// Re-export `Either` because it implements `SparseTrieInterface`. 
-pub use either::Either; - #[cfg(feature = "metrics")] mod metrics; diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 304052ad7ec..2fe1838d777 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -8,7 +8,6 @@ use alloy_primitives::{ B256, }; use alloy_trie::{BranchNodeCompact, TrieMask}; -use either::Either; use reth_execution_errors::SparseTrieResult; use reth_trie_common::{Nibbles, TrieNode}; @@ -292,130 +291,3 @@ pub enum LeafLookup { /// Leaf does not exist (exclusion proof found). NonExistent, } - -impl SparseTrieInterface for Either -where - A: SparseTrieInterface, - B: SparseTrieInterface, -{ - fn with_root( - self, - root: TrieNode, - masks: TrieMasks, - retain_updates: bool, - ) -> SparseTrieResult { - match self { - Self::Left(trie) => trie.with_root(root, masks, retain_updates).map(Self::Left), - Self::Right(trie) => trie.with_root(root, masks, retain_updates).map(Self::Right), - } - } - - fn with_updates(self, retain_updates: bool) -> Self { - match self { - Self::Left(trie) => Self::Left(trie.with_updates(retain_updates)), - Self::Right(trie) => Self::Right(trie.with_updates(retain_updates)), - } - } - - fn reserve_nodes(&mut self, additional: usize) { - match self { - Self::Left(trie) => trie.reserve_nodes(additional), - Self::Right(trie) => trie.reserve_nodes(additional), - } - } - - fn reveal_node( - &mut self, - path: Nibbles, - node: TrieNode, - masks: TrieMasks, - ) -> SparseTrieResult<()> { - match self { - Self::Left(trie) => trie.reveal_node(path, node, masks), - Self::Right(trie) => trie.reveal_node(path, node, masks), - } - } - - fn update_leaf( - &mut self, - full_path: Nibbles, - value: Vec, - provider: P, - ) -> SparseTrieResult<()> { - match self { - Self::Left(trie) => trie.update_leaf(full_path, value, provider), - Self::Right(trie) => trie.update_leaf(full_path, value, provider), - } - } - - fn remove_leaf( - &mut self, - full_path: &Nibbles, - provider: P, - ) -> SparseTrieResult<()> { - match self { - Self::Left(trie) => trie.remove_leaf(full_path, provider), - Self::Right(trie) => trie.remove_leaf(full_path, provider), - } - } - - fn root(&mut self) -> B256 { - match self { - Self::Left(trie) => trie.root(), - Self::Right(trie) => trie.root(), - } - } - - fn update_subtrie_hashes(&mut self) { - match self { - Self::Left(trie) => trie.update_subtrie_hashes(), - Self::Right(trie) => trie.update_subtrie_hashes(), - } - } - - fn get_leaf_value(&self, full_path: &Nibbles) -> Option<&Vec> { - match self { - Self::Left(trie) => trie.get_leaf_value(full_path), - Self::Right(trie) => trie.get_leaf_value(full_path), - } - } - - fn find_leaf( - &self, - full_path: &Nibbles, - expected_value: Option<&Vec>, - ) -> Result { - match self { - Self::Left(trie) => trie.find_leaf(full_path, expected_value), - Self::Right(trie) => trie.find_leaf(full_path, expected_value), - } - } - - fn take_updates(&mut self) -> SparseTrieUpdates { - match self { - Self::Left(trie) => trie.take_updates(), - Self::Right(trie) => trie.take_updates(), - } - } - - fn wipe(&mut self) { - match self { - Self::Left(trie) => trie.wipe(), - Self::Right(trie) => trie.wipe(), - } - } - - fn clear(&mut self) { - match self { - Self::Left(trie) => trie.clear(), - Self::Right(trie) => trie.clear(), - } - } - - fn updates_ref(&self) -> Cow<'_, SparseTrieUpdates> { - match self { - Self::Left(trie) => trie.updates_ref(), - Self::Right(trie) => trie.updates_ref(), - } - } -} diff --git a/crates/trie/sparse/src/trie.rs 
b/crates/trie/sparse/src/trie.rs index 0c0cf6800be..06891a441f5 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -66,22 +66,6 @@ impl Default for SparseTrie { } impl SparseTrie { - /// Creates a new blind sparse trie. - /// - /// # Examples - /// - /// ``` - /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, SerialSparseTrie, SparseTrie}; - /// - /// let trie = SparseTrie::::blind(); - /// assert!(trie.is_blind()); - /// let trie = SparseTrie::::default(); - /// assert!(trie.is_blind()); - /// ``` - pub const fn blind() -> Self { - Self::Blind(None) - } - /// Creates a new revealed but empty sparse trie with `SparseNode::Empty` as root node. /// /// # Examples @@ -128,12 +112,35 @@ impl SparseTrie { Ok(self.as_revealed_mut().unwrap()) } +} + +impl SparseTrie { + /// Creates a new blind sparse trie. + /// + /// # Examples + /// + /// ``` + /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, SerialSparseTrie, SparseTrie}; + /// + /// let trie = SparseTrie::::blind(); + /// assert!(trie.is_blind()); + /// let trie = SparseTrie::::default(); + /// assert!(trie.is_blind()); + /// ``` + pub const fn blind() -> Self { + Self::Blind(None) + } /// Returns `true` if the sparse trie has no revealed nodes. pub const fn is_blind(&self) -> bool { matches!(self, Self::Blind(_)) } + /// Returns `true` if the sparse trie is revealed. + pub const fn is_revealed(&self) -> bool { + matches!(self, Self::Revealed(_)) + } + /// Returns an immutable reference to the underlying revealed sparse trie. /// /// Returns `None` if the trie is blinded. From 99baeeb413e720c063066a93e461f34e98ff8b9a Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Fri, 11 Jul 2025 15:27:07 +0200 Subject: [PATCH 147/305] chore(ci): unpin hive (#17370) --- .github/workflows/hive.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index 70516f0361f..b9a927500ec 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -32,8 +32,6 @@ jobs: uses: actions/checkout@v4 with: repository: ethereum/hive - # TODO: unpin when https://github.com/ethereum/hive/issues/1306 is fixed - ref: edd9969338dd1798ba2e61f049c7e3a15cef53e6 path: hivetests - uses: actions/setup-go@v5 From f6839ac352d0ae40d3a9810576d32a60cac100c1 Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Fri, 11 Jul 2025 20:58:29 +0530 Subject: [PATCH 148/305] fix(`docs`): rustdocs search functionality (#17367) --- docs/vocs/scripts/inject-cargo-docs.ts | 50 ++++++++++++++++++++++++-- 1 file changed, 48 insertions(+), 2 deletions(-) diff --git a/docs/vocs/scripts/inject-cargo-docs.ts b/docs/vocs/scripts/inject-cargo-docs.ts index 1f8fee260d9..1ea30d34790 100644 --- a/docs/vocs/scripts/inject-cargo-docs.ts +++ b/docs/vocs/scripts/inject-cargo-docs.ts @@ -70,13 +70,18 @@ async function injectCargoDocs() { .replace(/data-static-root-path="\.\.\/static\.files\/"/g, `data-static-root-path="${BASE_PATH}/static.files/"`) // Fix search index paths - .replace(/data-search-index-js="([^"]+)"/g, `data-search-index-js="${BASE_PATH}/static.files/$1"`) + .replace(/data-search-index-js="[^"]+"/g, `data-search-index-js="${BASE_PATH}/search-index.js"`) .replace(/data-search-js="([^"]+)"/g, `data-search-js="${BASE_PATH}/static.files/$1"`) .replace(/data-settings-js="([^"]+)"/g, `data-settings-js="${BASE_PATH}/static.files/$1"`) // Fix logo paths .replace(/src="\.\/static\.files\/rust-logo/g, 
`src="${BASE_PATH}/static.files/rust-logo`) - .replace(/src="\.\.\/static\.files\/rust-logo/g, `src="${BASE_PATH}/static.files/rust-logo`); + .replace(/src="\.\.\/static\.files\/rust-logo/g, `src="${BASE_PATH}/static.files/rust-logo`) + + // Fix search functionality by ensuring correct load order + // Add the rustdoc-vars initialization before other scripts + .replace(/`); await fs.writeFile(file, content, 'utf-8'); } @@ -94,6 +99,47 @@ async function injectCargoDocs() { .replace(/"\.\/([^/]+)\/index\.html"/g, `"${BASE_PATH}/$1/index.html"`) .replace(/"\.\.\/([^/]+)\/index\.html"/g, `"${BASE_PATH}/$1/index.html"`); + // Fix the search form submission issue that causes page reload + // Instead of submitting a form, just ensure the search functionality is loaded + if (file.includes('main-') && file.endsWith('.js')) { + content = content.replace( + /function sendSearchForm\(\)\{document\.getElementsByClassName\("search-form"\)\[0\]\.submit\(\)\}/g, + 'function sendSearchForm(){/* Fixed: No form submission needed - search loads via script */}' + ); + + // Also fix the root path references in the search functionality + content = content.replace( + /getVar\("root-path"\)/g, + `"${BASE_PATH}/"` + ); + + // Fix static-root-path to avoid double paths + content = content.replace( + /getVar\("static-root-path"\)/g, + `"${BASE_PATH}/static.files/"` + ); + + // Fix the search-js variable to return just the filename + content = content.replace( + /getVar\("search-js"\)/g, + `"search-f7877310.js"` + ); + + // Fix the search index loading path + content = content.replace( + /resourcePath\("search-index",".js"\)/g, + `"${BASE_PATH}/search-index.js"` + ); + } + + // Fix paths in storage.js which contains the web components + if (file.includes('storage-') && file.endsWith('.js')) { + content = content.replace( + /getVar\("root-path"\)/g, + `"${BASE_PATH}/"` + ); + } + await fs.writeFile(file, content, 'utf-8'); } From 80767f1f3016d0298bd0646a78618f0b740c8089 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Fri, 11 Jul 2025 18:17:51 +0100 Subject: [PATCH 149/305] perf(engine): clear accounts trie in background to not block state root (#17369) --- crates/engine/tree/src/tree/mod.rs | 5 +- .../configured_sparse_trie.rs | 2 +- .../tree/src/tree/payload_processor/mod.rs | 78 ++++++++----------- .../src/tree/payload_processor/sparse_trie.rs | 30 +++++-- crates/trie/sparse/src/state.rs | 6 +- 5 files changed, 58 insertions(+), 63 deletions(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 3618c80a39d..108dce4d037 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -2300,7 +2300,7 @@ where if use_state_root_task { debug!(target: "engine::tree", block=?block_num_hash, "Using sparse trie state root algorithm"); match handle.state_root() { - Ok(StateRootComputeOutcome { state_root, trie_updates, trie }) => { + Ok(StateRootComputeOutcome { state_root, trie_updates }) => { let elapsed = execution_finish.elapsed(); info!(target: "engine::tree", ?state_root, ?elapsed, "State root task finished"); // we double check the state root here for good measure @@ -2314,9 +2314,6 @@ where "State root task returned incorrect state root" ); } - - // hold on to the sparse trie for the next payload - self.payload_processor.set_sparse_trie(trie); } Err(error) => { debug!(target: "engine::tree", %error, "Background parallel state root computation failed"); diff --git 
a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs index 55cd203c0b9..61ad88b67fb 100644 --- a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs @@ -15,7 +15,7 @@ use std::borrow::Cow; /// providing flexibility in choosing the appropriate implementation based on workload /// characteristics. #[derive(Debug)] -pub enum ConfiguredSparseTrie { +pub(crate) enum ConfiguredSparseTrie { /// Serial implementation of the sparse trie. Serial(Box), /// Parallel implementation of the sparse trie. diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index d50ef6f1da2..cc4d291912c 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -14,7 +14,7 @@ use alloy_evm::block::StateChangeSource; use alloy_primitives::B256; use executor::WorkloadExecutor; use multiproof::{SparseTrieUpdate, *}; -use parking_lot::RwLock; +use parking_lot::{Mutex, RwLock}; use prewarm::PrewarmMetrics; use reth_evm::{ConfigureEvm, OnStateHook, SpecFor}; use reth_primitives_traits::{NodePrimitives, SealedHeaderFor}; @@ -75,9 +75,9 @@ where precompile_cache_disabled: bool, /// Precompile cache map. precompile_cache_map: PrecompileCacheMap>, - /// A cleared sparse trie, kept around to be reused for the state root computation so that - /// allocations can be minimized. - sparse_trie: Option>, + /// A cleared accounts sparse trie, kept around to be reused for the state root computation so + /// that allocations can be minimized. + accounts_trie: Arc>>>, /// Whether to use the parallel sparse trie. use_parallel_sparse_trie: bool, _marker: std::marker::PhantomData, @@ -104,7 +104,7 @@ where evm_config, precompile_cache_disabled: config.precompile_cache_disabled(), precompile_cache_map, - sparse_trie: None, + accounts_trie: Arc::default(), use_parallel_sparse_trie: config.enable_parallel_sparse_trie(), _marker: Default::default(), } @@ -209,15 +209,15 @@ where // wire the sparse trie to the state root response receiver let (state_root_tx, state_root_rx) = channel(); - // Take the stored sparse trie - let stored_trie = self.sparse_trie.take(); + // Take the stored accounts trie + let stored_accounts_trie = self.accounts_trie.lock().take(); // Spawn the sparse trie task using any stored trie and parallel trie configuration. self.spawn_sparse_trie_task( sparse_trie_rx, proof_task.handle(), state_root_tx, - stored_trie, + stored_accounts_trie, self.use_parallel_sparse_trie, ); @@ -257,11 +257,6 @@ where PayloadHandle { to_multi_proof: None, prewarm_handle, state_root: None } } - /// Sets the sparse trie to be kept around for the state root computation. - pub(super) fn set_sparse_trie(&mut self, sparse_trie: SparseTrie) { - self.sparse_trie = Some(sparse_trie); - } - /// Spawn prewarming optionally wired to the multiproof task for target updates. fn spawn_caching_with
<P>
( &self, @@ -331,9 +326,7 @@ where &self, sparse_trie_rx: mpsc::Receiver, proof_task_handle: BPF, - state_root_tx: mpsc::Sender< - Result, ParallelStateRootError>, - >, + state_root_tx: mpsc::Sender>, sparse_trie: Option>, ) where BPF: BlindedProviderFactory + Clone + Send + Sync + 'static, @@ -350,21 +343,22 @@ where sparse_trie, ); + let accounts_trie = Arc::clone(&self.accounts_trie); self.executor.spawn_blocking(move || { - let res = task.run(); - let converted = res.map(|outcome| StateRootComputeOutcome { - state_root: outcome.state_root, - trie_updates: outcome.trie_updates, - trie: match outcome.trie { - SparseTrie::Blind(opt) => { - SparseTrie::Blind(opt.map(|t| Box::new(ConfiguredSparseTrie::from(*t)))) - } - SparseTrie::Revealed(t) => { - SparseTrie::Revealed(Box::new(ConfiguredSparseTrie::from(*t))) - } - }, - }); - let _ = state_root_tx.send(converted); + let (result, trie) = task.run(); + // Send state root computation result + let _ = state_root_tx.send(result); + + // Clear and return accounts trie back to the payload processor + let trie = match trie { + SparseTrie::Blind(opt) => { + SparseTrie::Blind(opt.map(|t| Box::new(ConfiguredSparseTrie::from(*t)))) + } + SparseTrie::Revealed(t) => { + SparseTrie::Revealed(Box::new(ConfiguredSparseTrie::from(*t))) + } + }; + accounts_trie.lock().replace(trie.clear()); }); } @@ -374,9 +368,7 @@ where configured_trie: ConfiguredSparseTrie, sparse_trie_rx: mpsc::Receiver, proof_task_handle: BPF, - state_root_tx: mpsc::Sender< - Result, ParallelStateRootError>, - >, + state_root_tx: mpsc::Sender>, is_revealed: bool, ) where BPF: BlindedProviderFactory + Clone + Send + Sync + 'static, @@ -411,18 +403,16 @@ where &self, sparse_trie_rx: mpsc::Receiver, proof_task_handle: BPF, - state_root_tx: mpsc::Sender< - Result, ParallelStateRootError>, - >, - stored_trie: Option>, + state_root_tx: mpsc::Sender>, + stored_accounts_trie: Option>, use_parallel_for_new: bool, ) where BPF: BlindedProviderFactory + Clone + Send + Sync + 'static, BPF::AccountNodeProvider: BlindedProvider + Send + Sync, BPF::StorageNodeProvider: BlindedProvider + Send + Sync, { - let is_revealed = stored_trie.as_ref().is_some_and(|trie| trie.is_revealed()); - match stored_trie { + let is_revealed = stored_accounts_trie.as_ref().is_some_and(|trie| trie.is_revealed()); + match stored_accounts_trie { Some(SparseTrie::Revealed(boxed) | SparseTrie::Blind(Some(boxed))) => { self.dispatch_trie_spawn( *boxed, @@ -462,11 +452,7 @@ pub struct PayloadHandle { // must include the receiver of the state root wired to the sparse trie prewarm_handle: CacheTaskHandle, /// Receiver for the state root - state_root: Option< - mpsc::Receiver< - Result, ParallelStateRootError>, - >, - >, + state_root: Option>>, } impl PayloadHandle { @@ -475,9 +461,7 @@ impl PayloadHandle { /// # Panics /// /// If payload processing was started without background tasks. - pub fn state_root( - &mut self, - ) -> Result, ParallelStateRootError> { + pub fn state_root(&mut self) -> Result { self.state_root .take() .expect("state_root is None") diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index bd1ae9fda9d..458ba1b08b4 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -108,7 +108,26 @@ where /// /// NOTE: This function does not take `self` by value to prevent blocking on [`SparseStateTrie`] /// drop. 
-    pub(super) fn run(&mut self) -> Result<StateRootComputeOutcome<A>, ParallelStateRootError> {
+    ///
+    /// # Returns
+    ///
+    /// - State root computation outcome.
+    /// - Accounts trie that needs to be cleared and re-used to avoid reallocations.
+    pub(super) fn run(
+        &mut self,
+    ) -> (Result<StateRootComputeOutcome, ParallelStateRootError>, SparseTrie<A>) {
+        // run the main loop to completion
+        let result = self.run_inner();
+        // take the account trie so that we can reuse its already allocated data structures.
+        let trie = self.trie.take_accounts_trie();
+
+        (result, trie)
+    }
+
+    /// Inner function to run the sparse trie task to completion.
+    ///
+    /// See [`Self::run`] for more information.
+    fn run_inner(&mut self) -> Result<StateRootComputeOutcome, ParallelStateRootError> {
         let now = Instant::now();
 
         let mut num_iterations = 0;
@@ -151,23 +170,18 @@
         self.metrics.sparse_trie_final_update_duration_histogram.record(start.elapsed());
         self.metrics.sparse_trie_total_duration_histogram.record(now.elapsed());
 
-        // take the account trie so that we can reuse its already allocated data structures.
-        let trie = self.trie.take_cleared_accounts_trie();
-
-        Ok(StateRootComputeOutcome { state_root, trie_updates, trie })
+        Ok(StateRootComputeOutcome { state_root, trie_updates })
     }
 }
 
 /// Outcome of the state root computation, including the state root itself with
 /// the trie updates.
 #[derive(Debug)]
-pub struct StateRootComputeOutcome<T> {
+pub struct StateRootComputeOutcome {
     /// The state root.
     pub state_root: B256,
     /// The trie updates.
     pub trie_updates: TrieUpdates,
-    /// The account state trie.
-    pub trie: SparseTrie<T>,
 }
 
 /// Updates the sparse trie with the given proofs and state, and returns the elapsed time.
diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs
index 33d8c94f8d0..af3d8a5b268 100644
--- a/crates/trie/sparse/src/state.rs
+++ b/crates/trie/sparse/src/state.rs
@@ -93,9 +93,9 @@ where
         self
     }
 
-    /// Takes the `SparseTrie` from within the state root and clears it if it is not blinded.
-    pub fn take_cleared_accounts_trie(&mut self) -> SparseTrie<A> {
-        core::mem::take(&mut self.state).clear()
+    /// Takes the accounts trie.
+    pub fn take_accounts_trie(&mut self) -> SparseTrie<A> {
+        core::mem::take(&mut self.state)
     }
 
     /// Returns `true` if account was already revealed.
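
The patch above moves trie clearing off the state-root critical path: the task sends the result first, then clears the accounts trie and stores it for the next payload. A minimal sketch of the reuse contract this relies on, assuming the `SparseTrie` API shown in these diffs (`revealed_empty`, a consuming `clear`, `is_blind`):

```rust
use reth_trie_sparse::{SerialSparseTrie, SparseTrie};

fn main() {
    // A revealed trie, as left behind by a state root computation.
    let trie: SparseTrie<SerialSparseTrie> = SparseTrie::revealed_empty();

    // `clear` wipes the revealed nodes but keeps the backing allocations,
    // handing back a blind trie that is cheap to reveal into again.
    let reused = trie.clear();
    assert!(reused.is_blind());
}
```
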
From 1d6a83080374dff8354a8e70b7acc32eb8c26635 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com>
Date: Sat, 12 Jul 2025 08:49:36 +0200
Subject: [PATCH 150/305] feat: make `Receipt` generic over `TxType` (#17237)

Co-authored-by: Arsenii Kulikov
---
 Cargo.lock                                  |   1 -
 crates/ethereum/consensus/src/validation.rs |   4 +-
 crates/ethereum/primitives/Cargo.toml       |   1 -
 crates/ethereum/primitives/src/receipt.rs   | 266 +++++++++++++++-----
 4 files changed, 210 insertions(+), 62 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 7a55f03052a..8d7559bac30 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8342,7 +8342,6 @@ dependencies = [
  "secp256k1 0.30.0",
  "serde",
  "serde_with",
- "test-fuzz",
 ]
 
 [[package]]
diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs
index f58b77cc575..485828e6080 100644
--- a/crates/ethereum/consensus/src/validation.rs
+++ b/crates/ethereum/consensus/src/validation.rs
@@ -122,7 +122,7 @@ mod tests {
     #[test]
     fn test_verify_receipts_success() {
         // Create a vector of 5 default Receipt instances
-        let receipts = vec![Receipt::default(); 5];
+        let receipts: Vec<Receipt> = vec![Receipt::default(); 5];
 
         // Compare against expected values
         assert!(verify_receipts(
@@ -140,7 +140,7 @@
         let expected_logs_bloom = Bloom::random();
 
         // Create a vector of 5 random Receipt instances
-        let receipts = vec![Receipt::default(); 5];
+        let receipts: Vec<Receipt> = vec![Receipt::default(); 5];
 
         assert!(verify_receipts(expected_receipts_root, expected_logs_bloom, &receipts).is_err());
     }
diff --git a/crates/ethereum/primitives/Cargo.toml b/crates/ethereum/primitives/Cargo.toml
index 5a5f0ee101c..b99f2d34e58 100644
--- a/crates/ethereum/primitives/Cargo.toml
+++ b/crates/ethereum/primitives/Cargo.toml
@@ -40,7 +40,6 @@ rand.workspace = true
 reth-codecs = { workspace = true, features = ["test-utils"] }
 reth-zstd-compressors.workspace = true
 secp256k1 = { workspace = true, features = ["rand"] }
-test-fuzz.workspace = true
 alloy-consensus = { workspace = true, features = ["serde", "arbitrary"] }
 
 [features]
diff --git a/crates/ethereum/primitives/src/receipt.rs b/crates/ethereum/primitives/src/receipt.rs
index ffc06c7fc82..d40789c0a18 100644
--- a/crates/ethereum/primitives/src/receipt.rs
+++ b/crates/ethereum/primitives/src/receipt.rs
@@ -1,30 +1,55 @@
+use core::fmt::Debug;
+
 use alloc::vec::Vec;
 use alloy_consensus::{
     Eip2718EncodableReceipt, Eip658Value, ReceiptWithBloom, RlpDecodableReceipt,
     RlpEncodableReceipt, TxReceipt, TxType, Typed2718,
 };
 use alloy_eips::{
-    eip2718::{Eip2718Result, Encodable2718, IsTyped2718},
+    eip2718::{Eip2718Error, Eip2718Result, Encodable2718, IsTyped2718},
     Decodable2718,
 };
 use alloy_primitives::{Bloom, Log, B256};
 use alloy_rlp::{BufMut, Decodable, Encodable, Header};
 use reth_primitives_traits::{proofs::ordered_trie_root_with_encoder, InMemorySize};
 
+/// Helper trait alias with requirements for transaction type generic to be used within [`Receipt`].
+pub trait TxTy:
+    Debug
+    + Copy
+    + Eq
+    + Send
+    + Sync
+    + InMemorySize
+    + Typed2718
+    + TryFrom<u8, Error = Eip2718Error>
+    + Decodable
+    + 'static
+{
+}
+impl<T> TxTy for T where
+    T: Debug
+        + Copy
+        + Eq
+        + Send
+        + Sync
+        + InMemorySize
+        + Typed2718
+        + TryFrom<u8, Error = Eip2718Error>
+        + Decodable
+        + 'static
+{
+}
+
 /// Typed ethereum transaction receipt.
 /// Receipt containing result of transaction execution.
#[derive(Clone, Debug, PartialEq, Eq, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[cfg_attr(feature = "reth-codec", derive(reth_codecs::CompactZstd))] #[cfg_attr(feature = "reth-codec", reth_codecs::add_arbitrary_tests(compact, rlp))] -#[cfg_attr(feature = "reth-codec", reth_zstd( - compressor = reth_zstd_compressors::RECEIPT_COMPRESSOR, - decompressor = reth_zstd_compressors::RECEIPT_DECOMPRESSOR -))] -pub struct Receipt { +pub struct Receipt { /// Receipt type. - pub tx_type: TxType, + pub tx_type: T, /// If transaction is executed successfully. /// /// This is the `statusCode` @@ -35,7 +60,7 @@ pub struct Receipt { pub logs: Vec, } -impl Receipt { +impl Receipt { /// Returns length of RLP-encoded receipt fields with the given [`Bloom`] without an RLP header. pub fn rlp_encoded_fields_length(&self, bloom: &Bloom) -> usize { self.success.length() + @@ -61,7 +86,7 @@ impl Receipt { /// network header. pub fn rlp_decode_inner( buf: &mut &[u8], - tx_type: TxType, + tx_type: T, ) -> alloy_rlp::Result> { let header = Header::decode(buf)?; if !header.list { @@ -112,10 +137,7 @@ impl Receipt { /// RLP-decodes the receipt from the provided buffer. This does not expect a type byte or /// network header. - pub fn rlp_decode_inner_without_bloom( - buf: &mut &[u8], - tx_type: TxType, - ) -> alloy_rlp::Result { + pub fn rlp_decode_inner_without_bloom(buf: &mut &[u8], tx_type: T) -> alloy_rlp::Result { let header = Header::decode(buf)?; if !header.list { return Err(alloy_rlp::Error::UnexpectedString); @@ -134,21 +156,21 @@ impl Receipt { } } -impl Eip2718EncodableReceipt for Receipt { +impl Eip2718EncodableReceipt for Receipt { fn eip2718_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { !self.tx_type.is_legacy() as usize + self.rlp_header_inner(bloom).length_with_payload() } fn eip2718_encode_with_bloom(&self, bloom: &Bloom, out: &mut dyn BufMut) { if !self.tx_type.is_legacy() { - out.put_u8(self.tx_type as u8); + out.put_u8(self.tx_type.ty()); } self.rlp_header_inner(bloom).encode(out); self.rlp_encode_fields(bloom, out); } } -impl RlpEncodableReceipt for Receipt { +impl RlpEncodableReceipt for Receipt { fn rlp_encoded_length_with_bloom(&self, bloom: &Bloom) -> usize { let mut len = self.eip2718_encoded_length_with_bloom(bloom); if !self.tx_type.is_legacy() { @@ -171,21 +193,21 @@ impl RlpEncodableReceipt for Receipt { } } -impl RlpDecodableReceipt for Receipt { +impl RlpDecodableReceipt for Receipt { fn rlp_decode_with_bloom(buf: &mut &[u8]) -> alloy_rlp::Result> { let header_buf = &mut &**buf; let header = Header::decode(header_buf)?; // Legacy receipt, reuse initial buffer without advancing if header.list { - return Self::rlp_decode_inner(buf, TxType::Legacy) + return Self::rlp_decode_inner(buf, T::try_from(0)?) 
} // Otherwise, advance the buffer and try decoding type flag followed by receipt *buf = *header_buf; let remaining = buf.len(); - let tx_type = TxType::decode(buf)?; + let tx_type = T::decode(buf)?; let this = Self::rlp_decode_inner(buf, tx_type)?; if buf.len() + header.payload_length != remaining { @@ -196,7 +218,7 @@ impl RlpDecodableReceipt for Receipt { } } -impl Encodable2718 for Receipt { +impl Encodable2718 for Receipt { fn encode_2718_len(&self) -> usize { (!self.tx_type.is_legacy() as usize) + self.rlp_header_inner_without_bloom().length_with_payload() @@ -205,24 +227,24 @@ impl Encodable2718 for Receipt { // encode the header fn encode_2718(&self, out: &mut dyn BufMut) { if !self.tx_type.is_legacy() { - out.put_u8(self.tx_type as u8); + out.put_u8(self.tx_type.ty()); } self.rlp_header_inner_without_bloom().encode(out); self.rlp_encode_fields_without_bloom(out); } } -impl Decodable2718 for Receipt { +impl Decodable2718 for Receipt { fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { - Ok(Self::rlp_decode_inner_without_bloom(buf, TxType::try_from(ty)?)?) + Ok(Self::rlp_decode_inner_without_bloom(buf, T::try_from(ty)?)?) } fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { - Ok(Self::rlp_decode_inner_without_bloom(buf, TxType::Legacy)?) + Ok(Self::rlp_decode_inner_without_bloom(buf, T::try_from(0)?)?) } } -impl Encodable for Receipt { +impl Encodable for Receipt { fn encode(&self, out: &mut dyn BufMut) { self.network_encode(out); } @@ -232,13 +254,13 @@ impl Encodable for Receipt { } } -impl Decodable for Receipt { +impl Decodable for Receipt { fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { Ok(Self::network_decode(buf)?) } } -impl TxReceipt for Receipt { +impl TxReceipt for Receipt { type Log = Log; fn status_or_post_state(&self) -> Eip658Value { @@ -262,19 +284,19 @@ impl TxReceipt for Receipt { } } -impl Typed2718 for Receipt { +impl Typed2718 for Receipt { fn ty(&self) -> u8 { - self.tx_type as u8 + self.tx_type.ty() } } -impl IsTyped2718 for Receipt { +impl IsTyped2718 for Receipt { fn is_type(type_id: u8) -> bool { ::is_type(type_id) } } -impl InMemorySize for Receipt { +impl InMemorySize for Receipt { fn size(&self) -> usize { self.tx_type.size() + core::mem::size_of::() + @@ -283,7 +305,7 @@ impl InMemorySize for Receipt { } } -impl From> for Receipt +impl From> for Receipt where T: Into, { @@ -299,8 +321,8 @@ where } } -impl From for alloy_consensus::Receipt { - fn from(value: Receipt) -> Self { +impl From> for alloy_consensus::Receipt { + fn from(value: Receipt) -> Self { Self { status: value.success.into(), cumulative_gas_used: value.cumulative_gas_used, @@ -309,8 +331,8 @@ impl From for alloy_consensus::Receipt { } } -impl From for alloy_consensus::ReceiptEnvelope { - fn from(value: Receipt) -> Self { +impl From> for alloy_consensus::ReceiptEnvelope { + fn from(value: Receipt) -> Self { let tx_type = value.tx_type; let receipt = value.into_with_bloom().map_receipt(Into::into); match tx_type { @@ -327,7 +349,9 @@ impl From for alloy_consensus::ReceiptEnvelope { pub(super) mod serde_bincode_compat { use alloc::{borrow::Cow, vec::Vec}; use alloy_consensus::TxType; + use alloy_eips::eip2718::Eip2718Error; use alloy_primitives::{Log, U8}; + use core::fmt::Debug; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; @@ -335,6 +359,7 @@ pub(super) mod serde_bincode_compat { /// /// Intended to use with the [`serde_with::serde_as`] macro in the following way: /// ```rust + /// use alloy_consensus::TxType; /// 
use reth_ethereum_primitives::{serde_bincode_compat, Receipt}; /// use serde::{de::DeserializeOwned, Deserialize, Serialize}; /// use serde_with::serde_as; @@ -343,14 +368,15 @@ pub(super) mod serde_bincode_compat { /// #[derive(Serialize, Deserialize)] /// struct Data { /// #[serde_as(as = "serde_bincode_compat::Receipt<'_>")] - /// receipt: Receipt, + /// receipt: Receipt, /// } /// ``` #[derive(Debug, Serialize, Deserialize)] - pub struct Receipt<'a> { + #[serde(bound(deserialize = "T: TryFrom"))] + pub struct Receipt<'a, T = TxType> { /// Receipt type. #[serde(deserialize_with = "deserde_txtype")] - pub tx_type: TxType, + pub tx_type: T, /// If transaction is executed successfully. /// /// This is the `statusCode` @@ -362,16 +388,16 @@ pub(super) mod serde_bincode_compat { } /// Ensures that txtype is deserialized symmetrically as U8 - fn deserde_txtype<'de, D>(deserializer: D) -> Result + fn deserde_txtype<'de, D, T>(deserializer: D) -> Result where D: Deserializer<'de>, + T: TryFrom, { - let value = U8::deserialize(deserializer)?; - value.to::().try_into().map_err(serde::de::Error::custom) + U8::deserialize(deserializer)?.to::().try_into().map_err(serde::de::Error::custom) } - impl<'a> From<&'a super::Receipt> for Receipt<'a> { - fn from(value: &'a super::Receipt) -> Self { + impl<'a, T: Copy> From<&'a super::Receipt> for Receipt<'a, T> { + fn from(value: &'a super::Receipt) -> Self { Self { tx_type: value.tx_type, success: value.success, @@ -381,8 +407,8 @@ pub(super) mod serde_bincode_compat { } } - impl<'a> From> for super::Receipt { - fn from(value: Receipt<'a>) -> Self { + impl<'a, T> From> for super::Receipt { + fn from(value: Receipt<'a, T>) -> Self { Self { tx_type: value.tx_type, success: value.success, @@ -392,8 +418,8 @@ pub(super) mod serde_bincode_compat { } } - impl SerializeAs for Receipt<'_> { - fn serialize_as(source: &super::Receipt, serializer: S) -> Result + impl SerializeAs> for Receipt<'_, T> { + fn serialize_as(source: &super::Receipt, serializer: S) -> Result where S: Serializer, { @@ -401,17 +427,22 @@ pub(super) mod serde_bincode_compat { } } - impl<'de> DeserializeAs<'de, super::Receipt> for Receipt<'de> { - fn deserialize_as(deserializer: D) -> Result + impl<'de, T: TryFrom> DeserializeAs<'de, super::Receipt> + for Receipt<'de, T> + { + fn deserialize_as(deserializer: D) -> Result, D::Error> where D: Deserializer<'de>, { - Receipt::<'_>::deserialize(deserializer).map(Into::into) + Receipt::<'_, T>::deserialize(deserializer).map(Into::into) } } - impl reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat for super::Receipt { - type BincodeRepr<'a> = Receipt<'a>; + impl reth_primitives_traits::serde_bincode_compat::SerdeBincodeCompat for super::Receipt + where + T: Copy + Serialize + TryFrom + Debug + 'static, + { + type BincodeRepr<'a> = Receipt<'a, T>; fn as_repr(&self) -> Self::BincodeRepr<'_> { self.into() @@ -425,6 +456,7 @@ pub(super) mod serde_bincode_compat { #[cfg(test)] mod tests { use crate::{receipt::serde_bincode_compat, Receipt}; + use alloy_consensus::TxType; use arbitrary::Arbitrary; use rand::Rng; use serde_with::serde_as; @@ -435,8 +467,8 @@ pub(super) mod serde_bincode_compat { #[derive(Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] struct Data { - #[serde_as(as = "serde_bincode_compat::Receipt<'_>")] - receipt: Receipt, + #[serde_as(as = "serde_bincode_compat::Receipt<'_, TxType>")] + receipt: Receipt, } let mut bytes = [0u8; 1024]; @@ -451,6 +483,124 @@ pub(super) mod 
serde_bincode_compat { } } +#[cfg(feature = "reth-codec")] +mod compact { + use super::*; + use reth_codecs::{ + Compact, + __private::{modular_bitfield::prelude::*, Buf}, + }; + + impl Receipt { + #[doc = "Used bytes by [`ReceiptFlags`]"] + pub const fn bitflag_encoded_bytes() -> usize { + 1u8 as usize + } + #[doc = "Unused bits for new fields by [`ReceiptFlags`]"] + pub const fn bitflag_unused_bits() -> usize { + 0u8 as usize + } + } + + #[allow(non_snake_case, unused_parens)] + mod flags { + use super::*; + + #[doc = "Fieldset that facilitates compacting the parent type. Used bytes: 1 | Unused bits: 0"] + #[bitfield] + #[derive(Clone, Copy, Debug, Default)] + pub struct ReceiptFlags { + pub tx_type_len: B2, + pub success_len: B1, + pub cumulative_gas_used_len: B4, + pub __zstd: B1, + } + + impl ReceiptFlags { + #[doc = r" Deserializes this fieldset and returns it, alongside the original slice in an advanced position."] + pub fn from(mut buf: &[u8]) -> (Self, &[u8]) { + (Self::from_bytes([buf.get_u8()]), buf) + } + } + } + + pub use flags::ReceiptFlags; + + impl Compact for Receipt { + fn to_compact(&self, buf: &mut B) -> usize + where + B: reth_codecs::__private::bytes::BufMut + AsMut<[u8]>, + { + let mut flags = ReceiptFlags::default(); + let mut total_length = 0; + let mut buffer = reth_codecs::__private::bytes::BytesMut::new(); + + let tx_type_len = self.tx_type.to_compact(&mut buffer); + flags.set_tx_type_len(tx_type_len as u8); + let success_len = self.success.to_compact(&mut buffer); + flags.set_success_len(success_len as u8); + let cumulative_gas_used_len = self.cumulative_gas_used.to_compact(&mut buffer); + flags.set_cumulative_gas_used_len(cumulative_gas_used_len as u8); + self.logs.to_compact(&mut buffer); + + let zstd = buffer.len() > 7; + if zstd { + flags.set___zstd(1); + } + + let flags = flags.into_bytes(); + total_length += flags.len() + buffer.len(); + buf.put_slice(&flags); + if zstd { + reth_zstd_compressors::RECEIPT_COMPRESSOR.with(|compressor| { + let compressed = + compressor.borrow_mut().compress(&buffer).expect("Failed to compress."); + buf.put(compressed.as_slice()); + }); + } else { + buf.put(buffer); + } + total_length + } + + fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { + let (flags, mut buf) = ReceiptFlags::from(buf); + if flags.__zstd() != 0 { + reth_zstd_compressors::RECEIPT_DECOMPRESSOR.with(|decompressor| { + let decompressor = &mut decompressor.borrow_mut(); + let decompressed = decompressor.decompress(buf); + let original_buf = buf; + let mut buf: &[u8] = decompressed; + let (tx_type, new_buf) = T::from_compact(buf, flags.tx_type_len() as usize); + buf = new_buf; + let (success, new_buf) = bool::from_compact(buf, flags.success_len() as usize); + buf = new_buf; + let (cumulative_gas_used, new_buf) = + u64::from_compact(buf, flags.cumulative_gas_used_len() as usize); + buf = new_buf; + let (logs, _) = Vec::from_compact(buf, buf.len()); + (Self { tx_type, success, cumulative_gas_used, logs }, original_buf) + }) + } else { + let (tx_type, new_buf) = T::from_compact(buf, flags.tx_type_len() as usize); + buf = new_buf; + let (success, new_buf) = bool::from_compact(buf, flags.success_len() as usize); + buf = new_buf; + let (cumulative_gas_used, new_buf) = + u64::from_compact(buf, flags.cumulative_gas_used_len() as usize); + buf = new_buf; + let (logs, new_buf) = Vec::from_compact(buf, buf.len()); + buf = new_buf; + let obj = Self { tx_type, success, cumulative_gas_used, logs }; + (obj, buf) + } + } + } +} + +#[cfg(feature = "reth-codec")] +pub 
use compact::*;
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -472,7 +622,7 @@ mod tests {

     #[test]
     fn test_decode_receipt() {
-        reth_codecs::test_utils::test_decode::<Receipt>(&hex!(
+        reth_codecs::test_utils::test_decode::<Receipt<TxType>>(&hex!(
            "c428b52ffd23fc42696156b10200f034792b6a94c3850215c2fef7aea361a0c31b79d9a32652eefc0d4e2e730036061cff7344b6fc6132b50cda0ed810a991ae58ef013150c12b2522533cb3b3a8b19b7786a8b5ff1d3cdc84225e22b02def168c8858df"
        ));
    }
@@ -564,7 +714,7 @@ mod tests {

         let mut data = vec![];
         receipt.to_compact(&mut data);
-        let (decoded, _) = Receipt::from_compact(&data[..], data.len());
+        let (decoded, _) = Receipt::<TxType>::from_compact(&data[..], data.len());

         assert_eq!(decoded, receipt);
     }

From e9389dc640f464e5366da6255b5daaa8b48e778e Mon Sep 17 00:00:00 2001
From: otc group
Date: Sat, 12 Jul 2025 13:44:56 +0200
Subject: [PATCH 151/305] docs: fix link to installation (#17375)

Co-authored-by: Matthias Seitz
---
 docs/vocs/docs/pages/run/ethereum.mdx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/vocs/docs/pages/run/ethereum.mdx b/docs/vocs/docs/pages/run/ethereum.mdx
index 992cd70dd5f..1444c7104da 100644
--- a/docs/vocs/docs/pages/run/ethereum.mdx
+++ b/docs/vocs/docs/pages/run/ethereum.mdx
@@ -88,7 +88,7 @@ In the meantime, consider setting up [observability](/run/monitoring) to monitor

 {/* TODO: Add more logs to help node operators debug any weird CL to EL messages! */}

-[installation]: ./../installation/overview
+[installation]: ./../../installation/overview

 [docs]: https://github.com/paradigmxyz/reth/tree/main/docs
 [metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics.md#metrics

From 4767e1c25125133bd694c271db2df6e2ad46d199 Mon Sep 17 00:00:00 2001
From: crStiv
Date: Sat, 12 Jul 2025 18:55:12 +0300
Subject: [PATCH 152/305] docs: typos (#17335)

Co-authored-by: Matthias Seitz
---
 crates/engine/tree/docs/root.md   | 10 +++++-----
 docs/design/headers-downloader.md |  6 +++---
 docs/vocs/docs/pages/cli/cli.mdx  |  2 +-
 docs/vocs/docs/pages/index.mdx    |  2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/crates/engine/tree/docs/root.md b/crates/engine/tree/docs/root.md
index d3c4e1e5757..a5b9bcb1d48 100644
--- a/crates/engine/tree/docs/root.md
+++ b/crates/engine/tree/docs/root.md
@@ -10,7 +10,7 @@ root of the new state.
 4. Compares the root with the one received in the block header.
 5. Considers the block valid.

-This document describes the lifecycle of a payload with the focus on state root calculation,
+This document describes the lifecycle of a payload with a focus on state root calculation,
 from the moment the payload is received, to the moment we have a new state root.

 We will look at the following components:
@@ -26,7 +26,7 @@ We will look at the following components:
 It all starts with the `engine_newPayload` request coming from the
 [Consensus Client](https://ethereum.org/en/developers/docs/nodes-and-clients/#consensus-clients).
 We extract the block from the payload, and eventually pass it to the `EngineApiTreeHandler::insert_block_inner`
-method which executes the block and calculates the state root.
+method that executes the block and calculates the state root.
 https://github.com/paradigmxyz/reth/blob/2ba54bf1c1f38c7173838f37027315a09287c20a/crates/engine/tree/src/tree/mod.rs#L2359-L2362

 Let's walk through the steps involved in the process. 
@@ -166,7 +166,7 @@ and send `StateRootMessage::ProofCalculated` to the [State Root Task](#state-roo ### Exhausting the pending queue -To exhaust the pending queue from the step 2 of the `spawn_or_queue` described above, +To exhaust the pending queue from step 2 of the `spawn_or_queue` described above, the [State Root Task](#state-root-task) calls into another method `on_calculation_complete` every time a proof is calculated. https://github.com/paradigmxyz/reth/blob/2ba54bf1c1f38c7173838f37027315a09287c20a/crates/engine/tree/src/tree/root.rs#L379-L387 @@ -230,11 +230,11 @@ https://github.com/paradigmxyz/reth/blob/2ba54bf1c1f38c7173838f37027315a09287c20 https://github.com/paradigmxyz/reth/blob/2ba54bf1c1f38c7173838f37027315a09287c20a/crates/engine/tree/src/tree/root.rs#L1093 3. Update accounts trie https://github.com/paradigmxyz/reth/blob/2ba54bf1c1f38c7173838f37027315a09287c20a/crates/engine/tree/src/tree/root.rs#L1133 -4. Calculate keccak hashes of the nodes below the certain level +4. Calculate keccak hashes of the nodes below a certain level https://github.com/paradigmxyz/reth/blob/2ba54bf1c1f38c7173838f37027315a09287c20a/crates/engine/tree/src/tree/root.rs#L1139 As you can see, we do not calculate the state root hash of the accounts trie -(the one that will be the result of the whole task), but instead calculate only the certain hashes. +(the one that will be the result of the whole task), but instead calculate only certain hashes. This is an optimization that comes from the fact that we will likely update the top 2-3 levels of the trie in every transaction, so doing that work every time would be wasteful. diff --git a/docs/design/headers-downloader.md b/docs/design/headers-downloader.md index 8b160265a2b..c31aeefc249 100644 --- a/docs/design/headers-downloader.md +++ b/docs/design/headers-downloader.md @@ -6,6 +6,6 @@ * First, we implemented the reverse linear download. It received the current chain tip and local head as arguments and requested blocks in batches starting from the tip, and retried on request failure. See [`reth#58`](https://github.com/paradigmxyz/reth/pull/58) and [`reth#119`](https://github.com/paradigmxyz/reth/pull/119). * The first complete implementation of the headers stage was introduced in [`reth#126`](https://github.com/paradigmxyz/reth/pull/126). The stage looked up the local head & queried the consensus for the chain tip and queried the downloader passing them as arguments. After the download finished, the stage would proceed to insert headers in the ascending order by appending the entries to the corresponding tables. * The original downloader was refactored in [`reth#249`](https://github.com/paradigmxyz/reth/pull/249) to return a `Future` which would resolve when either the download is completed or the error occurred during polling. This future kept a pointer to a current request at any time, allowing to retry the request in case of failure. The insert logic of the headers stage remained unchanged. - * NOTE: Up to this point the headers stage awaited full range of blocks (from local head to tip) to be downloaded before proceeding to insert. -* [`reth#296`](https://github.com/paradigmxyz/reth/pull/296) introduced the `Stream` implementation of the download as well as the commit threshold for the headers stage. The `Stream` implementation yields headers as soon as they are received and validated. It dispatches the request for the next header batch until the head is reached. 
The headers stage now has a configurable commit threshold which allows configuring the insert batch size. With this change, the headers stage no longer waits for the download to be complete, but rather collects the headers from the stream up to the commit threshold parameter. After collecting, the stage proceeds to insert the batch. The process is repeated until the stream is drained. At this point, we populated all tables except for HeadersTD since it has to be computed in a linear ascending order. The stage starts walking the populated headers table and computes & inserts new total difficulty values.
-* This header implementation is unique because it is implemented as a Stream, it yields headers as soon as they become available (contrary to waiting for download to complete) and it keeps only one header in buffer (required to form the next header request) .
+  * NOTE: Up to this point the headers stage awaited the full range of blocks (from local head to tip) to be downloaded before proceeding to insert.
+* [`reth#296`](https://github.com/paradigmxyz/reth/pull/296) introduced the `Stream` implementation of the download as well as the commit threshold for the headers stage. The `Stream` implementation yields headers as soon as they are received and validated. It dispatches the request for the next header batch until the head is reached. The headers stage now has a configurable commit threshold which allows configuring the insert batch size. With this change, the headers stage no longer waits for the download to be complete, but rather collects the headers from the stream up to the commit threshold parameter. After collecting, the stage proceeds to insert the batch. The process is repeated until the stream is drained. At this point, we populated all tables except for HeadersTD since it has to be computed in a linear ascending order. The stage starts walking through the populated headers table and computes & inserts new total difficulty values.
+* This header implementation is unique because it is implemented as a Stream: it yields headers as soon as they become available (contrary to waiting for the download to complete), and it keeps only one header in the buffer (required to form the next header request).
diff --git a/docs/vocs/docs/pages/cli/cli.mdx b/docs/vocs/docs/pages/cli/cli.mdx
index 20046ce9e77..d7a02e2b738 100644
--- a/docs/vocs/docs/pages/cli/cli.mdx
+++ b/docs/vocs/docs/pages/cli/cli.mdx
@@ -2,7 +2,7 @@ import Summary from './SUMMARY.mdx';

 # CLI Reference

-The Reth node is operated via the CLI by running the `reth node` command. To stop it, press `ctrl-c`. You may need to wait a bit as Reth tears down existing p2p connections or other cleanup tasks.
+The Reth node is operated via the CLI by running the `reth node` command. To stop it, press `ctrl-c`. You may need to wait a bit as Reth tears down existing p2p connections or performs other cleanup tasks.

 However, Reth has more commands:

diff --git a/docs/vocs/docs/pages/index.mdx b/docs/vocs/docs/pages/index.mdx
index 5e65d0695ce..a3ba66c3932 100644
--- a/docs/vocs/docs/pages/index.mdx
+++ b/docs/vocs/docs/pages/index.mdx
@@ -150,7 +150,7 @@ Leading infra companies use Reth for MEV applications, staking, RPC services and

 ## Built with Reth SDK

-Production chains and networks powered by Reth's modular architecture. These nodes are built using existing components without forking, saving several engineering hours while improving maintainability.
+Production chains and networks are powered by Reth's modular architecture. These nodes are built using existing components without forking, saving significant engineering time while improving maintainability.

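Aside (not part of the patch series): PATCH 150/305 above makes `Receipt` generic over its transaction type. The snippet below is a minimal, hypothetical downstream usage sketch, assuming only the field layout and the `T = TxType` default shown in that diff; it illustrates the one visible consequence for callers, namely that an explicit annotation may now be needed where inference previously resolved to the concrete type.

```rust
use alloy_consensus::TxType;
use alloy_primitives::Log;
use reth_ethereum_primitives::Receipt;

fn example() {
    // `T` defaults to `TxType`, so `Receipt<TxType>` is the same type that
    // non-generic code used before the patch.
    let receipt: Receipt<TxType> = Receipt {
        tx_type: TxType::Eip1559,
        success: true,
        cumulative_gas_used: 21_000,
        logs: Vec::<Log>::new(),
    };
    assert!(receipt.success);

    // As the validation.rs hunk in PATCH 150 shows, collections may now need
    // an explicit element type where it was previously inferred.
    let receipts: Vec<Receipt> = vec![Receipt::default(); 5];
    assert_eq!(receipts.len(), 5);
}
```

The `TxTy` trait alias introduced in the same patch exists so that each `impl` block can state a single `T: TxTy` bound instead of repeating the full `Debug + Copy + ... + Decodable` list.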
From ac5d3357962af5ebd2339333707828e8635fca1c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com>
Date: Sun, 13 Jul 2025 10:24:00 +0200
Subject: [PATCH 153/305] feat: add `into_logs()` to `TxReceipt` for `Receipt`/`OpReceipt` (#17383)

---
 crates/ethereum/primitives/src/receipt.rs |  4 ++++
 crates/optimism/primitives/src/receipt.rs | 11 +++++++++++
 2 files changed, 15 insertions(+)

diff --git a/crates/ethereum/primitives/src/receipt.rs b/crates/ethereum/primitives/src/receipt.rs
index d40789c0a18..6c81f8bd69d 100644
--- a/crates/ethereum/primitives/src/receipt.rs
+++ b/crates/ethereum/primitives/src/receipt.rs
@@ -282,6 +282,10 @@ impl<T: TxTy> TxReceipt for Receipt<T> {
     fn logs(&self) -> &[Log] {
         &self.logs
     }
+
+    fn into_logs(self) -> Vec<Log> {
+        self.logs
+    }
 }

diff --git a/crates/optimism/primitives/src/receipt.rs b/crates/optimism/primitives/src/receipt.rs
index e0ef6318081..d7549670d2a 100644
--- a/crates/optimism/primitives/src/receipt.rs
+++ b/crates/optimism/primitives/src/receipt.rs
@@ -1,3 +1,4 @@
+use alloc::vec::Vec;
 use alloy_consensus::{
     Eip2718EncodableReceipt, Eip658Value, Receipt, ReceiptWithBloom, RlpDecodableReceipt,
     RlpEncodableReceipt, TxReceipt, Typed2718,
@@ -357,6 +358,16 @@ impl TxReceipt for OpReceipt {
     fn logs(&self) -> &[Log] {
         self.as_receipt().logs()
     }
+
+    fn into_logs(self) -> Vec<Log> {
+        match self {
+            Self::Legacy(receipt) |
+            Self::Eip2930(receipt) |
+            Self::Eip1559(receipt) |
+            Self::Eip7702(receipt) => receipt.logs,
+            Self::Deposit(receipt) => receipt.inner.logs,
+        }
+    }
 }

 impl Typed2718 for OpReceipt {

From e010ec290aedd1a76a14a1a51c7ab42484049a41 Mon Sep 17 00:00:00 2001
From: crStiv
Date: Sun, 13 Jul 2025 11:35:00 +0300
Subject: [PATCH 154/305] docs: typos (#17283)

---
 docs/crates/eth-wire.md | 10 +++++-----
 docs/design/database.md |  2 +-
 docs/design/metrics.md  |  2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/docs/crates/eth-wire.md b/docs/crates/eth-wire.md
index 1b4ba2d80e3..ece625764cb 100644
--- a/docs/crates/eth-wire.md
+++ b/docs/crates/eth-wire.md
@@ -10,7 +10,7 @@ This crate can be thought of as having 2 components:
 (Note that ECIES is implemented in a separate `reth-ecies` crate.)

 ## Types
-The most basic Eth-wire type is an `ProtocolMessage`. It describes all messages that reth can send/receive.
+The most basic Eth-wire type is a `ProtocolMessage`. It describes all messages that reth can send/receive.

 [File: crates/net/eth-wire/src/types/message.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/types/message.rs)
 ```rust, ignore
@@ -78,7 +78,7 @@ In reth all [RLP](https://ethereum.org/en/developers/docs/data-structures-and-en
 Note that the `ProtocolMessage` itself implements these traits, so any stream of bytes can be converted into it by calling `ProtocolMessage::decode()` and vice versa with `ProtocolMessage::encode()`. The message type is determined by the first byte of the byte stream.

 ### Example: The Transactions message
-Let's understand how an `EthMessage` is implemented by taking a look at the `Transactions` Message. 
The eth specification describes a Transaction message as a list of RLP-encoded transactions: [File: ethereum/devp2p/caps/eth.md](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#transactions-0x02) ``` @@ -138,7 +138,7 @@ Now that we know how the types work, let's take a look at how these are utilized ## P2PStream The lowest level stream to communicate with other peers is the P2P stream. It takes an underlying Tokio stream and does the following: -- Tracks and Manages Ping and pong messages and sends them when needed. +- Tracks and Manages Ping and Pong messages and sends them when needed. - Keeps track of the SharedCapabilities between the reth node and its peers. - Receives bytes from peers, decompresses and forwards them to its parent stream. - Receives bytes from its parent stream, compresses them and sends it to peers. @@ -161,7 +161,7 @@ pub struct P2PStream { } ``` ### Pinger -To manage pinging, an instance of the `Pinger` struct is used. This is a state machine which keeps track of how many pings +To manage pinging, an instance of the `Pinger` struct is used. This is a state machine that keeps track of how many pings we have sent/received and the timeouts associated with them. [File: crates/net/eth-wire/src/pinger.rs](https://github.com/paradigmxyz/reth/blob/1563506aea09049a85e5cc72c2894f3f7a371581/crates/net/eth-wire/src/pinger.rs) @@ -218,7 +218,7 @@ pub(crate) fn poll_ping( ``` ### Sending and receiving data -To send and receive data, the P2PStream itself is a future which implements the `Stream` and `Sink` traits from the `futures` crate. +To send and receive data, the P2PStream itself is a future that implements the `Stream` and `Sink` traits from the `futures` crate. For the `Stream` trait, the `inner` stream is polled, decompressed and returned. Most of the code is just error handling and is omitted here for clarity. diff --git a/docs/design/database.md b/docs/design/database.md index d81aced6f0c..1ce75d3dc25 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -23,7 +23,7 @@ ### Table layout -Historical state changes are indexed by `BlockNumber`. This means that `reth` stores the state for every account after every block that touched it, and it provides indexes for accessing that data quickly. While this may make the database size bigger (needs benchmark once `reth` is closer to prod), it provides fast access to historical state. +Historical state changes are indexed by `BlockNumber`. This means that `reth` stores the state for every account after every block that touched it, and it provides indexes for accessing that data quickly. While this may make the database size bigger (needs benchmark once `reth` is closer to prod), it provides fast access to the historical state. 
Below, you can see the table design that implements this scheme: diff --git a/docs/design/metrics.md b/docs/design/metrics.md index a769f9d625f..1aeb2f37c1e 100644 --- a/docs/design/metrics.md +++ b/docs/design/metrics.md @@ -13,7 +13,7 @@ The main difference between metrics and traces is therefore that metrics are sys **For most things, you likely want a metric**, except for two scenarios: - For contributors, traces are a good profiling tool -- For end-users that run complicated infrastructure, traces in the RPC component makes sense +- For end-users who run complicated infrastructure, traces in the RPC component make sense ### How to add a metric From b08586946c4cd03072ef45c8313ed5ea8a29b05e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sun, 13 Jul 2025 10:57:45 +0200 Subject: [PATCH 155/305] chore: consolidate typo fixes from multiple PRs (#17387) --- HARDFORK-CHECKLIST.md | 2 +- bin/reth-bench/src/bench/mod.rs | 2 +- crates/net/downloaders/src/bodies/bodies.rs | 2 +- crates/node/builder/src/builder/mod.rs | 2 +- crates/trie/common/src/hashed_state.rs | 2 +- crates/trie/common/src/updates.rs | 2 +- crates/trie/sparse/src/state.rs | 2 +- docs/vocs/docs/pages/guides/history-expiry.mdx | 2 +- docs/vocs/docs/pages/run/ethereum.mdx | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/HARDFORK-CHECKLIST.md b/HARDFORK-CHECKLIST.md index 3e3628f0b4c..0b6361221bb 100644 --- a/HARDFORK-CHECKLIST.md +++ b/HARDFORK-CHECKLIST.md @@ -30,7 +30,7 @@ Opstack tries to be as close to the L1 engine API as much as possible. Isthmus (Prague equivalent) introduced the first deviation from the L1 engine API with an additional field in the `ExecutionPayload`. For this reason the op engine API -has it's own server traits `OpEngineApi`. +has its own server traits `OpEngineApi`. Adding a new versioned endpoint requires the same changes as for L1 just for the dedicated OP types. ### Hardforks diff --git a/bin/reth-bench/src/bench/mod.rs b/bin/reth-bench/src/bench/mod.rs index afc76b3b6ac..da3ccb1a8bb 100644 --- a/bin/reth-bench/src/bench/mod.rs +++ b/bin/reth-bench/src/bench/mod.rs @@ -38,7 +38,7 @@ pub enum Subcommands { /// /// One powerful use case is pairing this command with the `cast block` command, for example: /// - /// `cast block latest--full --json | reth-bench send-payload --rpc-url localhost:5000 + /// `cast block latest --full --json | reth-bench send-payload --rpc-url localhost:5000 /// --jwt-secret $(cat ~/.local/share/reth/mainnet/jwt.hex)` SendPayload(send_payload::Command), } diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index a6e454b0414..e4ef306b018 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -230,7 +230,7 @@ where self.metrics.buffered_responses.set(self.buffered_responses.len() as f64); } - /// Returns a response if it's first block number matches the next expected. + /// Returns a response if its first block number matches the next expected. fn try_next_buffered(&mut self) -> Option>> { if let Some(next) = self.buffered_responses.peek() { let expected = self.next_expected_block_number(); diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 8bac819ab69..0779196b89d 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -311,7 +311,7 @@ where } } -/// A [`NodeBuilder`] with it's launch context already configured. 
+/// A [`NodeBuilder`] with its launch context already configured. /// /// This exposes the same methods as [`NodeBuilder`] but with the launch context already configured, /// See [`WithLaunchContext::launch`] diff --git a/crates/trie/common/src/hashed_state.rs b/crates/trie/common/src/hashed_state.rs index eb0f4e653d7..8e4ca75e808 100644 --- a/crates/trie/common/src/hashed_state.rs +++ b/crates/trie/common/src/hashed_state.rs @@ -340,7 +340,7 @@ impl HashedPostState { /// /// This effectively clears all the fields in the [`HashedPostStateSorted`]. /// - /// This allows us to re-use the allocated space. This allocates new space for the sorted hashed + /// This allows us to reuse the allocated space. This allocates new space for the sorted hashed /// post state, like `into_sorted`. pub fn drain_into_sorted(&mut self) -> HashedPostStateSorted { let mut updated_accounts = Vec::new(); diff --git a/crates/trie/common/src/updates.rs b/crates/trie/common/src/updates.rs index f9bb0d21e92..a752fd06d73 100644 --- a/crates/trie/common/src/updates.rs +++ b/crates/trie/common/src/updates.rs @@ -123,7 +123,7 @@ impl TrieUpdates { /// /// This effectively clears all the fields in the [`TrieUpdatesSorted`]. /// - /// This allows us to re-use the allocated space. This allocates new space for the sorted + /// This allows us to reuse the allocated space. This allocates new space for the sorted /// updates, like `into_sorted`. pub fn drain_into_sorted(&mut self) -> TrieUpdatesSorted { let mut account_nodes = self.account_nodes.drain().collect::>(); diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index af3d8a5b268..d80813b2e3a 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -753,7 +753,7 @@ where trace!(target: "trie::sparse", ?address, "Retrieving storage root from account leaf to update account"); // The account was revealed, either... if let Some(value) = self.get_account_value(&address) { - // ..it exists and we should take it's current storage root or... + // ..it exists and we should take its current storage root or... TrieAccount::decode(&mut &value[..])?.storage_root } else { // ...the account is newly created and the storage trie is empty. diff --git a/docs/vocs/docs/pages/guides/history-expiry.mdx b/docs/vocs/docs/pages/guides/history-expiry.mdx index 91066218dee..1ba3394681a 100644 --- a/docs/vocs/docs/pages/guides/history-expiry.mdx +++ b/docs/vocs/docs/pages/guides/history-expiry.mdx @@ -34,7 +34,7 @@ If enabled, importing blocks from ERA1 files can be done automatically with no m #### Enabling the ERA stage -The import from ERA1 files within the pre-merge block range is included in the [reth node](../cli/reth/node) synchronization pipeline. It is disabled by default. To enable it, pass the `--era.enable` flag when running the [`node`](../cli/reth/node) command. +The import from ERA1 files within the pre-merge block range is included in the [reth node](/cli/reth/node) synchronization pipeline. It is disabled by default. To enable it, pass the `--era.enable` flag when running the [`node`](/cli/reth/node) command. The benefit of using this option is significant increase in the synchronization speed for the headers and mainly bodies stage of the pipeline within the ERA1 block range. We encourage you to use it! Eventually, it will become enabled by default. 
diff --git a/docs/vocs/docs/pages/run/ethereum.mdx b/docs/vocs/docs/pages/run/ethereum.mdx index 1444c7104da..6e068dcd312 100644 --- a/docs/vocs/docs/pages/run/ethereum.mdx +++ b/docs/vocs/docs/pages/run/ethereum.mdx @@ -94,7 +94,7 @@ In the meantime, consider setting up [observability](/run/monitoring) to monitor ## Running without a Consensus Layer -We provide a method for running Reth without a Consensus Layer via the `--debug.tip ` parameter. If you provide that to your node, it will simulate sending an `engine_forkchoiceUpdated` message _once_ and will trigger syncing to the provided block hash. This is useful for testing and debugging purposes, but in order to have a node that can keep up with the tip you'll need to run a CL alongside it. At the moment we have no plans of including a Consensus Layer implementation in Reth, and we are open to including light clients other methods of syncing like importing Lighthouse as a library. +We provide a method for running Reth without a Consensus Layer via the `--debug.tip ` parameter. If you provide that to your node, it will simulate sending an `engine_forkchoiceUpdated` message _once_ and will trigger syncing to the provided block hash. This is useful for testing and debugging purposes, but in order to have a node that can keep up with the tip you'll need to run a CL alongside it. At the moment we have no plans of including a Consensus Layer implementation in Reth, and we are open to including light clients and other methods of syncing like importing Lighthouse as a library. ## Running with Etherscan as Block Source From e5e42e79f9f385e5a4d1911fa85516724336a8a9 Mon Sep 17 00:00:00 2001 From: maradini77 <140460067+maradini77@users.noreply.github.com> Date: Sun, 13 Jul 2025 11:03:41 +0200 Subject: [PATCH 156/305] fix: broken link to system requirements in troubleshooting guide (#17384) --- docs/vocs/docs/pages/run/faq/troubleshooting.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/vocs/docs/pages/run/faq/troubleshooting.mdx b/docs/vocs/docs/pages/run/faq/troubleshooting.mdx index 08b9c6fbe5d..1f26cba9dae 100644 --- a/docs/vocs/docs/pages/run/faq/troubleshooting.mdx +++ b/docs/vocs/docs/pages/run/faq/troubleshooting.mdx @@ -58,7 +58,7 @@ Currently, there are two main ways to fix this issue. #### Compact the database It will take around 5-6 hours and require **additional** disk space located on the same or different drive -equal to the [freshly synced node](/installation/overview#hardware-requirements). +equal to the [freshly synced node](/run/system-requirements). 1. Clone Reth ```bash From 332c65661778b71d8fdc655c5178cb9d4718feb7 Mon Sep 17 00:00:00 2001 From: nekomoto911 Date: Sun, 13 Jul 2025 17:44:18 +0800 Subject: [PATCH 157/305] perf(blob): optimize blob store gets (#17388) --- crates/transaction-pool/src/blobstore/disk.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/crates/transaction-pool/src/blobstore/disk.rs b/crates/transaction-pool/src/blobstore/disk.rs index e738bfc6681..b550b085fb1 100644 --- a/crates/transaction-pool/src/blobstore/disk.rs +++ b/crates/transaction-pool/src/blobstore/disk.rs @@ -424,10 +424,9 @@ impl DiskFileBlobStoreInner { if let Some(blob) = self.blob_cache.lock().get(&tx) { return Ok(Some(blob.clone())) } - let blob = self.read_one(tx)?; - if let Some(blob) = &blob { - let blob_arc = Arc::new(blob.clone()); + if let Some(blob) = self.read_one(tx)? 
{ + let blob_arc = Arc::new(blob); self.blob_cache.lock().insert(tx, blob_arc.clone()); return Ok(Some(blob_arc)) } @@ -542,11 +541,18 @@ impl DiskFileBlobStoreInner { if from_disk.is_empty() { return Ok(res) } + let from_disk = from_disk + .into_iter() + .map(|(tx, data)| { + let data = Arc::new(data); + res.push((tx, data.clone())); + (tx, data) + }) + .collect::>(); + let mut cache = self.blob_cache.lock(); for (tx, data) in from_disk { - let arc = Arc::new(data.clone()); - cache.insert(tx, arc.clone()); - res.push((tx, arc.clone())); + cache.insert(tx, data); } Ok(res) From b19b1b07907b8929265c119df70dabd4e9f730c8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 14 Jul 2025 12:19:39 +0200 Subject: [PATCH 158/305] chore(deps): weekly `cargo update` (#17386) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 183 +++++++++++++++++++++++++++-------------------------- 1 file changed, 92 insertions(+), 91 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8d7559bac30..5cf02311d54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06b31d7560fdebcf24e21fcba9ed316c2fdf2854b2ca652a24741bf8192cd40a" +checksum = "73e7f99e3a50210eaee2abd57293a2e72b1a5b7bb251b44c4bf33d02ddd402ab" dependencies = [ "alloy-eips", "alloy-primitives", @@ -138,9 +138,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.17" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf397edad57b696501702d5887e4e14d7d0bbae9fbb6439e148d361f7254f45" +checksum = "9945351a277c914f3776ae72b3fc1d22f90d2e840276830e48e9be5bf371a8fe" dependencies = [ "alloy-consensus", "alloy-eips", @@ -153,9 +153,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5efc90130119c22079b468c30eab6feda1ab4981c3ea88ed8e12dc155cc26ea1" +checksum = "1f27be9e6b587904ee5135f72182a565adaf0c7dd341bae330ee6f0e342822b1" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -236,9 +236,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe854e4051afb5b47931b78ba7b5af1952d06e903637430e98c8321192d29eca" +checksum = "4134375e533d095e045982cd7684a29c37089ab7a605ecf2b4aa17a5e61d72d3" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -279,9 +279,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38cc4c7363f48a2b61de395d9b2df52280e303a5af45a22ed33cf27cd30d7975" +checksum = "d61d58e94791b74c2566a2f240f3f796366e2479d4d39b4a3ec848c733fb92ce" dependencies = [ "alloy-eips", "alloy-primitives", @@ -293,9 +293,9 @@ dependencies = [ [[package]] name = "alloy-hardforks" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ce138b29a2f8e7ed97c064af8359dfa6559c12cba5e821ae4eb93081a56557e" +checksum = "819a3620fe125e0fff365363315ee5e24c23169173b19747dfd6deba33db8990" dependencies = [ "alloy-chains", "alloy-eip2124", @@ -319,9 +319,9 @@ dependencies = [ [[package]] name = 
"alloy-json-rpc" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2c0cb72ef87173b9d78cd29be898820c44498ce60a7d5de82b577c8c002bb8" +checksum = "1edaf2255b0ea9213ecbb056fa92870d858719911e04fb4260bcc43f7743d370" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -334,9 +334,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4965cff485617f5c2f4016a2e48503b735fb6ec3845ba86c68fdf338da9e85e7" +checksum = "c224eafcd1bd4c54cc45b5fc3634ae42722bdb9253780ac64a5deffd794a6cec" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -360,9 +360,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.17" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a03ad273e1c55cc481889b4130e82860e33624e6969e9a08854e0f3ebe659295" +checksum = "0b21283a28b117505a75ee1f2e63c16ea2ea72afca44f670b1f02795d9f5d988" dependencies = [ "alloy-consensus", "alloy-eips", @@ -390,9 +390,9 @@ dependencies = [ [[package]] name = "alloy-op-hardforks" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9a510692bef4871797062ca09ec7873c45dc68c7f3f72291165320f53606a3" +checksum = "2090f21bb6df43e147d976e754bc9a007ca851badbfc6685377aa679b5f151d9" dependencies = [ "alloy-chains", "alloy-hardforks", @@ -433,9 +433,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956116526887a732fb5823648544ae56c78a38cf56d4e1c2c076d7432a90674c" +checksum = "09e5f02654272d9a95c66949b78f30c87701c232cf8302d4a1dab02957f5a0c1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -477,9 +477,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b19131a9cbf17486ef7fa37663f8c3631c3fa606aec3d77733042066439d68" +checksum = "08acc8843da1207a80f778bc0ac3e5dc94c2683280fa70ff3090b895d0179537" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -520,9 +520,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5699f859f61936425d753c0709b8049ec7d83988ea4f0793526885f63d8d863b" +checksum = "c956d223a5fa7ef28af1c6ae41b77ecb95a36d686d5644ee22266f6b517615b4" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -548,9 +548,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cca073dd05362d7a66241468862a18d95255f5eb7c28a9d83b458c8216b751bd" +checksum = "99074f79ad4b188b1049807f8f96637abc3cc019fde53791906edc26bc092a57" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -561,9 +561,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74e7c6f85d9b38302ca0051420cb687d035f75cc1ff09cdf4f98991ff211fb9f" +checksum = "61ad30ddbec9c315b002e02ba13f4327767cd5e6bdefadbfcec3d95ff6a3206e" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -573,9 +573,9 @@ dependencies = [ [[package]] name = 
"alloy-rpc-types-anvil" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6425892f9addc08b0c687878feb8e4a61a89e085ffdf52865fd44fa1d54f84f" +checksum = "9d34231e06b5f1ad5f274a6ddb3eca8730db5eb868b70a4494a1e4b716b7fe88" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -585,9 +585,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.17" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5b22062142ce3b2ed3374337d4b343437e5de6959397f55d2c9fe2c2ce0162" +checksum = "0c13e5081ae6b99a7f4e46c18b80d652440320ff404790932cb8259ec73f596e" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -596,9 +596,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d8751cf34201ceb637974388971e38abbd84f9e10a03103170ac7b1e9f3137" +checksum = "544101ff1933e5c8074238b7b49cecb87d47afc411e74927ef58201561c98bf7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -614,9 +614,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2acde603d444a8f6f50bb79e1296602e8c0bf193b2fa9af0afe0287e8aaf87df" +checksum = "220aeda799891b518a171d3d640ec310bab2f4d80c3987c9ea089cedd8a67008" dependencies = [ "alloy-primitives", "serde", @@ -624,9 +624,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24aa5a872715979dbb831ed9a50e983a1d2500c44ded79550000c905a4d5ca8e" +checksum = "14796fd8574c77213802b0dc0e85886b5cb27c44e72678ab7d0a4a2d5aee79e9" dependencies = [ "alloy-consensus", "alloy-eips", @@ -645,9 +645,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce2ac0e27fe24f27f1a6d0e0088b94c03c67dfcfb0461813a4a44b8197a8105" +checksum = "1bea7326ca6cd6971c58042055a039d5c97a1431e30380d8b4883ad98067c1b5" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -666,9 +666,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e082bf96fb0eec9efa1d981d6d9ff9880266884aea32ecf2f344c25073e19d5" +checksum = "15aac86a4cb20c2f36b1d14202a20eca6baa92691b0aebcfacfe31dd0fedc6ee" dependencies = [ "alloy-consensus", "alloy-eips", @@ -681,9 +681,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18db18563210da6a1e7e172c9bf0049bc8e00058e31043458ec3cae92c51d1cb" +checksum = "fbc92f9dd9e56a9edcfe0c28c0d1898a2c5281a2944d89e2b8a4effeca13823e" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -695,9 +695,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5c202af188d9a60000d09309c6a2605cabf49d0b1de0307c3b9f221e8a545a5" +checksum = "fadc5c919b4e8b3bdcbea2705d63dccb8ed2ce864399d005fed534eefebc8fe4" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -707,9 +707,9 @@ 
dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad318c341481a5f5e50d69d830853873d8b5e8d2b73ea2c0da69cf78537c970" +checksum = "06c02a06ae34d2354398dc9d2de0503129c3f0904a3eb791b5d0149f267c2688" dependencies = [ "alloy-primitives", "arbitrary", @@ -719,9 +719,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a758b004483b906d622f607d27e1bc0923246a092adc475069b5509ab83c8148" +checksum = "2389ec473fc24735896960b1189f1d92177ed53c4e464d285e54ed3483f9cca3" dependencies = [ "alloy-primitives", "async-trait", @@ -734,9 +734,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d44ff6b720feb3fc17763f5d6cd90e57b05400acd2a5083a7d7020e351e5bb" +checksum = "ab70b75dee5f4673ace65058927310658c8ffac63a94aa4b973f925bab020367" dependencies = [ "alloy-consensus", "alloy-network", @@ -822,9 +822,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e551a125a5a96377ee0befc63db27b68078873d316c65b74587f14704dac630" +checksum = "b99ffb19be54a61d18599843ef887ddd12c3b713244462c184e2eab67106d51a" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -845,9 +845,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8640f66b33f0d85df0fcb0528739fb5d424f691a7c58963395b2417a68274703" +checksum = "92b5a640491f3ab18d17bd6e521c64744041cd86f741b25cdb6a346ca0e90c66" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -860,9 +860,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d88ab7ac8a7aac07313bdeabbcd70818e6f675e4a9f101a3056d15aeb15be279" +checksum = "17fe2576d9689409724f7cb737aa7fdd70674edfec4b9c3ce54f6ffac00e83ca" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -880,9 +880,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "972664516ff27c90b156a7df9870d813b85b948d5063d3a1e9093109810b77b7" +checksum = "4816ea8425e789057d08804452eff399204808b7b7a233ac3f7534183cae2236" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -918,9 +918,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.17" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79bf2869e66904b2148c809e7a75e23ca26f5d7b46663a149a1444fb98a69d1d" +checksum = "afd621a9ddef2fdc06d17089f45e47cf84d0b46ca5a1bc6c83807c9119636f52" dependencies = [ "alloy-primitives", "darling", @@ -2020,9 +2020,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "castaway" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5" +checksum = "dec551ab6e7578819132c713a93c022a05d60159dc86e7a7050223577484c55a" dependencies = [ "rustversion", ] @@ -2130,9 +2130,9 @@ 
dependencies = [ [[package]] name = "clap" -version = "4.5.40" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" +checksum = "be92d32e80243a54711e5d7ce823c35c41c9d929dc4ab58e1276f625841aadf9" dependencies = [ "clap_builder", "clap_derive", @@ -2140,9 +2140,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.40" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" +checksum = "707eab41e9622f9139419d573eca0900137718000c517d47da73045f54331c3d" dependencies = [ "anstream", "anstyle", @@ -2152,9 +2152,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.40" +version = "4.5.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce" +checksum = "ef4f52386a59ca4c860f7393bcf8abd8dfd91ecccc0f774635ff68e92eeef491" dependencies = [ "heck", "proc-macro2", @@ -2612,9 +2612,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.3" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +checksum = "373b7c5dbd637569a2cca66e8d66b8c446a1e7bf064ea321d265d7b3dfe7c97e" dependencies = [ "cfg-if", "cpufeatures", @@ -3006,9 +3006,9 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" dependencies = [ "curve25519-dalek", "ed25519", @@ -3709,9 +3709,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.9" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +checksum = "64cd1e32ddd350061ae6edb1b082d7c54915b5c672c389143b9a63403a109f24" [[package]] name = "filetime" @@ -4388,9 +4388,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb" +checksum = "7f66d5bd4c6f02bf0542fad85d626775bab9258cf795a4256dcaf3161114d1df" dependencies = [ "base64 0.22.1", "bytes", @@ -5926,12 +5926,13 @@ dependencies = [ [[package]] name = "nybbles" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d51b0175c49668a033fe7cc69080110d9833b291566cdf332905f3ad9c68a0" +checksum = "675b3a54e5b12af997abc8b6638b0aee51a28caedab70d4967e0d5db3a3f1d06" dependencies = [ "alloy-rlp", "arbitrary", + "cfg-if", "proptest", "ruint", "serde", @@ -11107,9 +11108,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.28" +version = "0.23.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" +checksum = "2491382039b29b9b11ff08b76ff6c97cf287671dbb74f0be44bda389fffe9bd1" dependencies = [ "log", "once_cell", @@ -11171,9 +11172,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.103.3" 
+version = "0.103.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" +checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" dependencies = [ "ring", "rustls-pki-types", @@ -11242,9 +11243,9 @@ dependencies = [ [[package]] name = "schemars" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1375ba8ef45a6f15d83fa8748f1079428295d403d6ea991d09ab100155fbc06d" +checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" dependencies = [ "dyn-clone", "ref-cast", @@ -11479,7 +11480,7 @@ dependencies = [ "indexmap 1.9.3", "indexmap 2.10.0", "schemars 0.9.0", - "schemars 1.0.3", + "schemars 1.0.4", "serde", "serde_derive", "serde_json", @@ -13671,9 +13672,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" +checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95" dependencies = [ "memchr", ] From 4edd55aacd0bbf4860800e837a7ceab82fe54d9a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 14 Jul 2025 13:05:20 +0200 Subject: [PATCH 159/305] chore: make clippy happy (#17399) --- crates/engine/tree/src/tree/mod.rs | 26 ++++++++++----------- crates/net/network/src/manager.rs | 8 +++---- crates/rpc/rpc-builder/src/auth.rs | 9 +++---- crates/rpc/rpc-eth-api/src/helpers/state.rs | 8 +++---- crates/storage/db-models/src/accounts.rs | 5 +--- crates/transaction-pool/src/pool/txpool.rs | 11 +++++---- crates/trie/common/benches/prefix_set.rs | 1 + 7 files changed, 32 insertions(+), 36 deletions(-) diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 108dce4d037..b09828bd93d 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -1205,18 +1205,18 @@ where debug!(target: "engine::tree", "received backfill sync finished event"); self.backfill_sync_state = BackfillSyncState::Idle; - // backfill height is the block number that the backfill finished at - let mut backfill_height = ctrl.block_number(); - // Pipeline unwound, memorize the invalid block and wait for CL for next sync target. 
- if let ControlFlow::Unwind { bad_block, target } = &ctrl { + let backfill_height = if let ControlFlow::Unwind { bad_block, target } = &ctrl { warn!(target: "engine::tree", invalid_block=?bad_block, "Bad block detected in unwind"); // update the `invalid_headers` cache with the new invalid header self.state.invalid_headers.insert(**bad_block); // if this was an unwind then the target is the new height - backfill_height = Some(*target); - } + Some(*target) + } else { + // backfill height is the block number that the backfill finished at + ctrl.block_number() + }; // backfill height is the block number that the backfill finished at let Some(backfill_height) = backfill_height else { return Ok(()) }; @@ -1778,20 +1778,18 @@ where ) -> Option { let sync_target_state = self.state.forkchoice_state_tracker.sync_target_state(); - // check if the distance exceeds the threshold for backfill sync - let mut exceeds_backfill_threshold = - self.exceeds_backfill_run_threshold(canonical_tip_num, target_block_number); - // check if the downloaded block is the tracked finalized block - if let Some(buffered_finalized) = sync_target_state + let mut exceeds_backfill_threshold = if let Some(buffered_finalized) = sync_target_state .as_ref() .and_then(|state| self.state.buffer.block(&state.finalized_block_hash)) { // if we have buffered the finalized block, we should check how far // we're off - exceeds_backfill_threshold = - self.exceeds_backfill_run_threshold(canonical_tip_num, buffered_finalized.number()); - } + self.exceeds_backfill_run_threshold(canonical_tip_num, buffered_finalized.number()) + } else { + // check if the distance exceeds the threshold for backfill sync + self.exceeds_backfill_run_threshold(canonical_tip_num, target_block_number) + }; // If this is invoked after we downloaded a block we can check if this block is the // finalized block diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 465039ec193..7107faaf588 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -837,8 +837,7 @@ impl NetworkManager { "Session disconnected" ); - let mut reason = None; - if let Some(ref err) = error { + let reason = if let Some(ref err) = error { // If the connection was closed due to an error, we report // the peer self.swarm.state_mut().peers_mut().on_active_session_dropped( @@ -846,11 +845,12 @@ impl NetworkManager { &peer_id, err, ); - reason = err.as_disconnected(); + err.as_disconnected() } else { // Gracefully disconnected self.swarm.state_mut().peers_mut().on_active_session_gracefully_closed(peer_id); - } + None + }; self.metrics.closed_sessions.increment(1); self.update_active_connection_metrics(); diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index ca96adec8eb..b1a4f4166bd 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -68,16 +68,17 @@ impl AuthServerConfig { .map_err(|err| RpcError::server_error(err, ServerKind::Auth(socket_addr)))?; let handle = server.start(module.inner.clone()); - let mut ipc_handle: Option = None; - if let Some(ipc_server_config) = ipc_server_config { + let ipc_handle = if let Some(ipc_server_config) = ipc_server_config { let ipc_endpoint_str = ipc_endpoint .clone() .unwrap_or_else(|| constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string()); let ipc_server = ipc_server_config.build(ipc_endpoint_str); let res = ipc_server.start(module.inner).await?; - ipc_handle = Some(res); - } + Some(res) + } else { + None + }; 
Ok(AuthServerHandle { handle: Some(handle), local_addr, secret, ipc_endpoint, ipc_handle }) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 008b78ced46..4fa4edee8bc 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -277,17 +277,15 @@ pub trait LoadState: { self.spawn_blocking_io(move |this| { // first fetch the on chain nonce of the account - let on_chain_account_nonce = this + let mut next_nonce = this .latest_state()? .account_nonce(&address) .map_err(Self::Error::from_eth_err)? .unwrap_or_default(); - let mut next_nonce = on_chain_account_nonce; // Retrieve the highest consecutive transaction for the sender from the transaction pool - if let Some(highest_tx) = this - .pool() - .get_highest_consecutive_transaction_by_sender(address, on_chain_account_nonce) + if let Some(highest_tx) = + this.pool().get_highest_consecutive_transaction_by_sender(address, next_nonce) { // Return the nonce of the highest consecutive transaction + 1 next_nonce = highest_tx.nonce().checked_add(1).ok_or_else(|| { diff --git a/crates/storage/db-models/src/accounts.rs b/crates/storage/db-models/src/accounts.rs index 477c18f1c00..cbae5d84aa6 100644 --- a/crates/storage/db-models/src/accounts.rs +++ b/crates/storage/db-models/src/accounts.rs @@ -27,10 +27,7 @@ impl reth_codecs::Compact for AccountBeforeTx { // for now put full bytes and later compress it. buf.put_slice(self.address.as_slice()); - let mut acc_len = 0; - if let Some(account) = self.info { - acc_len = account.to_compact(buf); - } + let acc_len = if let Some(account) = self.info { account.to_compact(buf) } else { 0 }; acc_len + 20 } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 8c016c92a9c..57020b09e31 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1368,11 +1368,10 @@ impl AllTransactions { } }; } - // tracks the balance if the sender was changed in the block - let mut changed_balance = None; + // track the balance if the sender was changed in the block // check if this is a changed account - if let Some(info) = changed_accounts.get(&id.sender) { + let changed_balance = if let Some(info) = changed_accounts.get(&id.sender) { // discard all transactions with a nonce lower than the current state nonce if id.nonce < info.state_nonce { updates.push(PoolUpdate { @@ -1397,8 +1396,10 @@ impl AllTransactions { } } - changed_balance = Some(&info.balance); - } + Some(&info.balance) + } else { + None + }; // If there's a nonce gap, we can shortcircuit, because there's nothing to update yet. 
if tx.state.has_nonce_gap() { diff --git a/crates/trie/common/benches/prefix_set.rs b/crates/trie/common/benches/prefix_set.rs index 1448e41502e..bc2a8dc2592 100644 --- a/crates/trie/common/benches/prefix_set.rs +++ b/crates/trie/common/benches/prefix_set.rs @@ -292,6 +292,7 @@ mod implementations { } #[derive(Default)] + #[allow(dead_code)] pub struct VecBinarySearchWithLastFoundPrefixSet { keys: Vec, last_found_idx: usize, From b9c63f6a10bfbf89b7b3585acf34750d3dbf7493 Mon Sep 17 00:00:00 2001 From: Acat Date: Mon, 14 Jul 2025 19:55:08 +0800 Subject: [PATCH 160/305] fix(txpool): Propagate promoted transactions on account updates (#17396) Co-authored-by: Matthias Seitz --- crates/transaction-pool/src/pool/mod.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 5f99790c080..19d40125fbb 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -418,6 +418,21 @@ where let changed_senders = self.changed_senders(accounts.into_iter()); let UpdateOutcome { promoted, discarded } = self.pool.write().update_accounts(changed_senders); + + // Notify about promoted pending transactions (similar to notify_on_new_state) + if !promoted.is_empty() { + self.pending_transaction_listener.lock().retain_mut(|listener| { + let promoted_hashes = promoted.iter().filter_map(|tx| { + if listener.kind.is_propagate_only() && !tx.propagate { + None + } else { + Some(*tx.hash()) + } + }); + listener.send_all(promoted_hashes) + }); + } + let mut listener = self.event_listener.write(); for tx in &promoted { From 44cc67be00c210adafaa4867d8439a0fc3769720 Mon Sep 17 00:00:00 2001 From: Acat Date: Mon, 14 Jul 2025 22:07:32 +0800 Subject: [PATCH 161/305] perf: optimize txpool_status RPC by avoiding full transaction collection (#17392) Co-authored-by: Matthias Seitz --- crates/rpc/rpc/src/txpool.rs | 4 ++-- crates/transaction-pool/src/lib.rs | 7 +++++++ crates/transaction-pool/src/noop.rs | 4 ++++ crates/transaction-pool/src/pool/parked.rs | 2 +- crates/transaction-pool/src/pool/pending.rs | 2 +- crates/transaction-pool/src/pool/txpool.rs | 10 ++++++++++ crates/transaction-pool/src/traits.rs | 4 ++++ 7 files changed, 29 insertions(+), 4 deletions(-) diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index e910e6a101e..5c7bcd45a84 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -88,8 +88,8 @@ where /// Handler for `txpool_status` async fn txpool_status(&self) -> RpcResult { trace!(target: "rpc::eth", "Serving txpool_status"); - let all = self.pool.all_transactions(); - Ok(TxpoolStatus { pending: all.pending.len() as u64, queued: all.queued.len() as u64 }) + let (pending, queued) = self.pool.pending_and_queued_txn_count(); + Ok(TxpoolStatus { pending: pending as u64, queued: queued as u64 }) } /// Returns a summary of all the transactions currently pending for inclusion in the next diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 67bee20b558..14c44056cc8 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -591,6 +591,13 @@ where self.pool.queued_transactions() } + fn pending_and_queued_txn_count(&self) -> (usize, usize) { + let data = self.pool.get_pool_data(); + let pending = data.pending_transactions_count(); + let queued = data.queued_transactions_count(); + (pending, queued) + } + fn all_transactions(&self) -> AllPoolTransactions { 
self.pool.all_transactions() } diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 132854bb712..bf4f55e57c4 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -190,6 +190,10 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn pending_and_queued_txn_count(&self) -> (usize, usize) { + (0, 0) + } + fn all_transactions(&self) -> AllPoolTransactions { AllPoolTransactions::default() } diff --git a/crates/transaction-pool/src/pool/parked.rs b/crates/transaction-pool/src/pool/parked.rs index 33056dd6ec5..d3e90b6e3c1 100644 --- a/crates/transaction-pool/src/pool/parked.rs +++ b/crates/transaction-pool/src/pool/parked.rs @@ -131,7 +131,7 @@ impl ParkedPool { /// Returns an iterator over all transactions in the pool pub(crate) fn all( &self, - ) -> impl Iterator>> + '_ { + ) -> impl ExactSizeIterator>> + '_ { self.by_id.values().map(|tx| tx.transaction.clone().into()) } diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 3e90722dcd6..594db4f9f00 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -158,7 +158,7 @@ impl PendingPool { /// Returns an iterator over all transactions in the pool pub(crate) fn all( &self, - ) -> impl Iterator>> + '_ { + ) -> impl ExactSizeIterator>> + '_ { self.by_id.values().map(|tx| tx.transaction.clone()) } diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 57020b09e31..1763e19cf0f 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -434,6 +434,11 @@ impl TxPool { self.pending_pool.all() } + /// Returns the number of transactions from the pending sub-pool + pub(crate) fn pending_transactions_count(&self) -> usize { + self.pending_pool.len() + } + /// Returns all pending transactions filtered by predicate pub(crate) fn pending_transactions_with_predicate( &self, @@ -462,6 +467,11 @@ impl TxPool { self.basefee_pool.all().chain(self.queued_pool.all()) } + /// Returns the number of transactions in parked pools + pub(crate) fn queued_transactions_count(&self) -> usize { + self.basefee_pool.len() + self.queued_pool.len() + } + /// Returns queued and pending transactions for the specified sender pub fn queued_and_pending_txs_by_sender( &self, diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index e9f58c27a32..0621394d11e 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -355,6 +355,10 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { /// Consumer: RPC fn queued_transactions(&self) -> Vec>>; + /// Returns the number of transactions that are ready for inclusion in the next block and the + /// number of transactions that are ready for inclusion in future blocks: `(pending, queued)`. + fn pending_and_queued_txn_count(&self) -> (usize, usize); + /// Returns all transactions that are currently in the pool grouped by whether they are ready /// for inclusion in the next block or not. 
///
From 61bbe5ee29a8a68915f50c10cdf391efeb79563b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 14 Jul 2025 17:23:10 +0200 Subject: [PATCH 162/305] perf: release listener lock early (#17400) --- crates/transaction-pool/src/pool/mod.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 19d40125fbb..7df08a59528 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -433,13 +433,15 @@ where }); } - let mut listener = self.event_listener.write(); + { + let mut listener = self.event_listener.write(); - for tx in &promoted { - listener.pending(tx.hash(), None); - } - for tx in &discarded { - listener.discarded(tx.hash()); + for tx in &promoted { + listener.pending(tx.hash(), None); + } + for tx in &discarded { + listener.discarded(tx.hash()); + } } // This deletes outdated blob txs from the blob store, based on the account's nonce. This is
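The change above scopes the `event_listener` write guard to its own block so the lock is released as soon as the notifications have been sent, instead of being held across the blob-store maintenance that follows. A minimal standalone sketch of the same pattern, using only `std` primitives rather than reth's types:

```rust
use std::sync::RwLock;

fn main() {
    let events = RwLock::new(Vec::new());

    {
        // The write guard lives only inside this block.
        let mut listener = events.write().unwrap();
        listener.push("promoted");
        listener.push("discarded");
    } // guard dropped here, lock released early

    // Later work no longer contends with the listener lock.
    assert_eq!(events.read().unwrap().len(), 2);
}
```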
From f83e29cdd332570c4498ea22430755bfbfad81f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Mon, 14 Jul 2025 18:45:42 +0200 Subject: [PATCH 163/305] docs(guides): add export era in history section (#17391) --- .../vocs/docs/pages/guides/history-expiry.mdx | 23 ++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/docs/vocs/docs/pages/guides/history-expiry.mdx b/docs/vocs/docs/pages/guides/history-expiry.mdx index 1ba3394681a..1f03b6b4aca 100644 --- a/docs/vocs/docs/pages/guides/history-expiry.mdx +++ b/docs/vocs/docs/pages/guides/history-expiry.mdx @@ -20,7 +20,12 @@ See also [Partial history expiry announcement](https://blog.ethereum.org/2025/07 ## File format -The historical data is packaged and distributed in files of special formats with different names, all of which are based on [e2store](https://github.com/status-im/nimbus-eth2/blob/613f4a9a50c9c4bd8568844eaffb3ac15d067e56/docs/e2store.md#introduction). The most important ones are the **ERA1**, which deals with block range from genesis until the last pre-merge block, and **ERA**, which deals with block range from the merge onwards. See their [specification](https://github.com/eth-clients/e2store-format-specs) for more details. +The historical data is packaged and distributed in files of special formats with different names, all of which are based on [e2store](https://github.com/status-im/nimbus-eth2/blob/613f4a9a50c9c4bd8568844eaffb3ac15d067e56/docs/e2store.md#introduction). The most important ones are the **ERA1**, which deals with the block range from genesis until the last pre-merge block, and **ERA**, which deals with the block range from the merge onwards. + +See the following specifications for more details: +- [E2store specification](https://github.com/eth-clients/e2store-format-specs) +- [ERA1 specification](https://github.com/eth-clients/e2store-format-specs/blob/main/formats/era1.md) +- [ERA specification](https://github.com/eth-clients/e2store-format-specs/blob/main/formats/era.md) The contents of these archives are an ordered sequence of blocks. We're mostly concerned with headers and transactions. For ERA1, there are 8192 blocks per file except for the last one, i.e. the one containing the last pre-merge block, which can contain fewer than that. @@ -57,3 +62,19 @@ There are two kinds of data sources for the ERA1 import. * Local from a file-system directory. Use the option `--era.path` with a directory containing ERA1 files.
Both options cannot be used at the same time. If no option is specified, the remote source is used with a URL derived from the chain ID. Only Mainnet and Sepolia have ERA1 files. If the node is running on a different chain, no source is provided and nothing is imported. + +## Export + +In this section we discuss how to export block data into ERA1 files. + +### Manual export +You can manually export block data from your database to ERA1 files using the [`export-era`](../cli/reth/export-era) command. + +The CLI reads block headers, bodies, and receipts from your local database and packages them into the standardized ERA1 format with up to 8,192 blocks per file. + +#### Setup +The export command allows you to specify: + +- Block ranges with `--first-block-number` and `--last-block-number` +- Output directory with `--path` for the export destination +- File size limits with `--max-blocks-per-file`, up to a maximum of 8,192 blocks per ERA1 file
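+
+For example, a run that exports the first two full ERA1 files might look like this (a minimal sketch using only the flags listed above; the block numbers and output path are placeholders to adapt to your setup):
+
+```bash
+reth export-era \
+  --first-block-number 0 \
+  --last-block-number 16383 \
+  --path ./era1-export \
+  --max-blocks-per-file 8192
+```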
From 52bd07b8fdd7fe1f704860cf80f8454fdde3f7a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Mon, 14 Jul 2025 19:15:55 +0200 Subject: [PATCH 164/305] refactor(rpc): change receipt to `Cow` for `build_receipt` (#17382) --- crates/optimism/rpc/src/eth/receipt.rs | 41 +++++++++-------- crates/rpc/rpc-eth-types/src/receipt.rs | 54 +++++++++++++---------- crates/rpc/rpc/src/eth/helpers/block.rs | 11 ++++- crates/rpc/rpc/src/eth/helpers/receipt.rs | 3 +- 4 files changed, 66 insertions(+), 43 deletions(-)
diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 69eba47910c..81f9702db00 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -15,6 +15,7 @@ use reth_primitives_traits::Recovered; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; use reth_storage_api::{ReceiptProvider, TransactionsProvider}; +use std::borrow::Cow; impl LoadReceipt for OpEthApi where @@ -236,25 +237,29 @@ impl OpReceiptBuilder { let timestamp = meta.timestamp; let block_number = meta.block_number; let tx_signed = *transaction.inner(); - let core_receipt = - build_receipt(transaction, meta, receipt, all_receipts, None, |receipt_with_bloom| { - match receipt { - OpReceipt::Legacy(_) => OpReceiptEnvelope::Legacy(receipt_with_bloom), - OpReceipt::Eip2930(_) => OpReceiptEnvelope::Eip2930(receipt_with_bloom), - OpReceipt::Eip1559(_) => OpReceiptEnvelope::Eip1559(receipt_with_bloom), - OpReceipt::Eip7702(_) => OpReceiptEnvelope::Eip7702(receipt_with_bloom), - OpReceipt::Deposit(receipt) => { - OpReceiptEnvelope::Deposit(OpDepositReceiptWithBloom { - receipt: OpDepositReceipt { - inner: receipt_with_bloom.receipt, - deposit_nonce: receipt.deposit_nonce, - deposit_receipt_version: receipt.deposit_receipt_version, - }, - logs_bloom: receipt_with_bloom.logs_bloom, - }) - } + let core_receipt = build_receipt( + transaction, + meta, + Cow::Borrowed(receipt), + all_receipts, + None, + |receipt_with_bloom| match receipt { + OpReceipt::Legacy(_) => OpReceiptEnvelope::Legacy(receipt_with_bloom), + OpReceipt::Eip2930(_) => OpReceiptEnvelope::Eip2930(receipt_with_bloom), + OpReceipt::Eip1559(_) => OpReceiptEnvelope::Eip1559(receipt_with_bloom), + OpReceipt::Eip7702(_) => OpReceiptEnvelope::Eip7702(receipt_with_bloom), + OpReceipt::Deposit(receipt) => { + OpReceiptEnvelope::Deposit(OpDepositReceiptWithBloom { + receipt: OpDepositReceipt { + inner:
receipt_with_bloom.receipt, + deposit_nonce: receipt.deposit_nonce, + deposit_receipt_version: receipt.deposit_receipt_version, + }, + logs_bloom: receipt_with_bloom.logs_bloom, + }) } - }); + }, + ); let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp, block_number) .l1_block_info(chain_spec, tx_signed, l1_block_info)? diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 4988d13879b..f68547ddac6 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -8,12 +8,13 @@ use alloy_eips::eip7840::BlobParams; use alloy_primitives::{Address, TxKind}; use alloy_rpc_types_eth::{Log, ReceiptWithBloom, TransactionReceipt}; use reth_ethereum_primitives::{Receipt, TransactionSigned}; +use std::borrow::Cow; /// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure. pub fn build_receipt( transaction: Recovered<&T>, meta: TransactionMeta, - receipt: &R, + receipt: Cow<'_, R>, all_receipts: &[R], blob_params: Option, build_envelope: impl FnOnce(ReceiptWithBloom>) -> E, @@ -40,6 +41,8 @@ where let blob_gas_price = blob_gas_used.and_then(|_| Some(blob_params?.calc_blob_fee(meta.excess_blob_gas?))); + let status = receipt.status_or_post_state(); + let cumulative_gas_used = receipt.cumulative_gas_used(); let logs_bloom = receipt.bloom(); // get number of logs in the block @@ -48,28 +51,31 @@ where num_logs += prev_receipt.logs().len(); } - let logs: Vec = receipt - .logs() - .iter() - .enumerate() - .map(|(tx_log_idx, log)| Log { - inner: log.clone(), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - block_timestamp: Some(meta.timestamp), - transaction_hash: Some(meta.tx_hash), - transaction_index: Some(meta.index), - log_index: Some((num_logs + tx_log_idx) as u64), - removed: false, - }) - .collect(); - - let rpc_receipt = alloy_rpc_types_eth::Receipt { - status: receipt.status_or_post_state(), - cumulative_gas_used: receipt.cumulative_gas_used(), - logs, + macro_rules!
build_rpc_logs { + ($logs:expr) => { + $logs + .enumerate() + .map(|(tx_log_idx, log)| Log { + inner: log, + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + block_timestamp: Some(meta.timestamp), + transaction_hash: Some(meta.tx_hash), + transaction_index: Some(meta.index), + log_index: Some((num_logs + tx_log_idx) as u64), + removed: false, + }) + .collect() + }; + } + + let logs = match receipt { + Cow::Borrowed(r) => build_rpc_logs!(r.logs().iter().cloned()), + Cow::Owned(r) => build_rpc_logs!(r.into_logs().into_iter()), }; + let rpc_receipt = alloy_rpc_types_eth::Receipt { status, cumulative_gas_used, logs }; + let (contract_address, to) = match transaction.kind() { TxKind::Create => (Some(from.create(transaction.nonce())), None), TxKind::Call(addr) => (None, Some(Address(*addr))), @@ -107,17 +113,19 @@ impl EthReceiptBuilder { pub fn new( transaction: Recovered<&TransactionSigned>, meta: TransactionMeta, - receipt: &Receipt, + receipt: Cow<'_, Receipt>, all_receipts: &[Receipt], blob_params: Option, ) -> Self { + let tx_type = receipt.tx_type; + let base = build_receipt( transaction, meta, receipt, all_receipts, blob_params, - |receipt_with_bloom| ReceiptEnvelope::from_typed(receipt.tx_type, receipt_with_bloom), + |receipt_with_bloom| ReceiptEnvelope::from_typed(tx_type, receipt_with_bloom), ); Self { base } diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index 6665644dbc7..b9ae198bba1 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -1,5 +1,7 @@ //! Contains RPC handler implementations specific to blocks. +use std::borrow::Cow; + use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; use alloy_rpc_types_eth::{BlockId, TransactionReceipt}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; @@ -59,7 +61,14 @@ where excess_blob_gas, timestamp, }; - Ok(EthReceiptBuilder::new(tx, meta, receipt, &receipts, blob_params).build()) + Ok(EthReceiptBuilder::new( + tx, + meta, + Cow::Borrowed(receipt), + &receipts, + blob_params, + ) + .build()) }) .collect::, Self::Error>>() .map(Some) diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 714815a551a..44b9910b2fa 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -7,6 +7,7 @@ use reth_ethereum_primitives::{Receipt, TransactionSigned}; use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt}; use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; use reth_storage_api::{BlockReader, ReceiptProvider, TransactionsProvider}; +use std::borrow::Cow; impl LoadReceipt for EthApi where @@ -36,7 +37,7 @@ where // Note: we assume this transaction is valid, because it's mined and therefore valid tx.try_into_recovered_unchecked()?.as_recovered_ref(), meta, - &receipt, + Cow::Owned(receipt), &all_receipts, blob_params, ) From 73f2edb90c979afa0ea6e1fe93577b8388d1eced Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roman=20Hodul=C3=A1k?= Date: Mon, 14 Jul 2025 19:46:52 +0200 Subject: [PATCH 165/305] feat(rpc): Use generic transaction request as input (#17092) Co-authored-by: Arsenii Kulikov --- .github/assets/check_wasm.sh | 1 + Cargo.lock | 175 +++++++++--------- Cargo.toml | 54 +++--- .../engine/invalid-block-hooks/src/witness.rs | 7 +- crates/ethereum/node/Cargo.toml | 1 + crates/ethereum/node/src/node.rs | 3 +- crates/net/network-api/Cargo.toml | 4 + 
crates/net/network-api/src/noop.rs | 15 +- crates/optimism/node/Cargo.toml | 1 - crates/optimism/node/src/node.rs | 8 +- crates/optimism/rpc/Cargo.toml | 1 - crates/optimism/rpc/src/eth/call.rs | 4 +- crates/optimism/rpc/src/eth/mod.rs | 65 ++++--- crates/optimism/rpc/src/eth/transaction.rs | 33 ++-- crates/primitives-traits/Cargo.toml | 1 + crates/rpc/rpc-api/src/debug.rs | 9 +- crates/rpc/rpc-api/src/engine.rs | 7 +- crates/rpc/rpc-api/src/trace.rs | 10 +- crates/rpc/rpc-builder/src/lib.rs | 38 ++-- crates/rpc/rpc-builder/tests/it/http.rs | 57 ++++-- crates/rpc/rpc-builder/tests/it/utils.rs | 16 +- crates/rpc/rpc-convert/Cargo.toml | 1 + crates/rpc/rpc-convert/src/rpc.rs | 97 +++++++++- crates/rpc/rpc-eth-api/src/core.rs | 30 +-- crates/rpc/rpc-eth-api/src/helpers/call.rs | 71 ++++--- .../rpc/rpc-eth-api/src/helpers/estimate.rs | 16 +- crates/rpc/rpc-eth-api/src/helpers/signer.rs | 4 +- crates/rpc/rpc-eth-api/src/helpers/spec.rs | 19 +- .../rpc-eth-api/src/helpers/transaction.rs | 37 ++-- crates/rpc/rpc-eth-api/src/types.rs | 7 +- crates/rpc/rpc-eth-types/Cargo.toml | 1 + crates/rpc/rpc-eth-types/src/simulate.rs | 86 ++++----- crates/rpc/rpc-testing-util/src/debug.rs | 2 +- crates/rpc/rpc-testing-util/src/trace.rs | 2 +- crates/rpc/rpc/src/debug.rs | 14 +- crates/rpc/rpc/src/engine.rs | 13 +- crates/rpc/rpc/src/eth/builder.rs | 15 +- crates/rpc/rpc/src/eth/core.rs | 78 ++++---- crates/rpc/rpc/src/eth/filter.rs | 3 +- crates/rpc/rpc/src/eth/helpers/block.rs | 12 +- crates/rpc/rpc/src/eth/helpers/call.rs | 25 ++- crates/rpc/rpc/src/eth/helpers/fees.rs | 9 +- .../rpc/rpc/src/eth/helpers/pending_block.rs | 11 +- crates/rpc/rpc/src/eth/helpers/receipt.rs | 21 ++- crates/rpc/rpc/src/eth/helpers/signer.rs | 40 ++-- crates/rpc/rpc/src/eth/helpers/spec.rs | 15 +- crates/rpc/rpc/src/eth/helpers/state.rs | 26 ++- crates/rpc/rpc/src/eth/helpers/trace.rs | 5 +- crates/rpc/rpc/src/eth/helpers/transaction.rs | 22 ++- crates/rpc/rpc/src/lib.rs | 1 + crates/rpc/rpc/src/trace.rs | 12 +- crates/trie/common/Cargo.toml | 1 + 52 files changed, 708 insertions(+), 498 deletions(-) diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index 2d0eade3d74..faec5157950 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -48,6 +48,7 @@ exclude_crates=( reth-rpc-api reth-rpc-api-testing-util reth-rpc-builder + reth-rpc-convert reth-rpc-e2e-tests reth-rpc-engine-api reth-rpc-eth-api diff --git a/Cargo.lock b/Cargo.lock index 5cf02311d54..9ed950df3e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73e7f99e3a50210eaee2abd57293a2e72b1a5b7bb251b44c4bf33d02ddd402ab" +checksum = "ca3b746060277f3d7f9c36903bb39b593a741cb7afcb0044164c28f0e9b673f0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -138,9 +138,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9945351a277c914f3776ae72b3fc1d22f90d2e840276830e48e9be5bf371a8fe" +checksum = "bf98679329fa708fa809ea596db6d974da892b068ad45e48ac1956f582edf946" dependencies = [ "alloy-consensus", "alloy-eips", @@ -153,9 +153,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1f27be9e6b587904ee5135f72182a565adaf0c7dd341bae330ee6f0e342822b1" +checksum = "a10e47f5305ea08c37b1772086c1573e9a0a257227143996841172d37d3831bb" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -236,9 +236,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4134375e533d095e045982cd7684a29c37089ab7a605ecf2b4aa17a5e61d72d3" +checksum = "f562a81278a3ed83290e68361f2d1c75d018ae3b8589a314faf9303883e18ec9" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -279,9 +279,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d61d58e94791b74c2566a2f240f3f796366e2479d4d39b4a3ec848c733fb92ce" +checksum = "dc41384e9ab8c9b2fb387c52774d9d432656a28edcda1c2d4083e96051524518" dependencies = [ "alloy-eips", "alloy-primitives", @@ -319,9 +319,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edaf2255b0ea9213ecbb056fa92870d858719911e04fb4260bcc43f7743d370" +checksum = "12c454fcfcd5d26ed3b8cae5933cbee9da5f0b05df19b46d4bd4446d1f082565" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -334,9 +334,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c224eafcd1bd4c54cc45b5fc3634ae42722bdb9253780ac64a5deffd794a6cec" +checksum = "42d6d39eabe5c7b3d8f23ac47b0b683b99faa4359797114636c66e0743103d05" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -360,9 +360,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b21283a28b117505a75ee1f2e63c16ea2ea72afca44f670b1f02795d9f5d988" +checksum = "3704fa8b7ba9ba3f378d99b3d628c8bc8c2fc431b709947930f154e22a8368b6" dependencies = [ "alloy-consensus", "alloy-eips", @@ -433,9 +433,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09e5f02654272d9a95c66949b78f30c87701c232cf8302d4a1dab02957f5a0c1" +checksum = "08800e8cbe70c19e2eb7cf3d7ff4b28bdd9b3933f8e1c8136c7d910617ba03bf" dependencies = [ "alloy-chains", "alloy-consensus", @@ -477,9 +477,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08acc8843da1207a80f778bc0ac3e5dc94c2683280fa70ff3090b895d0179537" +checksum = "ae68457a2c2ead6bd7d7acb5bf5f1623324b1962d4f8e7b0250657a3c3ab0a0b" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -520,9 +520,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c956d223a5fa7ef28af1c6ae41b77ecb95a36d686d5644ee22266f6b517615b4" +checksum = "162301b5a57d4d8f000bf30f4dcb82f9f468f3e5e846eeb8598dd39e7886932c" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -531,7 +531,6 @@ dependencies = [ "alloy-transport-http", "alloy-transport-ipc", "alloy-transport-ws", - "async-stream", "futures", "pin-project", "reqwest", @@ -541,16 +540,15 @@ dependencies = [ "tokio-stream", "tower", 
"tracing", - "tracing-futures", "url", "wasmtimer", ] [[package]] name = "alloy-rpc-types" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99074f79ad4b188b1049807f8f96637abc3cc019fde53791906edc26bc092a57" +checksum = "6cd8ca94ae7e2b32cc3895d9981f3772aab0b4756aa60e9ed0bcfee50f0e1328" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -561,9 +559,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ad30ddbec9c315b002e02ba13f4327767cd5e6bdefadbfcec3d95ff6a3206e" +checksum = "e7bff682e76f3f72e9ddc75e54a1bd1db5ce53cbdf2cce2d63a3a981437f78f5" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -573,9 +571,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d34231e06b5f1ad5f274a6ddb3eca8730db5eb868b70a4494a1e4b716b7fe88" +checksum = "9f3ff6a778ebda3deaed9af17930d678611afe1effa895c4260b61009c314f82" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -585,9 +583,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c13e5081ae6b99a7f4e46c18b80d652440320ff404790932cb8259ec73f596e" +checksum = "076b47e834b367d8618c52dd0a0d6a711ddf66154636df394805300af4923b8a" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -596,9 +594,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "544101ff1933e5c8074238b7b49cecb87d47afc411e74927ef58201561c98bf7" +checksum = "48f39da9b760e78fc3f347fba4da257aa6328fb33f73682b26cc0a6874798f7d" dependencies = [ "alloy-eips", "alloy-primitives", @@ -614,9 +612,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "220aeda799891b518a171d3d640ec310bab2f4d80c3987c9ea089cedd8a67008" +checksum = "94a2a86ad7b7d718c15e79d0779bd255561b6b22968dc5ed2e7c0fbc43bb55fe" dependencies = [ "alloy-primitives", "serde", @@ -624,9 +622,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14796fd8574c77213802b0dc0e85886b5cb27c44e72678ab7d0a4a2d5aee79e9" +checksum = "4ba838417c42e8f1fe5eb4f4bbfacb7b5d4b9e615b8d2e831b921e04bf0bed62" dependencies = [ "alloy-consensus", "alloy-eips", @@ -645,9 +643,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bea7326ca6cd6971c58042055a039d5c97a1431e30380d8b4883ad98067c1b5" +checksum = "2c2f847e635ec0be819d06e2ada4bcc4e4204026a83c4bfd78ae8d550e027ae7" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -661,14 +659,15 @@ dependencies = [ "itertools 0.14.0", "serde", "serde_json", + "serde_with", "thiserror 2.0.12", ] [[package]] name = "alloy-rpc-types-mev" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15aac86a4cb20c2f36b1d14202a20eca6baa92691b0aebcfacfe31dd0fedc6ee" +checksum 
= "fb1c9b23cedf70aeb99ea9f16b78cdf902f524e227922fb340e3eb899ebe96dc" dependencies = [ "alloy-consensus", "alloy-eips", @@ -681,9 +680,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc92f9dd9e56a9edcfe0c28c0d1898a2c5281a2944d89e2b8a4effeca13823e" +checksum = "6fc58180302a94c934d455eeedb3ecb99cdc93da1dbddcdbbdb79dd6fe618b2a" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -695,9 +694,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fadc5c919b4e8b3bdcbea2705d63dccb8ed2ce864399d005fed534eefebc8fe4" +checksum = "0f9f089d78bb94148e0fcfda087d4ce5fd35a7002847b5e90610c0fcb140f7b4" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -707,9 +706,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06c02a06ae34d2354398dc9d2de0503129c3f0904a3eb791b5d0149f267c2688" +checksum = "ae699248d02ade9db493bbdae61822277dc14ae0f82a5a4153203b60e34422a6" dependencies = [ "alloy-primitives", "arbitrary", @@ -719,9 +718,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2389ec473fc24735896960b1189f1d92177ed53c4e464d285e54ed3483f9cca3" +checksum = "3cf7d793c813515e2b627b19a15693960b3ed06670f9f66759396d06ebe5747b" dependencies = [ "alloy-primitives", "async-trait", @@ -734,9 +733,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab70b75dee5f4673ace65058927310658c8ffac63a94aa4b973f925bab020367" +checksum = "51a424bc5a11df0d898ce0fd15906b88ebe2a6e4f17a514b51bc93946bb756bd" dependencies = [ "alloy-consensus", "alloy-network", @@ -822,9 +821,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b99ffb19be54a61d18599843ef887ddd12c3b713244462c184e2eab67106d51a" +checksum = "4f317d20f047b3de4d9728c556e2e9a92c9a507702d2016424cd8be13a74ca5e" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -845,9 +844,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92b5a640491f3ab18d17bd6e521c64744041cd86f741b25cdb6a346ca0e90c66" +checksum = "ff084ac7b1f318c87b579d221f11b748341d68b9ddaa4ffca5e62ed2b8cfefb4" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -860,9 +859,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17fe2576d9689409724f7cb737aa7fdd70674edfec4b9c3ce54f6ffac00e83ca" +checksum = "edb099cdad8ed2e6a80811cdf9bbf715ebf4e34c981b4a6e2d1f9daacbf8b218" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -880,9 +879,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4816ea8425e789057d08804452eff399204808b7b7a233ac3f7534183cae2236" +checksum = 
"0e915e1250dc129ad48d264573ccd08e4716fdda564a772fd217875b8459aff9" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -918,9 +917,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afd621a9ddef2fdc06d17089f45e47cf84d0b46ca5a1bc6c83807c9119636f52" +checksum = "1154c8187a5ff985c95a8b2daa2fedcf778b17d7668e5e50e556c4ff9c881154" dependencies = [ "alloy-primitives", "darling", @@ -1359,9 +1358,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.25" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40f6024f3f856663b45fd0c9b6f2024034a702f453549449e0d84a305900dad4" +checksum = "ddb939d66e4ae03cee6091612804ba446b12878410cfa17f785f4dd67d4014e8" dependencies = [ "brotli", "flate2", @@ -2469,9 +2468,9 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] @@ -5498,9 +5497,9 @@ checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "memmap2" -version = "0.9.5" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" +checksum = "483758ad303d734cec05e5c12b41d7e93e6a6390c5e9dae6bdeb7c1259012d28" dependencies = [ "libc", ] @@ -6083,9 +6082,9 @@ dependencies = [ [[package]] name = "op-revm" -version = "8.0.2" +version = "8.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf1273c005f27528400dae0e2489a41378cfc29f0e42ea17f21b7d9679aef679" +checksum = "ee9ba9cab294a5ed02afd1a1060220762b3c52911acab635db33822e93f7276d" dependencies = [ "auto_impl", "once_cell", @@ -7078,9 +7077,9 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "regress" -version = "0.10.3" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ef7fa9ed0256d64a688a3747d0fef7a88851c18a5e1d57f115f38ec2e09366" +checksum = "145bb27393fe455dd64d6cbc8d059adfa392590a45eadf079c01b11857e7b010" dependencies = [ "hashbrown 0.15.4", "memchr", @@ -8709,8 +8708,10 @@ dependencies = [ name = "reth-network-api" version = "1.5.1" dependencies = [ + "alloy-consensus", "alloy-primitives", "alloy-rpc-types-admin", + "alloy-rpc-types-eth", "auto_impl", "derive_more", "enr", @@ -9245,7 +9246,6 @@ dependencies = [ "eyre", "futures", "op-alloy-consensus", - "op-alloy-network", "op-alloy-rpc-types-engine", "op-revm", "reth-chainspec", @@ -9382,7 +9382,6 @@ dependencies = [ "op-alloy-rpc-types", "op-alloy-rpc-types-engine", "op-revm", - "parking_lot", "reqwest", "reth-chainspec", "reth-evm", @@ -9952,6 +9951,7 @@ dependencies = [ "alloy-network", "alloy-primitives", "alloy-rpc-types-eth", + "alloy-signer", "jsonrpsee-types", "op-alloy-consensus", "op-alloy-rpc-types", @@ -10071,6 +10071,7 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-evm", + "alloy-network", "alloy-primitives", "alloy-rpc-types-eth", "alloy-sol-types", @@ -10646,9 +10647,9 @@ dependencies = [ [[package]] name = "revm" -version = "27.0.2" +version = "27.0.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "24188978ab59b8fd508d0193f8a08848bdcd19ae0f73f2ad1d6ee3b2cd6c0903" +checksum = "70a84455f03d3480d4ed2e7271c15f2ec95b758e86d57cb8d258a8ff1c22e9a4" dependencies = [ "revm-bytecode", "revm-context", @@ -10678,9 +10679,9 @@ dependencies = [ [[package]] name = "revm-context" -version = "8.0.2" +version = "8.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c949e6b9d996ae5c7606cd4f82d997dabad30909f85601b5876b704d95b505b" +checksum = "a990abf66b47895ca3e915d5f3652bb7c6a4cff6e5351fdf0fc2795171fd411c" dependencies = [ "cfg-if", "derive-where", @@ -10737,9 +10738,9 @@ dependencies = [ [[package]] name = "revm-handler" -version = "8.0.2" +version = "8.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35b3a613d012189571b28fb13befc8c8af54e54f4f76997a0c02828cea0584a3" +checksum = "03c35a17a38203976f97109e20eccf6732447ce6c9c42973bae42732b2e957ff" dependencies = [ "auto_impl", "derive-where", @@ -10756,9 +10757,9 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "8.0.2" +version = "8.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64aee1f5f5b07cfa73250f530edf4c8c3bb8da693d5d00fe9f94f70499978f00" +checksum = "e69abf6a076741bd5cd87b7d6c1b48be2821acc58932f284572323e81a8d4179" dependencies = [ "auto_impl", "either", @@ -10794,9 +10795,9 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "23.0.1" +version = "23.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d2a89c40b7c72220f3d4b753ca0ce9ae912cf5dad7d3517182e4e1473b9b55e" +checksum = "d95c4a9a1662d10b689b66b536ddc2eb1e89f5debfcabc1a2d7b8417a2fa47cd" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -10806,9 +10807,9 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "24.0.0" +version = "24.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c35a987086055a5cb368e080d1300ea853a3185b7bb9cdfebb8c05852cda24f" +checksum = "b68d54a4733ac36bd29ee645c3c2e5e782fb63f199088d49e2c48c64a9fedc15" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -12483,8 +12484,6 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "futures", - "futures-task", "pin-project", "tracing", ] diff --git a/Cargo.toml b/Cargo.toml index a800852600d..49e37635c12 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -478,33 +478,33 @@ alloy-trie = { version = "0.9.0", default-features = false } alloy-hardforks = "0.2.7" -alloy-consensus = { version = "1.0.18", default-features = false } -alloy-contract = { version = "1.0.18", default-features = false } -alloy-eips = { version = "1.0.18", default-features = false } -alloy-genesis = { version = "1.0.18", default-features = false } -alloy-json-rpc = { version = "1.0.18", default-features = false } -alloy-network = { version = "1.0.18", default-features = false } -alloy-network-primitives = { version = "1.0.18", default-features = false } -alloy-provider = { version = "1.0.18", features = ["reqwest"], default-features = false } -alloy-pubsub = { version = "1.0.18", default-features = false } -alloy-rpc-client = { version = "1.0.18", default-features = false } -alloy-rpc-types = { version = "1.0.18", features = ["eth"], default-features = false } -alloy-rpc-types-admin = { version = "1.0.18", default-features = false } 
-alloy-rpc-types-anvil = { version = "1.0.18", default-features = false } -alloy-rpc-types-beacon = { version = "1.0.18", default-features = false } -alloy-rpc-types-debug = { version = "1.0.18", default-features = false } -alloy-rpc-types-engine = { version = "1.0.18", default-features = false } -alloy-rpc-types-eth = { version = "1.0.18", default-features = false } -alloy-rpc-types-mev = { version = "1.0.18", default-features = false } -alloy-rpc-types-trace = { version = "1.0.18", default-features = false } -alloy-rpc-types-txpool = { version = "1.0.18", default-features = false } -alloy-serde = { version = "1.0.18", default-features = false } -alloy-signer = { version = "1.0.18", default-features = false } -alloy-signer-local = { version = "1.0.18", default-features = false } -alloy-transport = { version = "1.0.18" } -alloy-transport-http = { version = "1.0.18", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.0.18", default-features = false } -alloy-transport-ws = { version = "1.0.18", default-features = false } +alloy-consensus = { version = "1.0.22", default-features = false } +alloy-contract = { version = "1.0.22", default-features = false } +alloy-eips = { version = "1.0.22", default-features = false } +alloy-genesis = { version = "1.0.22", default-features = false } +alloy-json-rpc = { version = "1.0.22", default-features = false } +alloy-network = { version = "1.0.22", default-features = false } +alloy-network-primitives = { version = "1.0.22", default-features = false } +alloy-provider = { version = "1.0.22", features = ["reqwest"], default-features = false } +alloy-pubsub = { version = "1.0.22", default-features = false } +alloy-rpc-client = { version = "1.0.22", default-features = false } +alloy-rpc-types = { version = "1.0.22", features = ["eth"], default-features = false } +alloy-rpc-types-admin = { version = "1.0.22", default-features = false } +alloy-rpc-types-anvil = { version = "1.0.22", default-features = false } +alloy-rpc-types-beacon = { version = "1.0.22", default-features = false } +alloy-rpc-types-debug = { version = "1.0.22", default-features = false } +alloy-rpc-types-engine = { version = "1.0.22", default-features = false } +alloy-rpc-types-eth = { version = "1.0.22", default-features = false } +alloy-rpc-types-mev = { version = "1.0.22", default-features = false } +alloy-rpc-types-trace = { version = "1.0.22", default-features = false } +alloy-rpc-types-txpool = { version = "1.0.22", default-features = false } +alloy-serde = { version = "1.0.22", default-features = false } +alloy-signer = { version = "1.0.22", default-features = false } +alloy-signer-local = { version = "1.0.22", default-features = false } +alloy-transport = { version = "1.0.22" } +alloy-transport-http = { version = "1.0.22", features = ["reqwest-rustls-tls"], default-features = false } +alloy-transport-ipc = { version = "1.0.22", default-features = false } +alloy-transport-ws = { version = "1.0.22", default-features = false } # op alloy-op-evm = { version = "0.14", default-features = false } diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 54e18c07a70..b78cf462f52 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -230,8 +230,11 @@ where if let Some(healthy_node_client) = &self.healthy_node_client { // Compare the witness against the healthy node. 
let healthy_node_witness = futures::executor::block_on(async move { - DebugApiClient::debug_execution_witness(healthy_node_client, block.number().into()) - .await + DebugApiClient::<()>::debug_execution_witness( + healthy_node_client, + block.number().into(), + ) + .await })?; let healthy_path = self.save_file( diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index a1cca45ea2d..128ca756190 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -27,6 +27,7 @@ reth-evm-ethereum.workspace = true reth-consensus.workspace = true reth-rpc.workspace = true reth-rpc-api.workspace = true +reth-rpc-eth-api.workspace = true reth-rpc-builder.workspace = true reth-rpc-server-types.workspace = true reth-node-api.workspace = true diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index a7d2913eac3..9585e8abf8b 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -38,6 +38,7 @@ use reth_provider::{providers::ProviderFactoryBuilder, EthStorage}; use reth_rpc::{eth::core::EthApiFor, ValidationApi}; use reth_rpc_api::{eth::FullEthApiServer, servers::BlockSubmissionValidationApiServer}; use reth_rpc_builder::{config::RethRpcServerConfig, middleware::RethRpcMiddleware}; +use reth_rpc_eth_api::helpers::AddDevSigners; use reth_rpc_eth_types::{error::FromEvmError, EthApiError}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; @@ -137,7 +138,7 @@ pub struct EthereumEthApiBuilder; impl EthApiBuilder for EthereumEthApiBuilder where N: FullNodeComponents, - EthApiFor: FullEthApiServer, + EthApiFor: FullEthApiServer + AddDevSigners, { type EthApi = EthApiFor; diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index 4ecfa1f593e..b0ebed8bcfb 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -21,6 +21,8 @@ reth-tokio-util.workspace = true reth-ethereum-forks.workspace = true # ethereum +alloy-consensus.workspace = true +alloy-rpc-types-eth.workspace = true alloy-primitives = { workspace = true, features = ["getrandom"] } alloy-rpc-types-admin.workspace = true enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } @@ -44,4 +46,6 @@ serde = [ "alloy-primitives/serde", "enr/serde", "reth-ethereum-forks/serde", + "alloy-consensus/serde", + "alloy-rpc-types-eth/serde", ] diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs index 2183f276bab..3d6b295e7f3 100644 --- a/crates/net/network-api/src/noop.rs +++ b/crates/net/network-api/src/noop.rs @@ -6,6 +6,13 @@ use core::{fmt, marker::PhantomData}; use std::net::{IpAddr, SocketAddr}; +use crate::{ + events::{NetworkPeersEvents, PeerEventStream}, + test_utils::{PeersHandle, PeersHandleProvider}, + BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent, + NetworkEventListenerProvider, NetworkInfo, NetworkStatus, PeerId, PeerInfo, PeerRequest, Peers, + PeersInfo, +}; use alloy_rpc_types_admin::EthProtocolInfo; use enr::{secp256k1::SecretKey, Enr}; use reth_eth_wire_types::{ @@ -18,14 +25,6 @@ use reth_tokio_util::{EventSender, EventStream}; use tokio::sync::{mpsc, oneshot}; use tokio_stream::wrappers::UnboundedReceiverStream; -use crate::{ - events::{NetworkPeersEvents, PeerEventStream}, - test_utils::{PeersHandle, PeersHandleProvider}, - BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent, - NetworkEventListenerProvider, NetworkInfo, NetworkStatus, PeerId, 
PeerInfo, PeerRequest, Peers, - PeersInfo, -}; - /// A type that implements all network trait that does nothing. /// /// Intended for testing purposes where network is not used. diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 12367188576..9bdf4ecb2ea 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -52,7 +52,6 @@ op-revm.workspace = true # ethereum alloy-primitives.workspace = true op-alloy-consensus.workspace = true -op-alloy-network.workspace = true op-alloy-rpc-types-engine.workspace = true alloy-rpc-types-engine.workspace = true alloy-rpc-types-eth.workspace = true diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 07cd3866c13..adeacfe8ef3 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -54,9 +54,9 @@ use reth_optimism_txpool::{ supervisor::{SupervisorClient, DEFAULT_SUPERVISOR_URL}, OpPooledTx, }; -use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions}; +use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions, ProviderTx}; use reth_rpc_api::DebugApiServer; -use reth_rpc_eth_api::{ext::L2EthApiExtServer, FullEthApiServer}; +use reth_rpc_eth_api::{ext::L2EthApiExtServer, FullEthApiServer, RpcTypes, SignableTxRequest}; use reth_rpc_eth_types::error::FromEvmError; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; @@ -440,7 +440,7 @@ where ::Transaction: OpPooledTx, EvmFactoryFor: EvmFactory>, OpEthApi: FullEthApiServer, - NetworkT: op_alloy_network::Network + Unpin, + NetworkT: RpcTypes>>, EV: EngineValidatorBuilder, EB: EngineApiBuilder, RpcMiddleware: RethRpcMiddleware, @@ -559,7 +559,7 @@ where <::Pool as TransactionPool>::Transaction: OpPooledTx, EvmFactoryFor: EvmFactory>, OpEthApi: FullEthApiServer, - NetworkT: op_alloy_network::Network + Unpin, + NetworkT: RpcTypes>>, EV: EngineValidatorBuilder, EB: EngineApiBuilder, RpcMiddleware: RethRpcMiddleware, diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 954722b3fd4..34343670819 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -57,7 +57,6 @@ revm.workspace = true op-revm.workspace = true # async -parking_lot.workspace = true tokio.workspace = true reqwest = { workspace = true, features = ["rustls-tls-native-roots"] } async-trait.workspace = true diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index d886b201bdf..a988bbf740a 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,12 +1,11 @@ use super::OpNodeCore; use crate::{OpEthApi, OpEthApiError}; -use alloy_rpc_types_eth::TransactionRequest; use op_revm::OpTransaction; use reth_evm::{execute::BlockExecutorFactory, ConfigureEvm, EvmFactory, TxEnvFor}; use reth_node_api::NodePrimitives; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall, LoadBlock, LoadState, SpawnBlocking}, - FromEvmError, FullEthApiTypes, RpcConvert, RpcTypes, + FromEvmError, FullEthApiTypes, RpcConvert, }; use reth_storage_api::{errors::ProviderError, ProviderHeader, ProviderTx}; use revm::context::TxEnv; @@ -39,7 +38,6 @@ where >, >, RpcConvert: RpcConvert, Network = Self::NetworkTypes>, - NetworkTypes: RpcTypes>, Error: FromEvmError + From<::Error> + From, diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 6f2bc1b0b19..b5f76539cdc 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ 
b/crates/optimism/rpc/src/eth/mod.rs @@ -21,10 +21,11 @@ use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ helpers::{ - AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadBlock, LoadFee, LoadState, - SpawnBlocking, Trace, + spec::SignersForApi, AddDevSigners, EthApiSpec, EthFees, EthState, LoadBlock, LoadFee, + LoadState, SpawnBlocking, Trace, }, EthApiTypes, FromEvmError, FullEthApiServer, RpcConverter, RpcNodeCore, RpcNodeCoreExt, + RpcTypes, SignableTxRequest, }; use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; use reth_storage_api::{ @@ -39,11 +40,12 @@ use reth_transaction_pool::TransactionPool; use std::{fmt, fmt::Formatter, marker::PhantomData, sync::Arc}; /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. -pub type EthApiNodeBackend = EthApiInner< +pub type EthApiNodeBackend = EthApiInner< ::Provider, ::Pool, ::Network, ::Evm, + Rpc, >; /// A helper trait with requirements for [`RpcNodeCore`] to be used in [`OpEthApi`]. @@ -60,18 +62,23 @@ impl OpNodeCore for T where T: RpcNodeCore {} /// /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. -#[derive(Clone)] -pub struct OpEthApi { +pub struct OpEthApi { /// Gateway to node's core components. - inner: Arc>, + inner: Arc>, /// Converter for RPC types. - tx_resp_builder: RpcConverter>, + tx_resp_builder: RpcConverter>, } -impl OpEthApi { +impl Clone for OpEthApi { + fn clone(&self) -> Self { + Self { inner: self.inner.clone(), tx_resp_builder: self.tx_resp_builder.clone() } + } +} + +impl OpEthApi { /// Creates a new `OpEthApi`. pub fn new( - eth_api: EthApiNodeBackend, + eth_api: EthApiNodeBackend, sequencer_client: Option, min_suggested_priority_fee: U256, ) -> Self { @@ -84,7 +91,7 @@ impl OpEthApi { } /// Returns a reference to the [`EthApiNodeBackend`]. - pub fn eth_api(&self) -> &EthApiNodeBackend { + pub fn eth_api(&self) -> &EthApiNodeBackend { self.inner.eth_api() } /// Returns the configured sequencer client, if any. 
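The hand-written `Clone` impl above is worth a remark: deriving `Clone` on a generic struct adds a `Clone` bound on every type parameter, even when all of the data sits behind an `Arc`. Writing the impl manually keeps the handle cloneable without the derive-added bounds on `N` and `NetworkT`. A minimal, self-contained sketch of the pattern, using illustrative names rather than the actual reth types:

```rust
use std::{marker::PhantomData, sync::Arc};

struct Inner<N> {
    // Shared state; `N` is only used at the type level in this sketch.
    _marker: PhantomData<N>,
}

/// An `Arc`-backed handle, generic over a possibly non-`Clone` parameter `N`.
struct Handle<N> {
    inner: Arc<Inner<N>>,
}

// Hand-written `Clone`: only the `Arc` is cloned, so no `N: Clone` bound appears.
impl<N> Clone for Handle<N> {
    fn clone(&self) -> Self {
        Self { inner: self.inner.clone() }
    }
}

fn main() {
    let handle = Handle::<String> { inner: Arc::new(Inner { _marker: PhantomData }) };
    let _copy = handle.clone();
}
```

Cloning such a handle is just an atomic reference-count bump, which is why these API types can be handed out freely across tasks.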
@@ -102,13 +109,13 @@ impl EthApiTypes for OpEthApi where Self: Send + Sync + fmt::Debug, N: OpNodeCore, - NetworkT: op_alloy_network::Network + Clone + fmt::Debug, + NetworkT: RpcTypes, ::Evm: fmt::Debug, ::Primitives: fmt::Debug, { type Error = OpEthApiError; type NetworkTypes = NetworkT; - type RpcConvert = RpcConverter>; + type RpcConvert = RpcConverter>; fn tx_resp_builder(&self) -> &Self::RpcConvert { &self.tx_resp_builder @@ -118,7 +125,7 @@ where impl RpcNodeCore for OpEthApi where N: OpNodeCore, - NetworkT: op_alloy_network::Network, + NetworkT: RpcTypes, { type Primitives = N::Primitives; type Provider = N::Provider; @@ -156,7 +163,7 @@ where impl RpcNodeCoreExt for OpEthApi where N: OpNodeCore, - NetworkT: op_alloy_network::Network, + NetworkT: RpcTypes, { #[inline] fn cache(&self) -> &EthStateCache, ProviderReceipt> { @@ -172,9 +179,10 @@ where + StageCheckpointReader, Network: NetworkInfo, >, - NetworkT: op_alloy_network::Network, + NetworkT: RpcTypes, { type Transaction = ProviderTx; + type Rpc = NetworkT; #[inline] fn starting_block(&self) -> U256 { @@ -182,7 +190,7 @@ where } #[inline] - fn signers(&self) -> &parking_lot::RwLock>>>> { + fn signers(&self) -> &SignersForApi { self.inner.eth_api.signers() } } @@ -191,7 +199,7 @@ impl SpawnBlocking for OpEthApi where Self: Send + Sync + Clone + 'static, N: OpNodeCore, - NetworkT: op_alloy_network::Network, + NetworkT: RpcTypes, ::Evm: fmt::Debug, ::Primitives: fmt::Debug, { @@ -219,6 +227,7 @@ where + ChainSpecProvider + StateProviderFactory, >, + NetworkT: RpcTypes, { #[inline] fn gas_oracle(&self) -> &GasPriceOracle { @@ -242,7 +251,7 @@ where Provider: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, >, - NetworkT: op_alloy_network::Network, + NetworkT: RpcTypes, ::Evm: fmt::Debug, ::Primitives: fmt::Debug, { @@ -252,6 +261,7 @@ impl EthState for OpEthApi where Self: LoadState + SpawnBlocking, N: OpNodeCore, + NetworkT: RpcTypes, { #[inline] fn max_proof_window(&self) -> u64 { @@ -267,6 +277,7 @@ where >, >, N: OpNodeCore, + NetworkT: RpcTypes, { } @@ -283,28 +294,30 @@ where Error: FromEvmError, >, N: OpNodeCore, + NetworkT: RpcTypes, { } impl AddDevSigners for OpEthApi where N: OpNodeCore, + NetworkT: RpcTypes>>, { fn with_dev_accounts(&self) { *self.inner.eth_api.signers().write() = DevSigner::random_signers(20) } } -impl fmt::Debug for OpEthApi { +impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() } } /// Container type `OpEthApi` -pub struct OpEthApiInner { +pub struct OpEthApiInner { /// Gateway to node's core components. - eth_api: EthApiNodeBackend, + eth_api: EthApiNodeBackend, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP /// network. sequencer_client: Option, @@ -314,15 +327,15 @@ pub struct OpEthApiInner { min_suggested_priority_fee: U256, } -impl fmt::Debug for OpEthApiInner { +impl fmt::Debug for OpEthApiInner { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApiInner").finish() } } -impl OpEthApiInner { +impl OpEthApiInner { /// Returns a reference to the [`EthApiNodeBackend`]. 
- const fn eth_api(&self) -> &EthApiNodeBackend { + const fn eth_api(&self) -> &EthApiNodeBackend { &self.eth_api } @@ -390,8 +403,8 @@ impl OpEthApiBuilder { impl EthApiBuilder for OpEthApiBuilder where N: FullNodeComponents, - OpEthApi: FullEthApiServer, - NetworkT: op_alloy_network::Network + Unpin, + NetworkT: RpcTypes, + OpEthApi: FullEthApiServer + AddDevSigners, { type EthApi = OpEthApi; diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 106fe85b1f0..b92bd71f994 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -10,9 +10,9 @@ use op_alloy_consensus::{transaction::OpTransactionInfo, OpTxEnvelope}; use reth_node_api::FullNodeComponents; use reth_optimism_primitives::DepositReceipt; use reth_rpc_eth_api::{ - helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, + helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction, SpawnBlocking}, try_into_op_tx_info, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcNodeCore, - RpcNodeCoreExt, TxInfoMapper, + RpcNodeCoreExt, RpcTypes, TxInfoMapper, }; use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_storage_api::{ @@ -25,12 +25,14 @@ use std::{ sync::Arc, }; -impl EthTransactions for OpEthApi +impl EthTransactions for OpEthApi where - Self: LoadTransaction + EthApiTypes, + Self: LoadTransaction + + EthApiTypes, N: OpNodeCore>>, + Rpc: RpcTypes, { - fn signers(&self) -> &parking_lot::RwLock>>>> { + fn signers(&self) -> &SignersForRpc { self.inner.eth_api.signers() } @@ -83,9 +85,10 @@ where { } -impl OpEthApi +impl OpEthApi where N: OpNodeCore, + Rpc: RpcTypes, { /// Returns the [`SequencerClient`] if one is set. pub fn raw_tx_forwarder(&self) -> Option { @@ -97,26 +100,32 @@ where /// /// For deposits, receipt is fetched to extract `deposit_nonce` and `deposit_receipt_version`. /// Otherwise, it works like regular Ethereum implementation, i.e. uses [`TransactionInfo`]. -#[derive(Clone)] -pub struct OpTxInfoMapper(Arc>); +pub struct OpTxInfoMapper(Arc>); -impl Debug for OpTxInfoMapper { +impl Clone for OpTxInfoMapper { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +impl Debug for OpTxInfoMapper { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("OpTxInfoMapper").finish() } } -impl OpTxInfoMapper { +impl OpTxInfoMapper { /// Creates [`OpTxInfoMapper`] that uses [`ReceiptProvider`] borrowed from given `eth_api`. 
- pub const fn new(eth_api: Arc>) -> Self { + pub const fn new(eth_api: Arc>) -> Self { Self(eth_api) } } -impl TxInfoMapper<&OpTxEnvelope> for OpTxInfoMapper +impl TxInfoMapper<&OpTxEnvelope> for OpTxInfoMapper where N: FullNodeComponents, N::Provider: ReceiptProvider, + Rpc: RpcTypes, { type Out = OpTransactionInfo; type Err = ProviderError; diff --git a/crates/primitives-traits/Cargo.toml b/crates/primitives-traits/Cargo.toml index a920a6cb5af..a5bdd9a0ae7 100644 --- a/crates/primitives-traits/Cargo.toml +++ b/crates/primitives-traits/Cargo.toml @@ -127,6 +127,7 @@ serde-bincode-compat = [ "op-alloy-consensus?/serde", "op-alloy-consensus?/serde-bincode-compat", "alloy-genesis/serde-bincode-compat", + "alloy-rpc-types-eth?/serde-bincode-compat", ] serde = [ "dep:serde", diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 8aefda4767b..5dd7401782f 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -1,8 +1,9 @@ use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_genesis::ChainConfig; +use alloy_json_rpc::RpcObject; use alloy_primitives::{Address, Bytes, B256}; use alloy_rpc_types_debug::ExecutionWitness; -use alloy_rpc_types_eth::{transaction::TransactionRequest, Block, Bundle, StateContext}; +use alloy_rpc_types_eth::{Block, Bundle, StateContext}; use alloy_rpc_types_trace::geth::{ BlockTraceResult, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, TraceResult, }; @@ -12,7 +13,7 @@ use reth_trie_common::{updates::TrieUpdates, HashedPostState}; /// Debug rpc interface. #[cfg_attr(not(feature = "client"), rpc(server, namespace = "debug"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "debug"))] -pub trait DebugApi { +pub trait DebugApi { /// Returns an RLP-encoded header. #[method(name = "getRawHeader")] async fn raw_header(&self, block_id: BlockId) -> RpcResult; @@ -105,7 +106,7 @@ pub trait DebugApi { #[method(name = "traceCall")] async fn debug_trace_call( &self, - request: TransactionRequest, + request: TxReq, block_id: Option, opts: Option, ) -> RpcResult; @@ -128,7 +129,7 @@ pub trait DebugApi { #[method(name = "traceCallMany")] async fn debug_trace_call_many( &self, - bundles: Vec, + bundles: Vec>, state_context: Option, opts: Option, ) -> RpcResult>>; diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index 6d9ba5211b6..088d18b9bf4 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -15,8 +15,7 @@ use alloy_rpc_types_engine::{ ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, }; use alloy_rpc_types_eth::{ - state::StateOverride, transaction::TransactionRequest, BlockOverrides, - EIP1186AccountProofResponse, Filter, Log, SyncStatus, + state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, }; use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc, RpcModule}; @@ -250,7 +249,7 @@ pub trait EngineApi { /// Specifically for the engine auth server: #[cfg_attr(not(feature = "client"), rpc(server, namespace = "eth"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "eth"))] -pub trait EngineEthApi { +pub trait EngineEthApi { /// Returns an object with data about the sync status or false. 
#[method(name = "syncing")] fn syncing(&self) -> RpcResult; @@ -267,7 +266,7 @@ pub trait EngineEthApi { #[method(name = "call")] async fn call( &self, - request: TransactionRequest, + request: TxReq, block_id: Option, state_overrides: Option, block_overrides: Option>, diff --git a/crates/rpc/rpc-api/src/trace.rs b/crates/rpc/rpc-api/src/trace.rs index 425fe1bb63e..1c4b148a098 100644 --- a/crates/rpc/rpc-api/src/trace.rs +++ b/crates/rpc/rpc-api/src/trace.rs @@ -1,8 +1,6 @@ use alloy_eips::BlockId; use alloy_primitives::{map::HashSet, Bytes, B256}; -use alloy_rpc_types_eth::{ - state::StateOverride, transaction::TransactionRequest, BlockOverrides, Index, -}; +use alloy_rpc_types_eth::{state::StateOverride, BlockOverrides, Index}; use alloy_rpc_types_trace::{ filter::TraceFilter, opcode::{BlockOpcodeGas, TransactionOpcodeGas}, @@ -13,12 +11,12 @@ use jsonrpsee::{core::RpcResult, proc_macros::rpc}; /// Ethereum trace API #[cfg_attr(not(feature = "client"), rpc(server, namespace = "trace"))] #[cfg_attr(feature = "client", rpc(server, client, namespace = "trace"))] -pub trait TraceApi { +pub trait TraceApi { /// Executes the given call and returns a number of possible traces for it. #[method(name = "call")] async fn trace_call( &self, - call: TransactionRequest, + call: TxReq, trace_types: HashSet, block_id: Option, state_overrides: Option, @@ -31,7 +29,7 @@ pub trait TraceApi { #[method(name = "callMany")] async fn trace_call_many( &self, - calls: Vec<(TransactionRequest, HashSet)>, + calls: Vec<(TxReq, HashSet)>, block_id: Option, ) -> RpcResult>; diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 4dcce346c0d..7c5604e2420 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -20,6 +20,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use crate::{auth::AuthRpcModule, error::WsHttpSamePortError, metrics::RpcRequestMetrics}; +use alloy_network::Ethereum; use alloy_provider::{fillers::RecommendedFillers, Provider, ProviderBuilder}; use core::marker::PhantomData; use error::{ConflictingModules, RpcError, ServerKind}; @@ -36,7 +37,7 @@ use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_primitives_traits::NodePrimitives; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthApi, EthApiBuilder, EthBundle, MinerApi, NetApi, - OtterscanApi, RPCApi, RethApi, TraceApi, TxPoolApi, ValidationApiConfig, Web3Api, + OtterscanApi, RPCApi, RethApi, RpcTypes, TraceApi, TxPoolApi, ValidationApiConfig, Web3Api, }; use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ @@ -105,7 +106,7 @@ pub mod rate_limiter; /// /// This is the main entrypoint and the easiest way to configure an RPC server. #[derive(Debug, Clone)] -pub struct RpcModuleBuilder { +pub struct RpcModuleBuilder { /// The Provider type to when creating all rpc handlers provider: Provider, /// The Pool type to when creating all rpc handlers @@ -119,13 +120,15 @@ pub struct RpcModuleBuilder { /// The consensus implementation. consensus: Consensus, /// Node data primitives. - _primitives: PhantomData, + _primitives: PhantomData<(N, Rpc)>, } // === impl RpcBuilder === -impl - RpcModuleBuilder +impl + RpcModuleBuilder +where + Rpc: RpcTypes, { /// Create a new instance of the builder pub const fn new( @@ -143,7 +146,7 @@ impl pub fn with_provider
<P>
<P>
( self, provider: P, - ) -> RpcModuleBuilder { + ) -> RpcModuleBuilder { let Self { pool, network, executor, evm_config, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } @@ -152,7 +155,7 @@ impl pub fn with_pool
<P>
( self, pool: P, - ) -> RpcModuleBuilder { + ) -> RpcModuleBuilder { let Self { provider, network, executor, evm_config, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } @@ -164,7 +167,8 @@ impl /// [`EthApi`] which requires a [`TransactionPool`] implementation. pub fn with_noop_pool( self, - ) -> RpcModuleBuilder { + ) -> RpcModuleBuilder + { let Self { provider, executor, network, evm_config, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, @@ -181,7 +185,7 @@ impl pub fn with_network( self, network: Net, - ) -> RpcModuleBuilder { + ) -> RpcModuleBuilder { let Self { provider, pool, executor, evm_config, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } @@ -193,7 +197,7 @@ impl /// [`EthApi`] which requires a [`NetworkInfo`] implementation. pub fn with_noop_network( self, - ) -> RpcModuleBuilder { + ) -> RpcModuleBuilder { let Self { provider, pool, executor, evm_config, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, @@ -233,7 +237,7 @@ impl pub fn with_evm_config( self, evm_config: E, - ) -> RpcModuleBuilder { + ) -> RpcModuleBuilder { let Self { provider, pool, executor, network, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } @@ -242,13 +246,13 @@ impl pub fn with_consensus( self, consensus: C, - ) -> RpcModuleBuilder { + ) -> RpcModuleBuilder { let Self { provider, network, pool, executor, evm_config, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } /// Instantiates a new [`EthApiBuilder`] from the configured components. - pub fn eth_api_builder(&self) -> EthApiBuilder + pub fn eth_api_builder(&self) -> EthApiBuilder where Provider: BlockReaderIdExt + Clone, Pool: Clone, @@ -268,7 +272,7 @@ impl /// Note: This spawns all necessary tasks. /// /// See also [`EthApiBuilder`]. 
- pub fn bootstrap_eth_api(&self) -> EthApi + pub fn bootstrap_eth_api(&self) -> EthApi where N: NodePrimitives, Provider: BlockReaderIdExt @@ -286,8 +290,8 @@ impl } } -impl - RpcModuleBuilder +impl + RpcModuleBuilder where N: NodePrimitives, Provider: FullRpcProvider @@ -387,7 +391,7 @@ where } } -impl Default for RpcModuleBuilder { +impl Default for RpcModuleBuilder { fn default() -> Self { Self::new((), (), (), Box::new(TokioTaskExecutor::default()), (), ()) } diff --git a/crates/rpc/rpc-builder/tests/it/http.rs b/crates/rpc/rpc-builder/tests/it/http.rs index d21d6f915a9..a790253d266 100644 --- a/crates/rpc/rpc-builder/tests/it/http.rs +++ b/crates/rpc/rpc-builder/tests/it/http.rs @@ -410,11 +410,11 @@ where { let block_id = BlockId::Number(BlockNumberOrTag::default()); - DebugApiClient::raw_header(client, block_id).await.unwrap(); - DebugApiClient::raw_block(client, block_id).await.unwrap_err(); - DebugApiClient::raw_transaction(client, B256::default()).await.unwrap(); - DebugApiClient::raw_receipts(client, block_id).await.unwrap(); - DebugApiClient::bad_blocks(client).await.unwrap(); + DebugApiClient::::raw_header(client, block_id).await.unwrap(); + DebugApiClient::::raw_block(client, block_id).await.unwrap_err(); + DebugApiClient::::raw_transaction(client, B256::default()).await.unwrap(); + DebugApiClient::::raw_receipts(client, block_id).await.unwrap(); + DebugApiClient::::bad_blocks(client).await.unwrap(); } async fn test_basic_net_calls(client: &C) @@ -441,22 +441,39 @@ where count: None, }; - TraceApiClient::trace_raw_transaction(client, Bytes::default(), HashSet::default(), None) - .await - .unwrap_err(); - TraceApiClient::trace_call_many(client, vec![], Some(BlockNumberOrTag::Latest.into())) - .await - .unwrap_err(); - TraceApiClient::replay_transaction(client, B256::default(), HashSet::default()) - .await - .err() - .unwrap(); - TraceApiClient::trace_block(client, block_id).await.unwrap_err(); - TraceApiClient::replay_block_transactions(client, block_id, HashSet::default()) - .await - .unwrap_err(); + TraceApiClient::::trace_raw_transaction( + client, + Bytes::default(), + HashSet::default(), + None, + ) + .await + .unwrap_err(); + TraceApiClient::::trace_call_many( + client, + vec![], + Some(BlockNumberOrTag::Latest.into()), + ) + .await + .unwrap_err(); + TraceApiClient::::replay_transaction( + client, + B256::default(), + HashSet::default(), + ) + .await + .err() + .unwrap(); + TraceApiClient::::trace_block(client, block_id).await.unwrap_err(); + TraceApiClient::::replay_block_transactions( + client, + block_id, + HashSet::default(), + ) + .await + .unwrap_err(); - TraceApiClient::trace_filter(client, trace_filter).await.unwrap(); + TraceApiClient::::trace_filter(client, trace_filter).await.unwrap(); } async fn test_basic_web3_calls(client: &C) diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index 5c95dbc7ad5..f03d73f01d9 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,11 +1,11 @@ -use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; - +use alloy_network::Ethereum; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use reth_chainspec::MAINNET; use reth_consensus::noop::NoopConsensus; use reth_engine_primitives::BeaconConsensusEngineHandle; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_ethereum_primitives::EthPrimitives; +use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use reth_evm_ethereum::EthEvmConfig; use 
reth_network_api::noop::NoopNetwork; @@ -118,9 +118,15 @@ pub async fn launch_http_ws_same_port(modules: impl Into) -> } /// Returns an [`RpcModuleBuilder`] with testing components. -pub fn test_rpc_builder( -) -> RpcModuleBuilder -{ +pub fn test_rpc_builder() -> RpcModuleBuilder< + EthPrimitives, + NoopProvider, + TestPool, + NoopNetwork, + EthEvmConfig, + NoopConsensus, + Ethereum, +> { RpcModuleBuilder::default() .with_provider(NoopProvider::default()) .with_pool(TestPoolBuilder::default().into()) diff --git a/crates/rpc/rpc-convert/Cargo.toml b/crates/rpc/rpc-convert/Cargo.toml index 0ccf2107ad2..4923c5ab27c 100644 --- a/crates/rpc/rpc-convert/Cargo.toml +++ b/crates/rpc/rpc-convert/Cargo.toml @@ -20,6 +20,7 @@ reth-evm.workspace = true # ethereum alloy-primitives.workspace = true alloy-rpc-types-eth = { workspace = true, features = ["serde"] } +alloy-signer.workspace = true alloy-consensus.workspace = true alloy-network.workspace = true alloy-json-rpc.workspace = true diff --git a/crates/rpc/rpc-convert/src/rpc.rs b/crates/rpc/rpc-convert/src/rpc.rs index 7b5c457419c..4e052672102 100644 --- a/crates/rpc/rpc-convert/src/rpc.rs +++ b/crates/rpc/rpc-convert/src/rpc.rs @@ -1,10 +1,17 @@ +use std::{fmt::Debug, future::Future}; + +use alloy_consensus::{ + EthereumTxEnvelope, EthereumTypedTransaction, SignableTransaction, TxEip4844, +}; use alloy_json_rpc::RpcObject; -use alloy_network::{Network, ReceiptResponse, TransactionResponse}; +use alloy_network::{Network, ReceiptResponse, TransactionResponse, TxSigner}; +use alloy_primitives::Signature; +use alloy_rpc_types_eth::TransactionRequest; /// RPC types used by the `eth_` RPC API. /// /// This is a subset of [`Network`] trait with only RPC response types kept. -pub trait RpcTypes { +pub trait RpcTypes: Send + Sync + Clone + Unpin + Debug + 'static { /// Header response type. type Header: RpcObject; /// Receipt response type. @@ -12,12 +19,12 @@ pub trait RpcTypes { /// Transaction response type. type TransactionResponse: RpcObject + TransactionResponse; /// Transaction response type. - type TransactionRequest: RpcObject; + type TransactionRequest: RpcObject + AsRef + AsMut; } impl RpcTypes for T where - T: Network, + T: Network + AsMut> + Unpin, { type Header = T::HeaderResponse; type Receipt = T::ReceiptResponse; @@ -30,3 +37,85 @@ pub type RpcTransaction = ::TransactionResponse; /// Adapter for network specific transaction request. pub type RpcTxReq = ::TransactionRequest; + +/// Error for [`SignableTxRequest`] trait. +#[derive(Debug, thiserror::Error)] +pub enum SignTxRequestError { + /// The transaction request is invalid. + #[error("invalid transaction request")] + InvalidTransactionRequest, + + /// The signer is not supported. + #[error(transparent)] + SignerNotSupported(#[from] alloy_signer::Error), +} + +/// An abstraction over transaction requests that can be signed. +pub trait SignableTxRequest: Send + Sync + 'static { + /// Attempts to build a transaction request and sign it with the given signer. 
+ fn try_build_and_sign( + self, + signer: impl TxSigner + Send, + ) -> impl Future> + Send; +} + +impl SignableTxRequest> for TransactionRequest { + async fn try_build_and_sign( + self, + signer: impl TxSigner + Send, + ) -> Result, SignTxRequestError> { + let mut tx = + self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?; + let signature = signer.sign_transaction(&mut tx).await?; + let signed = match tx { + EthereumTypedTransaction::Legacy(tx) => { + EthereumTxEnvelope::Legacy(tx.into_signed(signature)) + } + EthereumTypedTransaction::Eip2930(tx) => { + EthereumTxEnvelope::Eip2930(tx.into_signed(signature)) + } + EthereumTypedTransaction::Eip1559(tx) => { + EthereumTxEnvelope::Eip1559(tx.into_signed(signature)) + } + EthereumTypedTransaction::Eip4844(tx) => { + EthereumTxEnvelope::Eip4844(TxEip4844::from(tx).into_signed(signature)) + } + EthereumTypedTransaction::Eip7702(tx) => { + EthereumTxEnvelope::Eip7702(tx.into_signed(signature)) + } + }; + Ok(signed) + } +} + +#[cfg(feature = "op")] +impl SignableTxRequest + for op_alloy_rpc_types::OpTransactionRequest +{ + async fn try_build_and_sign( + self, + signer: impl TxSigner + Send, + ) -> Result { + let mut tx = + self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?; + let signature = signer.sign_transaction(&mut tx).await?; + let signed = match tx { + op_alloy_consensus::OpTypedTransaction::Legacy(tx) => { + op_alloy_consensus::OpTxEnvelope::Legacy(tx.into_signed(signature)) + } + op_alloy_consensus::OpTypedTransaction::Eip2930(tx) => { + op_alloy_consensus::OpTxEnvelope::Eip2930(tx.into_signed(signature)) + } + op_alloy_consensus::OpTypedTransaction::Eip1559(tx) => { + op_alloy_consensus::OpTxEnvelope::Eip1559(tx.into_signed(signature)) + } + op_alloy_consensus::OpTypedTransaction::Eip7702(tx) => { + op_alloy_consensus::OpTxEnvelope::Eip7702(tx.into_signed(signature)) + } + op_alloy_consensus::OpTypedTransaction::Deposit(_) => { + return Err(SignTxRequestError::InvalidTransactionRequest); + } + }; + Ok(signed) + } +} diff --git a/crates/rpc/rpc-eth-api/src/core.rs b/crates/rpc/rpc-eth-api/src/core.rs index 0f2b9eb3896..3e6b85bdee9 100644 --- a/crates/rpc/rpc-eth-api/src/core.rs +++ b/crates/rpc/rpc-eth-api/src/core.rs @@ -12,7 +12,7 @@ use alloy_rpc_types_eth::{ simulate::{SimulatePayload, SimulatedBlock}, state::{EvmOverrides, StateOverride}, BlockOverrides, Bundle, EIP1186AccountProofResponse, EthCallResponse, FeeHistory, Index, - StateContext, SyncStatus, TransactionRequest, Work, + StateContext, SyncStatus, Work, }; use alloy_serde::JsonStorageKey; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; @@ -214,7 +214,7 @@ pub trait EthApi, block_number: Option, ) -> RpcResult>>; @@ -222,7 +222,7 @@ pub trait EthApi, state_overrides: Option, block_overrides: Option>, @@ -233,7 +233,7 @@ pub trait EthApi, + bundles: Vec>, state_context: Option, state_override: Option, ) -> RpcResult>>; @@ -255,7 +255,7 @@ pub trait EthApi, state_override: Option, ) -> RpcResult; @@ -265,7 +265,7 @@ pub trait EthApi, state_override: Option, ) -> RpcResult; @@ -333,7 +333,7 @@ pub trait EthApi RpcResult; + async fn send_transaction(&self, request: TxReq) -> RpcResult; /// Sends signed transaction, returning its hash. #[method(name = "sendRawTransaction")] @@ -353,7 +353,7 @@ pub trait EthApi RpcResult; + async fn sign_transaction(&self, transaction: TxReq) -> RpcResult; /// Signs data via [EIP-712](https://github.com/ethereum/EIPs/blob/master/EIPS/eip-712.md). 
#[method(name = "signTypedData")] @@ -656,7 +656,7 @@ where /// Handler for: `eth_simulateV1` async fn simulate_v1( &self, - payload: SimulatePayload, + payload: SimulatePayload>, block_number: Option, ) -> RpcResult>>> { trace!(target: "rpc::eth", ?block_number, "Serving eth_simulateV1"); @@ -667,7 +667,7 @@ where /// Handler for: `eth_call` async fn call( &self, - request: TransactionRequest, + request: RpcTxReq, block_number: Option, state_overrides: Option, block_overrides: Option>, @@ -685,7 +685,7 @@ where /// Handler for: `eth_callMany` async fn call_many( &self, - bundles: Vec, + bundles: Vec>>, state_context: Option, state_override: Option, ) -> RpcResult>> { @@ -696,7 +696,7 @@ where /// Handler for: `eth_createAccessList` async fn create_access_list( &self, - request: TransactionRequest, + request: RpcTxReq, block_number: Option, state_override: Option, ) -> RpcResult { @@ -707,7 +707,7 @@ where /// Handler for: `eth_estimateGas` async fn estimate_gas( &self, - request: TransactionRequest, + request: RpcTxReq, block_number: Option, state_override: Option, ) -> RpcResult { @@ -799,7 +799,7 @@ where } /// Handler for: `eth_sendTransaction` - async fn send_transaction(&self, request: TransactionRequest) -> RpcResult { + async fn send_transaction(&self, request: RpcTxReq) -> RpcResult { trace!(target: "rpc::eth", ?request, "Serving eth_sendTransaction"); Ok(EthTransactions::send_transaction(self, request).await?) } @@ -823,7 +823,7 @@ where } /// Handler for: `eth_signTransaction` - async fn sign_transaction(&self, request: TransactionRequest) -> RpcResult { + async fn sign_transaction(&self, request: RpcTxReq) -> RpcResult { trace!(target: "rpc::eth", ?request, "Serving eth_signTransaction"); Ok(EthTransactions::sign_transaction(self, request).await?) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 22ec006a4f8..707aa052543 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -13,11 +13,11 @@ use alloy_evm::{ call::caller_gas_allowance, overrides::{apply_block_overrides, apply_state_overrides, OverrideBlockHashes}, }; +use alloy_network::TransactionBuilder; use alloy_primitives::{Bytes, B256, U256}; use alloy_rpc_types_eth::{ simulate::{SimBlock, SimulatePayload, SimulatedBlock}, state::{EvmOverrides, StateOverride}, - transaction::TransactionRequest, BlockId, Bundle, EthCallResponse, StateContext, TransactionInfo, }; use futures::Future; @@ -32,7 +32,7 @@ use reth_revm::{ database::StateProviderDatabase, db::{CacheDB, State}, }; -use reth_rpc_convert::{RpcConvert, RpcTypes}; +use reth_rpc_convert::{RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, error::{api::FromEvmHalt, ensure_success, FromEthApiError}, @@ -59,7 +59,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// Estimate gas needed for execution of the `request` at the [`BlockId`]. 
fn estimate_gas_at( &self, - request: TransactionRequest, + request: RpcTxReq<::Network>, at: BlockId, state_override: Option, ) -> impl Future> + Send { @@ -72,7 +72,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// See also: fn simulate_v1( &self, - payload: SimulatePayload, + payload: SimulatePayload::Network>>, block: Option, ) -> impl Future> + Send { async move { @@ -144,9 +144,10 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA let chain_id = evm_env.cfg_env.chain_id; let default_gas_limit = { - let total_specified_gas = calls.iter().filter_map(|tx| tx.gas).sum::(); + let total_specified_gas = + calls.iter().filter_map(|tx| tx.as_ref().gas_limit()).sum::(); let txs_without_gas_limit = - calls.iter().filter(|tx| tx.gas.is_none()).count(); + calls.iter().filter(|tx| tx.as_ref().gas_limit().is_none()).count(); if total_specified_gas > block_gas_limit { return Err(EthApiError::Other(Box::new( @@ -216,7 +217,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// Executes the call request (`eth_call`) and returns the output fn call( &self, - request: TransactionRequest, + request: RpcTxReq<::Network>, block_number: Option, overrides: EvmOverrides, ) -> impl Future> + Send { @@ -232,7 +233,7 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// optionality of state overrides fn call_many( &self, - bundles: Vec, + bundles: Vec::Network>>>, state_context: Option, mut state_override: Option, ) -> impl Future>, Self::Error>> + Send { @@ -348,11 +349,11 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } } - /// Creates [`AccessListResult`] for the [`TransactionRequest`] at the given + /// Creates [`AccessListResult`] for the [`RpcTxReq`] at the given /// [`BlockId`], or latest block. fn create_access_list_at( &self, - request: TransactionRequest, + request: RpcTxReq<::Network>, block_number: Option, state_override: Option, ) -> impl Future> + Send @@ -370,13 +371,13 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA } } - /// Creates [`AccessListResult`] for the [`TransactionRequest`] at the given + /// Creates [`AccessListResult`] for the [`RpcTxReq`] at the given /// [`BlockId`]. 
fn create_access_list_with( &self, mut evm_env: EvmEnvFor, at: BlockId, - mut request: TransactionRequest, + request: RpcTxReq<::Network>, state_override: Option, ) -> Result where @@ -403,14 +404,14 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA // Disabled because eth_createAccessList is sometimes used with non-eoa senders evm_env.cfg_env.disable_eip3607 = true; - if request.gas.is_none() && tx_env.gas_price() > 0 { + if request.as_ref().gas_limit().is_none() && tx_env.gas_price() > 0 { let cap = caller_gas_allowance(&mut db, &tx_env).map_err(Self::Error::from_eth_err)?; // no gas limit was provided in the request, so we need to cap the request's gas limit tx_env.set_gas_limit(cap.min(evm_env.block_env.gas_limit)); } // can consume the list since we're not using the request anymore - let initial = request.access_list.take().unwrap_or_default(); + let initial = request.as_ref().access_list().cloned().unwrap_or_default(); let mut inspector = AccessListInspector::new(initial); @@ -461,10 +462,7 @@ pub trait Call: SignedTx = ProviderTx, >, >, - RpcConvert: RpcConvert< - TxEnv = TxEnvFor, - Network: RpcTypes>, - >, + RpcConvert: RpcConvert>, Error: FromEvmError + From<::Error> + From, @@ -526,7 +524,7 @@ pub trait Call: /// Executes the call request at the given [`BlockId`]. fn transact_call_at( &self, - request: TransactionRequest, + request: RpcTxReq<::Network>, at: BlockId, overrides: EvmOverrides, ) -> impl Future>, Self::Error>> + Send @@ -555,10 +553,10 @@ pub trait Call: }) } - /// Prepares the state and env for the given [`TransactionRequest`] at the given [`BlockId`] and + /// Prepares the state and env for the given [`RpcTxReq`] at the given [`BlockId`] and /// executes the closure on a new task returning the result of the closure. /// - /// This returns the configured [`EvmEnv`] for the given [`TransactionRequest`] at + /// This returns the configured [`EvmEnv`] for the given [`RpcTxReq`] at /// the given [`BlockId`] and with configured call settings: `prepare_call_env`. /// /// This is primarily used by `eth_call`. @@ -572,7 +570,7 @@ pub trait Call: /// instead, where blocking IO is less problematic. fn spawn_with_call_at( &self, - request: TransactionRequest, + request: RpcTxReq<::Network>, at: BlockId, overrides: EvmOverrides, f: F, @@ -694,26 +692,25 @@ pub trait Call: Ok(index) } - /// Configures a new `TxEnv` for the [`TransactionRequest`] /// - /// All `TxEnv` fields are derived from the given [`TransactionRequest`], if fields are + /// All `TxEnv` fields are derived from the given [`RpcTxReq`], if fields are /// `None`, they fall back to the [`EvmEnv`]'s settings. fn create_txn_env( &self, evm_env: &EvmEnv>, - mut request: TransactionRequest, + mut request: RpcTxReq<::Network>, mut db: impl Database>, ) -> Result, Self::Error> { - if request.nonce.is_none() { - request.nonce.replace( - db.basic(request.from.unwrap_or_default()) - .map_err(Into::into)? - .map(|acc| acc.nonce) - .unwrap_or_default(), - ); + if request.as_ref().nonce().is_none() { + let nonce = db + .basic(request.as_ref().from().unwrap_or_default()) + .map_err(Into::into)? + .map(|acc| acc.nonce) + .unwrap_or_default(); + request.as_mut().set_nonce(nonce); } - Ok(self.tx_resp_builder().tx_env(request.into(), &evm_env.cfg_env, &evm_env.block_env)?) + Ok(self.tx_resp_builder().tx_env(request, &evm_env.cfg_env, &evm_env.block_env)?) } /// Prepares the [`EvmEnv`] for execution of calls. 
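The pattern running through this whole hunk is the switch from concrete field access on `TransactionRequest` (`request.nonce`, `request.gas`, `request.access_list.take()`) to the `request.as_ref()` / `request.as_mut()` accessors, which is what lets helpers like `create_txn_env` compile for any network's `RpcTxReq`. A minimal sketch of that accessor pattern, assuming only the `alloy-rpc-types-eth` crate; the wrapper type, helper, and `next_nonce` argument are illustrative, not reth APIs:

```rust
use alloy_rpc_types_eth::TransactionRequest;

/// A stand-in for a network-specific request that wraps the core Ethereum
/// request fields, similar in spirit to `OpTransactionRequest`.
struct MyNetworkRequest(TransactionRequest);

impl AsRef<TransactionRequest> for MyNetworkRequest {
    fn as_ref(&self) -> &TransactionRequest {
        &self.0
    }
}

impl AsMut<TransactionRequest> for MyNetworkRequest {
    fn as_mut(&mut self) -> &mut TransactionRequest {
        &mut self.0
    }
}

/// Fills a missing nonce through the accessors, mirroring the nonce fill in
/// `create_txn_env`; `next_nonce` stands in for the value read from state.
fn fill_nonce_if_missing<R>(request: &mut R, next_nonce: u64)
where
    R: AsRef<TransactionRequest> + AsMut<TransactionRequest>,
{
    if request.as_ref().nonce.is_none() {
        request.as_mut().nonce = Some(next_nonce);
    }
}

fn main() {
    let mut req = MyNetworkRequest(TransactionRequest::default());
    fill_nonce_if_missing(&mut req, 7);
    assert_eq!(req.as_ref().nonce, Some(7));
}
```

This is exactly the role of the `AsRef`/`AsMut` bounds added to `RpcTypes::TransactionRequest` in `rpc.rs` earlier in this patch: every supported network's request type must expose the core Ethereum fields this way.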
@@ -733,7 +730,7 @@ pub trait Call: fn prepare_call_env( &self, mut evm_env: EvmEnvFor, - mut request: TransactionRequest, + mut request: RpcTxReq<::Network>, db: &mut DB, overrides: EvmOverrides, ) -> Result<(EvmEnvFor, TxEnvFor), Self::Error> @@ -741,7 +738,7 @@ pub trait Call: DB: Database + DatabaseCommit + OverrideBlockHashes, EthApiError: From<::Error>, { - if request.gas > Some(self.call_gas_limit()) { + if request.as_ref().gas_limit() > Some(self.call_gas_limit()) { // configured gas exceeds limit return Err( EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh).into() @@ -761,7 +758,7 @@ pub trait Call: evm_env.cfg_env.disable_base_fee = true; // set nonce to None so that the correct nonce is chosen by the EVM - request.nonce = None; + request.as_mut().take_nonce(); if let Some(block_overrides) = overrides.block { apply_block_overrides(*block_overrides, db, &mut evm_env.block_env); @@ -771,7 +768,7 @@ pub trait Call: .map_err(EthApiError::from_state_overrides_err)?; } - let request_gas = request.gas; + let request_gas = request.as_ref().gas_limit(); let mut tx_env = self.create_txn_env(&evm_env, request, &mut *db)?; if request_gas.is_none() { diff --git a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs index 91af2c37e4c..87945c3f4ad 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/estimate.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/estimate.rs @@ -3,13 +3,15 @@ use super::{Call, LoadPendingBlock}; use crate::{AsEthApiError, FromEthApiError, IntoEthApiError}; use alloy_evm::{call::caller_gas_allowance, overrides::apply_state_overrides}; +use alloy_network::TransactionBuilder; use alloy_primitives::{TxKind, U256}; -use alloy_rpc_types_eth::{state::StateOverride, transaction::TransactionRequest, BlockId}; +use alloy_rpc_types_eth::{state::StateOverride, BlockId}; use futures::Future; use reth_chainspec::MIN_TRANSACTION_GAS; use reth_errors::ProviderError; use reth_evm::{ConfigureEvm, Database, Evm, EvmEnvFor, EvmFor, TransactionEnv, TxEnvFor}; use reth_revm::{database::StateProviderDatabase, db::CacheDB}; +use reth_rpc_convert::{RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ error::{api::FromEvmHalt, FromEvmError}, EthApiError, RevertError, RpcInvalidTransactionError, @@ -23,7 +25,7 @@ use tracing::trace; pub trait EstimateCall: Call { /// Estimates the gas usage of the `request` with the state. /// - /// This will execute the [`TransactionRequest`] and find the best gas limit via binary search. + /// This will execute the [`RpcTxReq`] and find the best gas limit via binary search. /// /// ## EVM settings /// @@ -35,7 +37,7 @@ pub trait EstimateCall: Call { fn estimate_gas_with( &self, mut evm_env: EvmEnvFor, - mut request: TransactionRequest, + mut request: RpcTxReq<::Network>, state: S, state_override: Option, ) -> Result @@ -52,11 +54,11 @@ pub trait EstimateCall: Call { evm_env.cfg_env.disable_base_fee = true; // set nonce to None so that the correct nonce is chosen by the EVM - request.nonce = None; + request.as_mut().take_nonce(); // Keep a copy of gas related request values - let tx_request_gas_limit = request.gas; - let tx_request_gas_price = request.gas_price; + let tx_request_gas_limit = request.as_ref().gas_limit(); + let tx_request_gas_price = request.as_ref().gas_price(); // the gas limit of the corresponding block let block_env_gas_limit = evm_env.block_env.gas_limit; @@ -268,7 +270,7 @@ pub trait EstimateCall: Call { /// Estimate gas needed for execution of the `request` at the [`BlockId`]. 
fn estimate_gas_at( &self, - request: TransactionRequest, + request: RpcTxReq<::Network>, at: BlockId, state_override: Option, ) -> impl Future> + Send diff --git a/crates/rpc/rpc-eth-api/src/helpers/signer.rs b/crates/rpc/rpc-eth-api/src/helpers/signer.rs index 62f8b75b869..4060be138e0 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/signer.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/signer.rs @@ -12,7 +12,7 @@ pub type Result = result::Result; /// An Ethereum Signer used via RPC. #[async_trait::async_trait] -pub trait EthSigner: Send + Sync + DynClone { +pub trait EthSigner: Send + Sync + DynClone { /// Returns the available accounts for this signer. fn accounts(&self) -> Vec
<Address>
; @@ -25,7 +25,7 @@ pub trait EthSigner: Send + Sync + DynClone { async fn sign(&self, address: Address, message: &[u8]) -> Result; /// Signs a transaction request using the given account in the request. - async fn sign_transaction(&self, request: TransactionRequest, address: &Address) -> Result; + async fn sign_transaction(&self, request: TxReq, address: &Address) -> Result; /// Encodes and signs the typed data according to EIP-712. Payload must implement Eip712 trait. fn sign_typed_data(&self, address: Address, payload: &TypedData) -> Result; diff --git a/crates/rpc/rpc-eth-api/src/helpers/spec.rs b/crates/rpc/rpc-eth-api/src/helpers/spec.rs index de446d8fb2d..fd3e13620c5 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/spec.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/spec.rs @@ -6,7 +6,8 @@ use futures::Future; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthereumHardforks}; use reth_errors::{RethError, RethResult}; use reth_network_api::NetworkInfo; -use reth_storage_api::{BlockNumReader, StageCheckpointReader}; +use reth_rpc_convert::{RpcTxReq, RpcTypes}; +use reth_storage_api::{BlockNumReader, StageCheckpointReader, TransactionsProvider}; use crate::{helpers::EthSigner, RpcNodeCore}; @@ -25,11 +26,14 @@ pub trait EthApiSpec: /// The transaction type signers are using. type Transaction; + /// The RPC requests and responses. + type Rpc: RpcTypes; + /// Returns the block node is started on. fn starting_block(&self) -> U256; /// Returns a handle to the signers owned by provider. - fn signers(&self) -> &parking_lot::RwLock>>>; + fn signers(&self) -> &SignersForApi; /// Returns the current ethereum protocol version. fn protocol_version(&self) -> impl Future> + Send { @@ -88,3 +92,14 @@ pub trait EthApiSpec: Ok(status) } } + +/// A handle to [`EthSigner`]s with its generics set from [`EthApiSpec`]. +pub type SignersForApi = parking_lot::RwLock< + Vec::Transaction, RpcTxReq<::Rpc>>>>, +>; + +/// A handle to [`EthSigner`]s with its generics set from [`TransactionsProvider`] and +/// [`reth_rpc_convert::RpcTypes`].
+pub type SignersForRpc = parking_lot::RwLock< + Vec::Transaction, RpcTxReq>>>, +>; diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index c0c759d400d..4f1252e193b 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -3,8 +3,9 @@ use super::{EthApiSpec, EthSigner, LoadBlock, LoadReceipt, LoadState, SpawnBlocking}; use crate::{ - helpers::estimate::EstimateCall, FromEthApiError, FullEthApiTypes, IntoEthApiError, - RpcNodeCore, RpcNodeCoreExt, RpcReceipt, RpcTransaction, + helpers::{estimate::EstimateCall, spec::SignersForRpc}, + FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcNodeCore, RpcNodeCoreExt, RpcReceipt, + RpcTransaction, }; use alloy_consensus::{ transaction::{SignerRecoverable, TransactionMeta}, @@ -14,12 +15,12 @@ use alloy_dyn_abi::TypedData; use alloy_eips::{eip2718::Encodable2718, BlockId}; use alloy_network::TransactionBuilder; use alloy_primitives::{Address, Bytes, TxHash, B256}; -use alloy_rpc_types_eth::{transaction::TransactionRequest, BlockNumberOrTag, TransactionInfo}; +use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionInfo}; use futures::{Future, StreamExt}; use reth_chain_state::CanonStateSubscriptions; use reth_node_api::BlockBody; use reth_primitives_traits::{RecoveredBlock, SignedTransaction}; -use reth_rpc_convert::transaction::RpcConvert; +use reth_rpc_convert::{transaction::RpcConvert, RpcTxReq}; use reth_rpc_eth_types::{ utils::binary_search, EthApiError, EthApiError::TransactionConfirmationTimeout, SignError, TransactionSource, @@ -41,7 +42,7 @@ use std::sync::Arc; /// /// ## Calls /// -/// There are subtle differences between when transacting [`TransactionRequest`]: +/// There are subtle differences between when transacting [`RpcTxReq`]: /// /// The endpoints `eth_call` and `eth_estimateGas` and `eth_createAccessList` should always /// __disable__ the base fee check in the [`CfgEnv`](revm::context::CfgEnv). @@ -57,8 +58,7 @@ pub trait EthTransactions: LoadTransaction { /// Returns a handle for signing data. /// /// Signer access in default (L1) trait method implementations. - #[expect(clippy::type_complexity)] - fn signers(&self) -> &parking_lot::RwLock>>>>; + fn signers(&self) -> &SignersForRpc; /// Decodes and recovers the transaction and submits it to the pool. /// @@ -379,13 +379,13 @@ pub trait EthTransactions: LoadTransaction { /// Returns the hash of the signed transaction. 
fn send_transaction( &self, - mut request: TransactionRequest, + mut request: RpcTxReq, ) -> impl Future> + Send where Self: EthApiSpec + LoadBlock + EstimateCall, { async move { - let from = match request.from { + let from = match request.as_ref().from() { Some(from) => from, None => return Err(SignError::NoAccount.into_eth_err()), }; @@ -395,18 +395,18 @@ pub trait EthTransactions: LoadTransaction { } // set nonce if not already set before - if request.nonce.is_none() { + if request.as_ref().nonce().is_none() { let nonce = self.next_available_nonce(from).await?; - request.nonce = Some(nonce); + request.as_mut().set_nonce(nonce); } let chain_id = self.chain_id(); - request.chain_id = Some(chain_id.to()); + request.as_mut().set_chain_id(chain_id.to()); let estimated_gas = self.estimate_gas_at(request.clone(), BlockId::pending(), None).await?; let gas_limit = estimated_gas; - request.set_gas_limit(gas_limit.to()); + request.as_mut().set_gas_limit(gas_limit.to()); let transaction = self.sign_request(&from, request).await?.with_signer(from); @@ -431,7 +431,7 @@ pub trait EthTransactions: LoadTransaction { fn sign_request( &self, from: &Address, - txn: TransactionRequest, + txn: RpcTxReq, ) -> impl Future, Self::Error>> + Send { async move { self.find_signer(from)? @@ -462,10 +462,10 @@ pub trait EthTransactions: LoadTransaction { /// Returns the EIP-2718 encoded signed transaction. fn sign_transaction( &self, - request: TransactionRequest, + request: RpcTxReq, ) -> impl Future> + Send { async move { - let from = match request.from { + let from = match request.as_ref().from() { Some(from) => from, None => return Err(SignError::NoAccount.into_eth_err()), }; @@ -489,7 +489,10 @@ pub trait EthTransactions: LoadTransaction { fn find_signer( &self, account: &Address, - ) -> Result> + 'static>, Self::Error> { + ) -> Result< + Box, RpcTxReq> + 'static>, + Self::Error, + > { self.signers() .read() .iter() diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 7bb91af8258..2b4148ebe81 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -1,9 +1,10 @@ //! Trait for specifying `eth` network dependent API types. use crate::{AsEthApiError, FromEthApiError, RpcNodeCore}; -use alloy_rpc_types_eth::{Block, TransactionRequest}; +use alloy_rpc_types_eth::Block; use reth_chain_state::CanonStateSubscriptions; use reth_rpc_convert::RpcConvert; +pub use reth_rpc_convert::{RpcTransaction, RpcTxReq, RpcTypes}; use reth_storage_api::{ProviderTx, ReceiptProvider, TransactionsProvider}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{ @@ -11,8 +12,6 @@ use std::{ fmt::{self}, }; -pub use reth_rpc_convert::{RpcTransaction, RpcTxReq, RpcTypes}; - /// Network specific `eth` API types. 
/// /// This trait defines the network specific rpc types and helpers required for the `eth_` and @@ -64,7 +63,6 @@ where Network = Self::NetworkTypes, Error = RpcError, >, - NetworkTypes: RpcTypes>, >, { } @@ -81,7 +79,6 @@ impl FullEthApiTypes for T where Network = Self::NetworkTypes, Error = RpcError, >, - NetworkTypes: RpcTypes>, > { } diff --git a/crates/rpc/rpc-eth-types/Cargo.toml b/crates/rpc/rpc-eth-types/Cargo.toml index 4a2104d9146..2148ba7e37b 100644 --- a/crates/rpc/rpc-eth-types/Cargo.toml +++ b/crates/rpc/rpc-eth-types/Cargo.toml @@ -35,6 +35,7 @@ alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-sol-types.workspace = true alloy-rpc-types-eth.workspace = true +alloy-network.workspace = true revm.workspace = true revm-inspectors.workspace = true diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 988261b8179..9cca683d2be 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -7,11 +7,11 @@ use crate::{ }, EthApiError, RevertError, }; -use alloy_consensus::{BlockHeader, Transaction as _, TxType}; +use alloy_consensus::{BlockHeader, Transaction as _}; use alloy_eips::eip2718::WithEncoded; +use alloy_network::TransactionBuilder; use alloy_rpc_types_eth::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, - transaction::TransactionRequest, Block, BlockTransactionsKind, Header, }; use jsonrpsee_types::ErrorObject; @@ -22,7 +22,7 @@ use reth_evm::{ use reth_primitives_traits::{ block::BlockTx, BlockBody as _, NodePrimitives, Recovered, RecoveredBlock, SignedTransaction, }; -use reth_rpc_convert::{RpcConvert, RpcTransaction, RpcTypes}; +use reth_rpc_convert::{RpcConvert, RpcTransaction, RpcTxReq}; use reth_rpc_server_types::result::rpc_err; use reth_storage_api::noop::NoopProvider; use revm::{ @@ -61,10 +61,12 @@ impl ToRpcError for EthSimulateError { /// given [`BlockExecutor`]. /// /// Returns all executed transactions and the result of the execution. +/// +/// [`TransactionRequest`]: alloy_rpc_types_eth::TransactionRequest #[expect(clippy::type_complexity)] pub fn execute_transactions( mut builder: S, - calls: Vec, + calls: Vec>, default_gas_limit: u64, chain_id: u64, tx_resp_builder: &T, @@ -77,10 +79,7 @@ pub fn execute_transactions( > where S: BlockBuilder>>>>, - T: RpcConvert< - Primitives = S::Primitives, - Network: RpcTypes>, - >, + T: RpcConvert, { builder.apply_pre_execution_changes()?; @@ -114,8 +113,10 @@ where /// them into primitive transactions. /// /// This will set the defaults as defined in +/// +/// [`TransactionRequest`]: alloy_rpc_types_eth::TransactionRequest pub fn resolve_transaction( - mut tx: TransactionRequest, + mut tx: RpcTxReq, default_gas_limit: u64, block_base_fee_per_gas: u64, chain_id: u64, @@ -124,67 +125,62 @@ pub fn resolve_transaction( ) -> Result, EthApiError> where DB::Error: Into, - T: RpcConvert< - Primitives: NodePrimitives, - Network: RpcTypes>, - >, + T: RpcConvert>, { // If we're missing any fields we try to fill nonce, gas and // gas price. 
- let tx_type = tx.preferred_type(); + let tx_type = tx.as_ref().output_tx_type(); - let from = if let Some(from) = tx.from { + let from = if let Some(from) = tx.as_ref().from() { from } else { - tx.from = Some(Address::ZERO); + tx.as_mut().set_from(Address::ZERO); Address::ZERO }; - if tx.nonce.is_none() { - tx.nonce = - Some(db.basic(from).map_err(Into::into)?.map(|acc| acc.nonce).unwrap_or_default()); + if tx.as_ref().nonce().is_none() { + tx.as_mut().set_nonce( + db.basic(from).map_err(Into::into)?.map(|acc| acc.nonce).unwrap_or_default(), + ); } - if tx.gas.is_none() { - tx.gas = Some(default_gas_limit); + if tx.as_ref().gas_limit().is_none() { + tx.as_mut().set_gas_limit(default_gas_limit); } - if tx.chain_id.is_none() { - tx.chain_id = Some(chain_id); + if tx.as_ref().chain_id().is_none() { + tx.as_mut().set_chain_id(chain_id); } - if tx.to.is_none() { - tx.to = Some(TxKind::Create); + if tx.as_ref().kind().is_none() { + tx.as_mut().set_kind(TxKind::Create); } // if we can't build the _entire_ transaction yet, we need to check the fee values - if tx.buildable_type().is_none() { - match tx_type { - TxType::Legacy | TxType::Eip2930 => { - if tx.gas_price.is_none() { - tx.gas_price = Some(block_base_fee_per_gas as u128); - } + if tx.as_ref().output_tx_type_checked().is_none() { + if tx_type.is_legacy() || tx_type.is_eip2930() { + if tx.as_ref().gas_price().is_none() { + tx.as_mut().set_gas_price(block_base_fee_per_gas as u128); } - _ => { - // set dynamic 1559 fees - if tx.max_fee_per_gas.is_none() { - let mut max_fee_per_gas = block_base_fee_per_gas as u128; - if let Some(prio_fee) = tx.max_priority_fee_per_gas { - // if a prio fee is provided we need to select the max fee accordingly - // because the base fee must be higher than the prio fee. - max_fee_per_gas = prio_fee.max(max_fee_per_gas); - } - tx.max_fee_per_gas = Some(max_fee_per_gas); - } - if tx.max_priority_fee_per_gas.is_none() { - tx.max_priority_fee_per_gas = Some(0); + } else { + // set dynamic 1559 fees + if tx.as_ref().max_fee_per_gas().is_none() { + let mut max_fee_per_gas = block_base_fee_per_gas as u128; + if let Some(prio_fee) = tx.as_ref().max_priority_fee_per_gas() { + // if a prio fee is provided we need to select the max fee accordingly + // because the base fee must be higher than the prio fee. 
+ max_fee_per_gas = prio_fee.max(max_fee_per_gas); } + tx.as_mut().set_max_fee_per_gas(max_fee_per_gas); + } + if tx.as_ref().max_priority_fee_per_gas().is_none() { + tx.as_mut().set_max_priority_fee_per_gas(0); } } } let tx = tx_resp_builder - .build_simulate_v1_transaction(tx.into()) + .build_simulate_v1_transaction(tx) .map_err(|e| EthApiError::other(e.into()))?; Ok(Recovered::new_unchecked(tx, from)) diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index 85b1bc4208c..4f91e7e63c0 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -78,7 +78,7 @@ pub trait DebugApiExt { impl DebugApiExt for T where T: EthApiClient - + DebugApiClient + + DebugApiClient + Sync, { type Provider = T; diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index b556a895045..8f71d1c4554 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -250,7 +250,7 @@ impl std::fmt::Debug for ReplayTransactionStream<'_> { } } -impl TraceApiExt for T { +impl + Sync> TraceApiExt for T { type Provider = T; fn trace_block_buffered(&self, params: I, n: usize) -> TraceBlockStream<'_> diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 7117c83cbad..6560aa45798 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -5,8 +5,7 @@ use alloy_primitives::{uint, Address, Bytes, B256}; use alloy_rlp::{Decodable, Encodable}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_eth::{ - state::EvmOverrides, transaction::TransactionRequest, Block as RpcBlock, BlockError, Bundle, - StateContext, TransactionInfo, + state::EvmOverrides, Block as RpcBlock, BlockError, Bundle, StateContext, TransactionInfo, }; use alloy_rpc_types_trace::geth::{ call::FlatCallFrame, BlockTraceResult, FourByteFrame, GethDebugBuiltInTracerType, @@ -26,6 +25,7 @@ use reth_revm::{ witness::ExecutionWitnessRecord, }; use reth_rpc_api::DebugApiServer; +use reth_rpc_convert::RpcTxReq; use reth_rpc_eth_api::{ helpers::{EthTransactions, TraceExt}, EthApiTypes, FromEthApiError, RpcNodeCore, @@ -265,7 +265,7 @@ where /// - `debug_traceCall` executes with __enabled__ basefee check, `eth_call` does not: pub async fn debug_trace_call( &self, - call: TransactionRequest, + call: RpcTxReq, block_id: Option, opts: GethDebugTracingCallOptions, ) -> Result { @@ -481,7 +481,7 @@ where /// Each following bundle increments block number by 1 and block timestamp by 12 seconds pub async fn debug_trace_call_many( &self, - bundles: Vec, + bundles: Vec>>, state_context: Option, opts: Option, ) -> Result>, Eth::Error> { @@ -897,7 +897,7 @@ where } #[async_trait] -impl DebugApiServer for DebugApi +impl DebugApiServer> for DebugApi where Eth: EthApiTypes + EthTransactions + TraceExt + 'static, Evm: ConfigureEvm>> + 'static, @@ -1035,7 +1035,7 @@ where /// Handler for `debug_traceCall` async fn debug_trace_call( &self, - request: TransactionRequest, + request: RpcTxReq, block_id: Option, opts: Option, ) -> RpcResult { @@ -1047,7 +1047,7 @@ where async fn debug_trace_call_many( &self, - bundles: Vec, + bundles: Vec>>, state_context: Option, opts: Option, ) -> RpcResult>> { diff --git a/crates/rpc/rpc/src/engine.rs b/crates/rpc/rpc/src/engine.rs index 33ef2b3e5fe..a0e0bd30931 100644 --- a/crates/rpc/rpc/src/engine.rs +++ b/crates/rpc/rpc/src/engine.rs @@ -1,8 +1,7 @@ use alloy_eips::{BlockId, BlockNumberOrTag}; use alloy_primitives::{Address, Bytes, 
B256, U256, U64}; use alloy_rpc_types_eth::{ - state::StateOverride, transaction::TransactionRequest, BlockOverrides, - EIP1186AccountProofResponse, Filter, Log, SyncStatus, + state::StateOverride, BlockOverrides, EIP1186AccountProofResponse, Filter, Log, SyncStatus, }; use alloy_serde::JsonStorageKey; use jsonrpsee::core::RpcResult as Result; @@ -37,8 +36,12 @@ impl EngineEthApi { } #[async_trait::async_trait] -impl EngineEthApiServer, RpcReceipt> - for EngineEthApi +impl + EngineEthApiServer< + RpcTxReq, + RpcBlock, + RpcReceipt, + > for EngineEthApi where Eth: EthApiServer< RpcTxReq, @@ -73,7 +76,7 @@ where /// Handler for: `eth_call` async fn call( &self, - request: TransactionRequest, + request: RpcTxReq, block_id: Option, state_overrides: Option, block_overrides: Option>, diff --git a/crates/rpc/rpc/src/eth/builder.rs b/crates/rpc/rpc/src/eth/builder.rs index 732ae1edf11..1b4374c1770 100644 --- a/crates/rpc/rpc/src/eth/builder.rs +++ b/crates/rpc/rpc/src/eth/builder.rs @@ -4,6 +4,7 @@ use crate::{eth::core::EthApiInner, EthApi}; use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; use reth_node_api::NodePrimitives; +use reth_rpc_convert::RpcTypes; use reth_rpc_eth_types::{ fee_history::fee_history_cache_new_blocks_task, EthStateCache, EthStateCacheConfig, FeeHistoryCache, FeeHistoryCacheConfig, GasCap, GasPriceOracle, GasPriceOracleConfig, @@ -13,14 +14,14 @@ use reth_rpc_server_types::constants::{ }; use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner, TokioTaskExecutor}; -use std::sync::Arc; +use std::{marker::PhantomData, sync::Arc}; /// A helper to build the `EthApi` handler instance. /// /// This builder type contains all settings to create an [`EthApiInner`] or an [`EthApi`] instance /// directly. #[derive(Debug)] -pub struct EthApiBuilder +pub struct EthApiBuilder where Provider: BlockReaderIdExt, { @@ -28,6 +29,7 @@ where pool: Pool, network: Network, evm_config: EvmConfig, + rpc: PhantomData, gas_cap: GasCap, max_simulate_blocks: u64, eth_proof_window: u64, @@ -41,9 +43,10 @@ where task_spawner: Box, } -impl EthApiBuilder +impl EthApiBuilder where Provider: BlockReaderIdExt, + Rpc: RpcTypes, { /// Creates a new `EthApiBuilder` instance. pub fn new(provider: Provider, pool: Pool, network: Network, evm_config: EvmConfig) -> Self @@ -55,6 +58,7 @@ where pool, network, evm_config, + rpc: PhantomData, eth_cache: None, gas_oracle: None, gas_cap: GasCap::default(), @@ -154,7 +158,7 @@ where /// /// This function panics if the blocking task pool cannot be built. /// This will panic if called outside the context of a Tokio runtime. - pub fn build_inner(self) -> EthApiInner + pub fn build_inner(self) -> EthApiInner where Provider: BlockReaderIdExt + StateProviderFactory @@ -173,6 +177,7 @@ where provider, pool, network, + rpc: _, evm_config, eth_state_cache_config, gas_oracle_config, @@ -231,7 +236,7 @@ where /// /// This function panics if the blocking task pool cannot be built. /// This will panic if called outside the context of a Tokio runtime. 
- pub fn build(self) -> EthApi + pub fn build(self) -> EthApi where Provider: BlockReaderIdExt + StateProviderFactory diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index f6cceee46e0..a8699cb5af7 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -10,8 +10,9 @@ use alloy_network::Ethereum; use alloy_primitives::{Bytes, U256}; use derive_more::Deref; use reth_node_api::{FullNodeComponents, FullNodeTypes}; +use reth_rpc_convert::RpcTypes; use reth_rpc_eth_api::{ - helpers::{EthSigner, SpawnBlocking}, + helpers::{spec::SignersForRpc, SpawnBlocking}, node::RpcNodeCoreExt, EthApiTypes, RpcNodeCore, }; @@ -31,19 +32,21 @@ use tokio::sync::{broadcast, Mutex}; const DEFAULT_BROADCAST_CAPACITY: usize = 2000; /// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`]. -pub type EthApiFor = EthApi< +pub type EthApiFor = EthApi< ::Provider, ::Pool, ::Network, ::Evm, + Rpc, >; /// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`]. -pub type EthApiBuilderFor = EthApiBuilder< +pub type EthApiBuilderFor = EthApiBuilder< ::Provider, ::Pool, ::Network, ::Evm, + Rpc, >; /// `Eth` API implementation. @@ -61,26 +64,29 @@ pub type EthApiBuilderFor = EthApiBuilder< /// While this type requires various unrestricted generic components, trait bounds are enforced when /// additional traits are implemented for this type. #[derive(Deref)] -pub struct EthApi { +pub struct EthApi { /// All nested fields bundled together. #[deref] - pub(super) inner: Arc>, + pub(super) inner: Arc>, /// Transaction RPC response builder. pub tx_resp_builder: EthRpcConverter, } -impl Clone for EthApi +impl Clone + for EthApi where Provider: BlockReader, + Rpc: RpcTypes, { fn clone(&self) -> Self { Self { inner: self.inner.clone(), tx_resp_builder: self.tx_resp_builder.clone() } } } -impl EthApi +impl EthApi where Provider: BlockReaderIdExt, + Rpc: RpcTypes, { /// Convenience fn to obtain a new [`EthApiBuilder`] instance with mandatory components. 
/// @@ -94,12 +100,13 @@ where /// # Create an instance with noop ethereum implementations /// /// ```no_run + /// use alloy_network::Ethereum; /// use reth_evm_ethereum::EthEvmConfig; /// use reth_network_api::noop::NoopNetwork; /// use reth_provider::noop::NoopProvider; /// use reth_rpc::EthApi; /// use reth_transaction_pool::noop::NoopTransactionPool; - /// let eth_api = EthApi::builder( + /// let eth_api = EthApi::<_, _, _, _, Ethereum>::builder( /// NoopProvider::default(), /// NoopTransactionPool::default(), /// NoopNetwork::default(), @@ -112,7 +119,7 @@ where pool: Pool, network: Network, evm_config: EvmConfig, - ) -> EthApiBuilder { + ) -> EthApiBuilder { EthApiBuilder::new(provider, pool, network, evm_config) } @@ -152,7 +159,8 @@ where } } -impl EthApiTypes for EthApi +impl EthApiTypes + for EthApi where Self: Send + Sync, Provider: BlockReader, @@ -166,12 +174,14 @@ where } } -impl RpcNodeCore for EthApi +impl RpcNodeCore + for EthApi where Provider: BlockReader + NodePrimitivesProvider + Clone + Unpin, Pool: Send + Sync + Clone + Unpin, Network: Send + Sync + Clone, EvmConfig: Send + Sync + Clone + Unpin, + Rpc: RpcTypes, { type Primitives = Provider::Primitives; type Provider = Provider; @@ -201,13 +211,14 @@ where } } -impl RpcNodeCoreExt - for EthApi +impl RpcNodeCoreExt + for EthApi where Provider: BlockReader + NodePrimitivesProvider + Clone + Unpin, Pool: Send + Sync + Clone + Unpin, Network: Send + Sync + Clone, EvmConfig: Send + Sync + Clone + Unpin, + Rpc: RpcTypes, { #[inline] fn cache(&self) -> &EthStateCache, ProviderReceipt> { @@ -215,21 +226,23 @@ where } } -impl std::fmt::Debug - for EthApi +impl std::fmt::Debug + for EthApi where Provider: BlockReader, + Rpc: RpcTypes, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EthApi").finish_non_exhaustive() } } -impl SpawnBlocking - for EthApi +impl SpawnBlocking + for EthApi where - Self: Clone + Send + Sync + 'static, + Self: EthApiTypes + Clone + Send + Sync + 'static, Provider: BlockReader, + Rpc: RpcTypes, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -249,7 +262,7 @@ where /// Container type `EthApi` #[expect(missing_debug_implementations)] -pub struct EthApiInner { +pub struct EthApiInner { /// The transaction pool. pool: Pool, /// The provider that can interact with the chain. @@ -257,7 +270,7 @@ pub struct EthApiInner { /// An interface to interact with the network network: Network, /// All configured Signers - signers: parking_lot::RwLock>>>, + signers: SignersForRpc, /// The async cache frontend for eth related data eth_cache: EthStateCache, /// The async gas oracle frontend for gas price suggestions @@ -288,9 +301,10 @@ pub struct EthApiInner { raw_tx_sender: broadcast::Sender, } -impl EthApiInner +impl EthApiInner where Provider: BlockReaderIdExt, + Rpc: RpcTypes, { /// Creates a new, shareable instance using the default tokio task spawner. #[expect(clippy::too_many_arguments)] @@ -344,9 +358,10 @@ where } } -impl EthApiInner +impl EthApiInner where Provider: BlockReader, + Rpc: RpcTypes, { /// Returns a handle to data on disk. #[inline] @@ -418,9 +433,7 @@ where /// Returns a handle to the signers. 
#[inline] - pub const fn signers( - &self, - ) -> &parking_lot::RwLock>>> { + pub const fn signers(&self) -> &SignersForRpc { &self.signers } @@ -466,6 +479,7 @@ mod tests { use crate::{EthApi, EthApiBuilder}; use alloy_consensus::{Block, BlockBody, Header}; use alloy_eips::BlockNumberOrTag; + use alloy_network::Ethereum; use alloy_primitives::{Signature, B256, U64}; use alloy_rpc_types::FeeHistory; use jsonrpsee_types::error::INVALID_PARAMS_CODE; @@ -481,6 +495,8 @@ mod tests { use reth_testing_utils::generators; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; + type FakeEthApi = EthApi; + fn build_test_eth_api< P: BlockReaderIdExt< Block = reth_ethereum_primitives::Block, @@ -495,7 +511,7 @@ mod tests { + 'static, >( provider: P, - ) -> EthApi { + ) -> EthApi { EthApiBuilder::new( provider.clone(), testing_pool(), @@ -511,7 +527,7 @@ mod tests { mut oldest_block: Option, block_count: u64, mock_provider: MockEthProvider, - ) -> (EthApi, Vec, Vec) { + ) -> (FakeEthApi, Vec, Vec) { let mut rng = generators::rng(); // Build mock data @@ -597,7 +613,7 @@ mod tests { /// Invalid block range #[tokio::test] async fn test_fee_history_empty() { - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( &build_test_eth_api(NoopProvider::default()), U64::from(1), BlockNumberOrTag::Latest, @@ -619,7 +635,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(newest_block + 1), newest_block.into(), @@ -642,7 +658,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(1), (newest_block + 1000).into(), @@ -665,7 +681,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(0), newest_block.into(), diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 4eecdee6490..78e28edb467 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -1084,6 +1084,7 @@ impl< mod tests { use super::*; use crate::{eth::EthApi, EthApiBuilder}; + use alloy_network::Ethereum; use alloy_primitives::FixedBytes; use rand::Rng; use reth_chainspec::ChainSpecProvider; @@ -1121,7 +1122,7 @@ mod tests { // Helper function to create a test EthApi instance fn build_test_eth_api( provider: MockEthProvider, - ) -> EthApi { + ) -> EthApi { EthApiBuilder::new( provider.clone(), testing_pool(), diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index b9ae198bba1..fd4a9cc6ea0 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -19,18 +19,20 @@ use reth_transaction_pool::{PoolTransaction, TransactionPool}; use crate::EthApi; -impl EthBlocks for EthApi +impl EthBlocks + for EthApi where Self: LoadBlock< Error = EthApiError, - NetworkTypes: RpcTypes, - RpcConvert: RpcConvert, + NetworkTypes = Rpc, + RpcConvert: RpcConvert, Provider: BlockReader< Transaction = 
reth_ethereum_primitives::TransactionSigned, Receipt = reth_ethereum_primitives::Receipt, >, >, Provider: BlockReader + ChainSpecProvider, + Rpc: RpcTypes, { async fn block_receipts( &self, @@ -78,7 +80,8 @@ where } } -impl LoadBlock for EthApi +impl LoadBlock + for EthApi where Self: LoadPendingBlock + SpawnBlocking @@ -91,5 +94,6 @@ where >, Provider: BlockReader, EvmConfig: ConfigureEvm::Primitives>, + Rpc: RpcTypes, { } diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index 1a41b8d5768..0053ca15478 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -2,7 +2,6 @@ use crate::EthApi; use alloy_evm::block::BlockExecutorFactory; -use alloy_rpc_types_eth::TransactionRequest; use reth_errors::ProviderError; use reth_evm::{ConfigureEvm, EvmFactory, TxEnvFor}; use reth_node_api::NodePrimitives; @@ -15,11 +14,12 @@ use reth_storage_api::{BlockReader, ProviderHeader, ProviderTx}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use revm::context::TxEnv; -impl EthCall for EthApi +impl EthCall + for EthApi where - Self: EstimateCall - + LoadPendingBlock - + FullEthApiTypes + Self: EstimateCall + + LoadPendingBlock + + FullEthApiTypes + RpcNodeCoreExt< Pool: TransactionPool< Transaction: PoolTransaction>, @@ -29,10 +29,12 @@ where >, EvmConfig: ConfigureEvm::Primitives>, Provider: BlockReader, + Rpc: RpcTypes, { } -impl Call for EthApi +impl Call + for EthApi where Self: LoadState< Evm: ConfigureEvm< @@ -42,13 +44,14 @@ where SignedTx = ProviderTx, >, >, - RpcConvert: RpcConvert, Network = Self::NetworkTypes>, - NetworkTypes: RpcTypes>, + RpcConvert: RpcConvert, Network = Rpc>, + NetworkTypes = Rpc, Error: FromEvmError + From<::Error> + From, > + SpawnBlocking, Provider: BlockReader, + Rpc: RpcTypes, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -61,9 +64,11 @@ where } } -impl EstimateCall for EthApi +impl EstimateCall + for EthApi where - Self: Call, + Self: Call, Provider: BlockReader, + Rpc: RpcTypes, { } diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs index 87adb42b2b5..45b0a2a70dc 100644 --- a/crates/rpc/rpc/src/eth/helpers/fees.rs +++ b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -1,13 +1,15 @@ //! Contains RPC handler implementations for fee history. 
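+//!
+//! A request sketch (a hedged example of the standard `eth_feeHistory` call these
+//! handlers serve; params are block count, newest block, reward percentiles):
+//!
+//! ```text
+//! {"jsonrpc": "2.0", "id": 1, "method": "eth_feeHistory", "params": ["0x5", "latest", [25.0, 75.0]]}
+//! ```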
use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; +use reth_rpc_convert::RpcTypes; use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; use reth_rpc_eth_types::{FeeHistoryCache, GasPriceOracle}; use reth_storage_api::{BlockReader, BlockReaderIdExt, ProviderHeader, StateProviderFactory}; use crate::EthApi; -impl EthFees for EthApi +impl EthFees + for EthApi where Self: LoadFee< Provider: ChainSpecProvider< @@ -15,15 +17,18 @@ where >, >, Provider: BlockReader, + Rpc: RpcTypes, { } -impl LoadFee for EthApi +impl LoadFee + for EthApi where Self: LoadBlock, Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory, + Rpc: RpcTypes, { #[inline] fn gas_oracle(&self) -> &GasPriceOracle { diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index dd65fd53ca9..acb4072bff6 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -20,15 +20,13 @@ use reth_storage_api::{ use reth_transaction_pool::{PoolTransaction, TransactionPool}; use revm_primitives::B256; -impl LoadPendingBlock - for EthApi +impl LoadPendingBlock + for EthApi where Self: SpawnBlocking< - NetworkTypes: RpcTypes< - Header = alloy_rpc_types_eth::Header>, - >, + NetworkTypes = Rpc, Error: FromEvmError, - RpcConvert: RpcConvert, + RpcConvert: RpcConvert, > + RpcNodeCore< Provider: BlockReaderIdExt + ChainSpecProvider @@ -48,6 +46,7 @@ where >, >, Provider: BlockReader, + Rpc: RpcTypes
>>, { #[inline] fn pending_block( diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 44b9910b2fa..fee7724df5e 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,21 +1,30 @@ //! Builds an RPC receipt response w.r.t. data layout of network. use crate::EthApi; -use alloy_consensus::transaction::{SignerRecoverable, TransactionMeta}; +use alloy_consensus::{ + crypto::RecoveryError, + transaction::{SignerRecoverable, TransactionMeta}, +}; +use alloy_rpc_types_eth::TransactionReceipt; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_ethereum_primitives::{Receipt, TransactionSigned}; -use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcNodeCoreExt, RpcReceipt}; +use reth_rpc_convert::RpcTypes; +use reth_rpc_eth_api::{ + helpers::LoadReceipt, EthApiTypes, FromEthApiError, RpcNodeCoreExt, RpcReceipt, +}; use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; use reth_storage_api::{BlockReader, ReceiptProvider, TransactionsProvider}; use std::borrow::Cow; -impl LoadReceipt for EthApi +impl LoadReceipt + for EthApi where Self: RpcNodeCoreExt< - Provider: TransactionsProvider - + ReceiptProvider, - >, + Provider: TransactionsProvider + + ReceiptProvider, + > + EthApiTypes>, Provider: BlockReader + ChainSpecProvider, + Rpc: RpcTypes, { async fn build_transaction_receipt( &self, diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index 01a07c4436d..fcd8161adaa 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -5,19 +5,19 @@ use std::collections::HashMap; use crate::EthApi; use alloy_dyn_abi::TypedData; use alloy_eips::eip2718::Decodable2718; -use alloy_network::{eip2718::Encodable2718, EthereumWallet, TransactionBuilder}; use alloy_primitives::{eip191_hash_message, Address, Signature, B256}; -use alloy_rpc_types_eth::TransactionRequest; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; +use reth_rpc_convert::{RpcTypes, SignableTxRequest}; use reth_rpc_eth_api::helpers::{signer::Result, AddDevSigners, EthSigner}; use reth_rpc_eth_types::SignError; -use reth_storage_api::BlockReader; +use reth_storage_api::{BlockReader, ProviderTx}; -impl AddDevSigners - for EthApi +impl AddDevSigners + for EthApi where Provider: BlockReader, + Rpc: RpcTypes>>, { fn with_dev_accounts(&self) { *self.inner.signers().write() = DevSigner::random_signers(20) @@ -32,15 +32,11 @@ pub struct DevSigner { } impl DevSigner { - /// Generates a random dev signer which satisfies [`EthSigner`] trait - pub fn random() -> Box> { - let mut signers = Self::random_signers(1); - signers.pop().expect("expect to generate at least one signer") - } - /// Generates provided number of random dev signers /// which satisfy [`EthSigner`] trait - pub fn random_signers(num: u32) -> Vec + 'static>> { + pub fn random_signers>( + num: u32, + ) -> Vec + 'static>> { let mut signers = Vec::with_capacity(num as usize); for _ in 0..num { let sk = PrivateKeySigner::random(); @@ -49,7 +45,7 @@ impl DevSigner { let addresses = vec![address]; let accounts = HashMap::from([(address, sk)]); - signers.push(Box::new(Self { addresses, accounts }) as Box>); + signers.push(Box::new(Self { addresses, accounts }) as Box>); } signers } @@ -65,7 +61,7 @@ impl DevSigner { } #[async_trait::async_trait] -impl EthSigner for DevSigner { +impl> EthSigner for DevSigner { fn accounts(&self) -> Vec
{ self.addresses.clone() } @@ -81,21 +77,17 @@ impl EthSigner for DevSigner { self.sign_hash(hash, address) } - async fn sign_transaction(&self, request: TransactionRequest, address: &Address) -> Result { + async fn sign_transaction(&self, request: TxReq, address: &Address) -> Result { // create local signer wallet from signing key let signer = self.accounts.get(address).ok_or(SignError::NoAccount)?.clone(); - let wallet = EthereumWallet::from(signer); // build and sign transaction with signer - let txn_envelope = - request.build(&wallet).await.map_err(|_| SignError::InvalidTransactionRequest)?; - - // decode transaction into signed transaction type - let encoded = txn_envelope.encoded_2718(); - let txn_signed = T::decode_2718(&mut encoded.as_ref()) + let tx = request + .try_build_and_sign(&signer) + .await .map_err(|_| SignError::InvalidTransactionRequest)?; - Ok(txn_signed) + Ok(tx) } fn sign_typed_data(&self, address: Address, payload: &TypedData) -> Result { @@ -109,7 +101,7 @@ mod tests { use super::*; use alloy_consensus::Transaction; use alloy_primitives::{Bytes, U256}; - use alloy_rpc_types_eth::TransactionInput; + use alloy_rpc_types_eth::{TransactionInput, TransactionRequest}; use reth_ethereum_primitives::TransactionSigned; use revm_primitives::TxKind; diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index a4a8ad7531a..3bec5a67a09 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -1,12 +1,17 @@ +use alloy_network::Ethereum; use alloy_primitives::U256; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_network_api::NetworkInfo; -use reth_rpc_eth_api::{helpers::EthApiSpec, RpcNodeCore}; +use reth_rpc_eth_api::{ + helpers::{spec::SignersForApi, EthApiSpec}, + RpcNodeCore, +}; use reth_storage_api::{BlockNumReader, BlockReader, ProviderTx, StageCheckpointReader}; use crate::EthApi; -impl EthApiSpec for EthApi +impl EthApiSpec + for EthApi where Self: RpcNodeCore< Provider: ChainSpecProvider @@ -17,15 +22,13 @@ where Provider: BlockReader, { type Transaction = ProviderTx; + type Rpc = Ethereum; fn starting_block(&self) -> U256 { self.inner.starting_block() } - fn signers( - &self, - ) -> &parking_lot::RwLock>>> - { + fn signers(&self) -> &SignersForApi { self.inner.signers() } } diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 90c9e32c64d..62a94f1bd7e 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -1,35 +1,40 @@ //! Contains RPC handler implementations specific to state. 
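+//!
+//! A request sketch (a hedged example of the standard `eth_getProof` call, which is
+//! bounded by the `max_proof_window` below; params are address, storage keys, block id):
+//!
+//! ```text
+//! {"jsonrpc": "2.0", "id": 1, "method": "eth_getProof", "params": ["0x...", [], "latest"]}
+//! ```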
use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_rpc_convert::RpcTypes; use reth_storage_api::{BlockReader, StateProviderFactory}; use reth_transaction_pool::TransactionPool; use reth_rpc_eth_api::{ helpers::{EthState, LoadState, SpawnBlocking}, - RpcNodeCoreExt, + EthApiTypes, RpcNodeCoreExt, }; use crate::EthApi; -impl EthState for EthApi +impl EthState + for EthApi where Self: LoadState + SpawnBlocking, Provider: BlockReader, + Rpc: RpcTypes, { fn max_proof_window(&self) -> u64 { self.inner.eth_proof_window() } } -impl LoadState for EthApi +impl LoadState + for EthApi where Self: RpcNodeCoreExt< - Provider: BlockReader - + StateProviderFactory - + ChainSpecProvider, - Pool: TransactionPool, - >, + Provider: BlockReader + + StateProviderFactory + + ChainSpecProvider, + Pool: TransactionPool, + > + EthApiTypes, Provider: BlockReader, + Rpc: RpcTypes, { } @@ -38,6 +43,7 @@ mod tests { use super::*; use alloy_consensus::Header; use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M; + use alloy_network::Ethereum; use alloy_primitives::{Address, StorageKey, StorageValue, U256}; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; @@ -53,7 +59,7 @@ mod tests { use reth_transaction_pool::test_utils::{testing_pool, TestPool}; use std::collections::HashMap; - fn noop_eth_api() -> EthApi { + fn noop_eth_api() -> EthApi { let pool = testing_pool(); let evm_config = EthEvmConfig::mainnet(); @@ -76,7 +82,7 @@ mod tests { fn mock_eth_api( accounts: HashMap, - ) -> EthApi { + ) -> EthApi { let pool = testing_pool(); let mock_provider = MockEthProvider::default(); diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index 98f3e255818..a080264698d 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -2,6 +2,7 @@ use reth_evm::ConfigureEvm; use reth_node_api::NodePrimitives; +use reth_rpc_convert::RpcTypes; use reth_rpc_eth_api::{ helpers::{LoadState, Trace}, FromEvmError, @@ -10,7 +11,8 @@ use reth_storage_api::{BlockReader, ProviderHeader, ProviderTx}; use crate::EthApi; -impl Trace for EthApi +impl Trace + for EthApi where Self: LoadState< Provider: BlockReader, @@ -23,5 +25,6 @@ where Error: FromEvmError, >, Provider: BlockReader, + Rpc: RpcTypes, { } diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index e7efc43ac45..313b5778785 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -2,22 +2,24 @@ use crate::EthApi; use alloy_primitives::{Bytes, B256}; +use reth_rpc_convert::RpcTypes; use reth_rpc_eth_api::{ - helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, + helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction, SpawnBlocking}, + EthApiTypes, FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, }; use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_storage_api::{BlockReader, BlockReaderIdExt, ProviderTx, TransactionsProvider}; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; -impl EthTransactions - for EthApi +impl EthTransactions + for EthApi where - Self: LoadTransaction, + Self: LoadTransaction + EthApiTypes, Provider: BlockReader>, + Rpc: RpcTypes, { #[inline] - fn signers(&self) -> &parking_lot::RwLock>>>> { + fn signers(&self) -> &SignersForRpc { self.inner.signers() } @@ -43,13 +45,15 @@ 
where } } -impl LoadTransaction - for EthApi +impl LoadTransaction + for EthApi where Self: SpawnBlocking + FullEthApiTypes - + RpcNodeCoreExt, + + RpcNodeCoreExt + + EthApiTypes, Provider: BlockReader, + Rpc: RpcTypes, { } diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index 690fb33e871..d2905095900 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -54,6 +54,7 @@ pub use miner::MinerApi; pub use net::NetApi; pub use otterscan::OtterscanApi; pub use reth::RethApi; +pub use reth_rpc_convert::RpcTypes; pub use rpc::RPCApi; pub use trace::TraceApi; pub use txpool::TxPoolApi; diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 5adc54168ef..787b7dfd1bd 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -4,7 +4,6 @@ use alloy_evm::block::calc::{base_block_reward_pre_merge, block_reward, ommer_re use alloy_primitives::{map::HashSet, Bytes, B256, U256}; use alloy_rpc_types_eth::{ state::{EvmOverrides, StateOverride}, - transaction::TransactionRequest, BlockOverrides, Index, }; use alloy_rpc_types_trace::{ @@ -20,6 +19,7 @@ use reth_evm::ConfigureEvm; use reth_primitives_traits::{BlockBody, BlockHeader}; use reth_revm::{database::StateProviderDatabase, db::CacheDB}; use reth_rpc_api::TraceApiServer; +use reth_rpc_convert::RpcTxReq; use reth_rpc_eth_api::{ helpers::{Call, LoadPendingBlock, LoadTransaction, Trace, TraceExt}, FromEthApiError, RpcNodeCore, @@ -87,7 +87,7 @@ where /// Executes the given call and returns a number of possible traces for it. pub async fn trace_call( &self, - trace_request: TraceCallRequest, + trace_request: TraceCallRequest>, ) -> Result { let at = trace_request.block_id.unwrap_or_default(); let config = TracingInspectorConfig::from_parity_config(&trace_request.trace_types); @@ -142,7 +142,7 @@ where /// Note: Allows tracing dependent transactions, hence all transactions are traced in sequence pub async fn trace_call_many( &self, - calls: Vec<(TransactionRequest, HashSet)>, + calls: Vec<(RpcTxReq, HashSet)>, block_id: Option, ) -> Result, Eth::Error> { let at = block_id.unwrap_or(BlockId::pending()); @@ -568,7 +568,7 @@ where } #[async_trait] -impl TraceApiServer for TraceApi +impl TraceApiServer> for TraceApi where Eth: TraceExt + 'static, { @@ -577,7 +577,7 @@ where /// Handler for `trace_call` async fn trace_call( &self, - call: TransactionRequest, + call: RpcTxReq, trace_types: HashSet, block_id: Option, state_overrides: Option, @@ -592,7 +592,7 @@ where /// Handler for `trace_callMany` async fn trace_call_many( &self, - calls: Vec<(TransactionRequest, HashSet)>, + calls: Vec<(RpcTxReq, HashSet)>, block_id: Option, ) -> RpcResult> { let _permit = self.acquire_trace_permit().await; diff --git a/crates/trie/common/Cargo.toml b/crates/trie/common/Cargo.toml index 29b75342070..0aa93adb598 100644 --- a/crates/trie/common/Cargo.toml +++ b/crates/trie/common/Cargo.toml @@ -105,6 +105,7 @@ serde-bincode-compat = [ "alloy-consensus/serde-bincode-compat", "dep:serde_with", "alloy-genesis/serde-bincode-compat", + "alloy-rpc-types-eth?/serde-bincode-compat", ] test-utils = [ "dep:plain_hasher", From 253721d22604850180a1af07f7f3e7657dea25c8 Mon Sep 17 00:00:00 2001 From: Rez Date: Tue, 15 Jul 2025 19:13:14 +1000 Subject: [PATCH 166/305] feat: add generic database support for Receipt (#17409) --- crates/storage/db-api/src/models/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/storage/db-api/src/models/mod.rs b/crates/storage/db-api/src/models/mod.rs index 
af9baa1867e..cffa9d910f8 100644 --- a/crates/storage/db-api/src/models/mod.rs +++ b/crates/storage/db-api/src/models/mod.rs @@ -215,7 +215,7 @@ impl_compression_for_compact!( Header, Account, Log, - Receipt, + Receipt, TxType, StorageEntry, BranchNodeCompact, From 13d3d9b57713dbc9c292ae0bbe7eaebb48f3a9a5 Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Tue, 15 Jul 2025 14:46:27 +0530 Subject: [PATCH 167/305] fix(`docs`): rustdoc search functionality (#17410) Co-authored-by: Claude --- docs/vocs/scripts/inject-cargo-docs.ts | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/docs/vocs/scripts/inject-cargo-docs.ts b/docs/vocs/scripts/inject-cargo-docs.ts index 1ea30d34790..f2d9869aecf 100644 --- a/docs/vocs/scripts/inject-cargo-docs.ts +++ b/docs/vocs/scripts/inject-cargo-docs.ts @@ -1,5 +1,4 @@ import { promises as fs } from 'fs'; -import { join, relative } from 'path'; import { glob } from 'glob'; const CARGO_DOCS_PATH = '../../target/doc'; @@ -86,6 +85,23 @@ async function injectCargoDocs() { await fs.writeFile(file, content, 'utf-8'); } + // Find the actual search JS filename from the HTML files + let actualSearchJsFile = ''; + for (const htmlFile of htmlFiles) { + const htmlContent = await fs.readFile(htmlFile, 'utf-8'); + const searchMatch = htmlContent.match(/data-search-js="[^"]*\/([^"]+)"/); + if (searchMatch && searchMatch[1]) { + actualSearchJsFile = searchMatch[1]; + console.log(`Found search JS file: ${actualSearchJsFile} in ${htmlFile}`); + break; + } + } + + if (!actualSearchJsFile) { + console.error('Could not detect search JS filename from HTML files'); + process.exit(1); + } + // Also fix paths in JavaScript files const jsFiles = await glob(`${VOCS_DIST_PATH}/**/*.js`); @@ -120,9 +136,10 @@ async function injectCargoDocs() { ); // Fix the search-js variable to return just the filename + // Use the detected search filename content = content.replace( /getVar\("search-js"\)/g, - `"search-f7877310.js"` + `"${actualSearchJsFile}"` ); // Fix the search index loading path From 13c59dc1c455d9a4ebe1b819300860a6e0fd1464 Mon Sep 17 00:00:00 2001 From: cakevm Date: Tue, 15 Jul 2025 11:20:58 +0200 Subject: [PATCH 168/305] feat(alloy-provider): implement header methods (#17402) --- crates/alloy-provider/src/lib.rs | 37 +++++++++++++++++++++++++++----- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/crates/alloy-provider/src/lib.rs b/crates/alloy-provider/src/lib.rs index c3f5e40a4da..0a5c1475d3c 100644 --- a/crates/alloy-provider/src/lib.rs +++ b/crates/alloy-provider/src/lib.rs @@ -33,7 +33,7 @@ use reth_db_api::{ models::StoredBlockBodyIndices, }; use reth_errors::{ProviderError, ProviderResult}; -use reth_node_types::{BlockTy, HeaderTy, NodeTypes, PrimitivesTy, ReceiptTy, TxTy}; +use reth_node_types::{Block, BlockTy, HeaderTy, NodeTypes, PrimitivesTy, ReceiptTy, TxTy}; use reth_primitives::{Account, Bytecode, RecoveredBlock, SealedHeader, TransactionMeta}; use reth_provider::{ AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BytecodeReader, @@ -279,15 +279,42 @@ where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, + BlockTy: TryFromBlockResponse, { type Header = HeaderTy; - fn header(&self, _block_hash: &BlockHash) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn header(&self, block_hash: &BlockHash) -> ProviderResult> { + let block_response = self.block_on_async(async { + 
self.provider.get_block_by_hash(*block_hash).await.map_err(ProviderError::other) + })?; + + let Some(block_response) = block_response else { + // If the block was not found, return None + return Ok(None); + }; + + // Convert the network block response to primitive block + let block = as TryFromBlockResponse>::from_block_response(block_response) + .map_err(ProviderError::other)?; + + Ok(Some(block.into_header())) } - fn header_by_number(&self, _num: u64) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn header_by_number(&self, num: u64) -> ProviderResult> { + let block_response = self.block_on_async(async { + self.provider.get_block_by_number(num.into()).await.map_err(ProviderError::other) + })?; + + let Some(block_response) = block_response else { + // If the block was not found, return None + return Ok(None); + }; + + // Convert the network block response to primitive block + let block = as TryFromBlockResponse>::from_block_response(block_response) + .map_err(ProviderError::other)?; + + Ok(Some(block.into_header())) } fn header_td(&self, _hash: &BlockHash) -> ProviderResult> { From 00d259dbeafe7b06b78a819452569be5a452cf4a Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 15 Jul 2025 11:28:21 +0200 Subject: [PATCH 169/305] feat(sdk): make engine API (auth server) optional for custom consensus integrations (#17376) --- crates/node/builder/src/rpc.rs | 44 ++++++++++++++++++++----- crates/node/core/src/args/rpc_server.rs | 8 +++++ docs/vocs/docs/pages/cli/reth/node.mdx | 5 +++ 3 files changed, 48 insertions(+), 9 deletions(-) diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 82e94287442..6ab2395cd5e 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -681,12 +681,31 @@ where } /// Launches the RPC servers with the given context and an additional hook for extending - /// modules. + /// modules. Whether the auth server is launched depends on the CLI configuration. pub async fn launch_add_ons_with( self, ctx: AddOnsContext<'_, N>, ext: F, ) -> eyre::Result> + where + F: FnOnce(RpcModuleContainer<'_, N, EthB::EthApi>) -> eyre::Result<()>, + { + // Check CLI config to determine if auth server should be disabled + let disable_auth = ctx.config.rpc.disable_auth_server; + self.launch_add_ons_with_opt_engine(ctx, ext, disable_auth).await + } + + /// Launches the RPC servers with the given context and an additional hook for extending + /// modules. Optionally disables the auth server based on the `disable_auth` parameter. + /// + /// When `disable_auth` is true, the auth server will not be started and a noop handle + /// will be used instead. 
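+    /// A usage sketch (hedged; assumes an `AddOnsContext` named `ctx` and a no-op
+    /// extension closure — passing `true` skips the auth server and substitutes a
+    /// noop handle):
+    ///
+    /// ```ignore
+    /// let handles = add_ons.launch_add_ons_with_opt_engine(ctx, |_| Ok(()), true).await?;
+    /// ```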
+ pub async fn launch_add_ons_with_opt_engine( + self, + ctx: AddOnsContext<'_, N>, + ext: F, + disable_auth: bool, + ) -> eyre::Result> where F: FnOnce(RpcModuleContainer<'_, N, EthB::EthApi>) -> eyre::Result<()>, { @@ -705,14 +724,21 @@ where } = setup_ctx; let server_config = config.rpc.rpc_server_config().set_rpc_middleware(rpc_middleware); - let auth_module_clone = auth_module.clone(); - - // launch servers concurrently - let (rpc, auth) = futures::future::try_join( - Self::launch_rpc_server_internal(server_config, &modules), - Self::launch_auth_server_internal(auth_module_clone, auth_config), - ) - .await?; + + let (rpc, auth) = if disable_auth { + // Only launch the RPC server, use a noop auth handle + let rpc = Self::launch_rpc_server_internal(server_config, &modules).await?; + (rpc, AuthServerHandle::noop()) + } else { + let auth_module_clone = auth_module.clone(); + // launch servers concurrently + let (rpc, auth) = futures::future::try_join( + Self::launch_rpc_server_internal(server_config, &modules), + Self::launch_auth_server_internal(auth_module_clone, auth_config), + ) + .await?; + (rpc, auth) + }; let handles = RethRpcServerHandles { rpc, auth }; diff --git a/crates/node/core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs index 120a3335936..5a2d32353b7 100644 --- a/crates/node/core/src/args/rpc_server.rs +++ b/crates/node/core/src/args/rpc_server.rs @@ -119,6 +119,13 @@ pub struct RpcServerArgs { #[arg(long = "auth-ipc.path", default_value_t = constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string())] pub auth_ipc_path: String, + /// Disable the auth/engine API server. + /// + /// This will prevent the authenticated engine-API server from starting. Use this if you're + /// running a node that doesn't need to serve engine API requests. + #[arg(long = "disable-auth-server", alias = "disable-engine-api")] + pub disable_auth_server: bool, + /// Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and /// `--ws.api`. /// @@ -335,6 +342,7 @@ impl Default for RpcServerArgs { auth_jwtsecret: None, auth_ipc: false, auth_ipc_path: constants::DEFAULT_ENGINE_API_IPC_ENDPOINT.to_string(), + disable_auth_server: false, rpc_jwtsecret: None, rpc_max_request_size: RPC_DEFAULT_MAX_REQUEST_SIZE_MB.into(), rpc_max_response_size: RPC_DEFAULT_MAX_RESPONSE_SIZE_MB.into(), diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index d6a5e3e544b..2b033b88ac7 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -312,6 +312,11 @@ RPC: [default: _engine_api.ipc] + --disable-auth-server + Disable the auth/engine API server. + + This will prevent the authenticated engine-API server from starting. Use this if you're running a node that doesn't need to serve engine API requests. + --rpc.jwtsecret Hex encoded JWT secret to authenticate the regular RPC server(s), see `--http.api` and `--ws.api`. 
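A minimal invocation sketch for the new flag (the accompanying flags are illustrative,
not required):

    reth node --disable-auth-server --http --http.api eth,net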
From c667bc972ea19b76a69c802eac938ac3aef1f6ec Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 15 Jul 2025 06:10:24 -0400 Subject: [PATCH 170/305] chore(txpool): use alloy-primitives HashMap for SenderIdentifiers (#17408) --- crates/transaction-pool/src/identifier.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/crates/transaction-pool/src/identifier.rs b/crates/transaction-pool/src/identifier.rs index 17320ecf930..96cfd1ef2df 100644 --- a/crates/transaction-pool/src/identifier.rs +++ b/crates/transaction-pool/src/identifier.rs @@ -1,7 +1,6 @@ //! Identifier types for transactions and senders. -use alloy_primitives::Address; +use alloy_primitives::{map::HashMap, Address}; use rustc_hash::FxHashMap; -use std::collections::HashMap; /// An internal mapping of addresses. /// From fb9f3cce92a304635c1f639c9f6427205dff471e Mon Sep 17 00:00:00 2001 From: fantasyup <59591096+fantasyup@users.noreply.github.com> Date: Tue, 15 Jul 2025 06:56:43 -0400 Subject: [PATCH 171/305] feat: Add support for ethstats (#16396) Co-authored-by: Matthias Seitz --- .github/assets/check_wasm.sh | 1 + Cargo.lock | 25 + Cargo.toml | 4 + crates/node/builder/Cargo.toml | 1 + crates/node/builder/src/launch/common.rs | 17 + crates/node/builder/src/launch/engine.rs | 2 + crates/node/core/src/args/debug.rs | 6 + crates/node/ethstats/Cargo.toml | 34 + crates/node/ethstats/src/connection.rs | 67 ++ crates/node/ethstats/src/credentials.rs | 47 ++ crates/node/ethstats/src/error.rs | 69 ++ crates/node/ethstats/src/ethstats.rs | 823 +++++++++++++++++++++++ crates/node/ethstats/src/events.rs | 283 ++++++++ crates/node/ethstats/src/lib.rs | 30 + docs/vocs/docs/pages/cli/reth/node.mdx | 3 + 15 files changed, 1412 insertions(+) create mode 100644 crates/node/ethstats/Cargo.toml create mode 100644 crates/node/ethstats/src/connection.rs create mode 100644 crates/node/ethstats/src/credentials.rs create mode 100644 crates/node/ethstats/src/error.rs create mode 100644 crates/node/ethstats/src/ethstats.rs create mode 100644 crates/node/ethstats/src/events.rs create mode 100644 crates/node/ethstats/src/lib.rs diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index faec5157950..cec98aa8dbe 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -78,6 +78,7 @@ exclude_crates=( reth-era-downloader # tokio reth-era-utils # tokio reth-tracing-otlp + reth-node-ethstats ) # Array to hold the results diff --git a/Cargo.lock b/Cargo.lock index 9ed950df3e0..ab6558cf6a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8861,6 +8861,7 @@ dependencies = [ "reth-network-p2p", "reth-node-api", "reth-node-core", + "reth-node-ethstats", "reth-node-events", "reth-node-metrics", "reth-payload-builder", @@ -8993,6 +8994,29 @@ dependencies = [ "tokio", ] +[[package]] +name = "reth-node-ethstats" +version = "1.5.1" +dependencies = [ + "alloy-consensus", + "alloy-primitives", + "chrono", + "futures-util", + "reth-chain-state", + "reth-network-api", + "reth-primitives-traits", + "reth-storage-api", + "reth-transaction-pool", + "serde", + "serde_json", + "thiserror 2.0.12", + "tokio", + "tokio-stream", + "tokio-tungstenite", + "tracing", + "url", +] + [[package]] name = "reth-node-events" version = "1.5.1" @@ -12286,6 +12310,7 @@ dependencies = [ "futures-util", "log", "rustls", + "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls", diff --git a/Cargo.toml b/Cargo.toml index 49e37635c12..509741c6186 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ 
-67,6 +67,7 @@ members = [ "crates/node/api/", "crates/node/builder/", "crates/node/core/", + "crates/node/ethstats", "crates/node/events/", "crates/node/metrics", "crates/node/types", @@ -392,6 +393,7 @@ reth-node-api = { path = "crates/node/api" } reth-node-builder = { path = "crates/node/builder" } reth-node-core = { path = "crates/node/core" } reth-node-ethereum = { path = "crates/ethereum/node" } +reth-node-ethstats = { path = "crates/node/ethstats" } reth-node-events = { path = "crates/node/events" } reth-node-metrics = { path = "crates/node/metrics" } reth-optimism-node = { path = "crates/optimism/node" } @@ -569,6 +571,7 @@ byteorder = "1" mini-moka = "0.10" tar-no-std = { version = "0.3.2", default-features = false } miniz_oxide = { version = "0.8.4", default-features = false } +chrono = "0.4.41" # metrics metrics = "0.24.0" @@ -584,6 +587,7 @@ quote = "1.0" # tokio tokio = { version = "1.44.2", default-features = false } tokio-stream = "0.1.11" +tokio-tungstenite = "0.26.2" tokio-util = { version = "0.7.4", features = ["codec"] } # async diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index d08c62d38ce..9172dc30462 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -54,6 +54,7 @@ reth-tokio-util.workspace = true reth-tracing.workspace = true reth-transaction-pool.workspace = true reth-basic-payload-builder.workspace = true +reth-node-ethstats.workspace = true ## ethereum alloy-consensus.workspace = true diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 50ad3599095..2de4bbd7de6 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -95,6 +95,7 @@ use tokio::sync::{ }; use futures::{future::Either, stream, Stream, StreamExt}; +use reth_node_ethstats::EthStatsService; use reth_node_events::{cl::ConsensusLayerHealthEvents, node::NodeEvent}; /// Reusable setup for launching a node. @@ -1047,6 +1048,22 @@ where Either::Right(stream::empty()) } } + + /// Spawns the [`EthStatsService`] service if configured. + pub async fn spawn_ethstats(&self) -> eyre::Result<()> { + let Some(url) = self.node_config().debug.ethstats.as_ref() else { return Ok(()) }; + + let network = self.components().network().clone(); + let pool = self.components().pool().clone(); + let provider = self.node_adapter().provider.clone(); + + info!(target: "reth::cli", "Starting EthStats service at {}", url); + + let ethstats = EthStatsService::new(url, network, provider, pool).await?; + tokio::spawn(async move { ethstats.run().await }); + + Ok(()) + } } impl diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 4b17954ed9c..4dcf1107278 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -348,6 +348,8 @@ where // Notify on node started on_node_started.on_event(FullNode::clone(&full_node))?; + ctx.spawn_ethstats().await?; + let handle = NodeHandle { node_exit_future: NodeExitFuture::new( async { rx.await? }, diff --git a/crates/node/core/src/args/debug.rs b/crates/node/core/src/args/debug.rs index d8b6d570384..fdd08243a77 100644 --- a/crates/node/core/src/args/debug.rs +++ b/crates/node/core/src/args/debug.rs @@ -92,6 +92,11 @@ pub struct DebugArgs { verbatim_doc_comment )] pub healthy_node_rpc_url: Option, + + /// The URL of the ethstats server to connect to. 
+ /// Example: `nodename:secret@host:port` + #[arg(long = "ethstats", help_heading = "Debug")] + pub ethstats: Option, } impl Default for DebugArgs { @@ -109,6 +114,7 @@ impl Default for DebugArgs { engine_api_store: None, invalid_block_hook: Some(InvalidBlockSelection::default()), healthy_node_rpc_url: None, + ethstats: None, } } } diff --git a/crates/node/ethstats/Cargo.toml b/crates/node/ethstats/Cargo.toml new file mode 100644 index 00000000000..6ffad317702 --- /dev/null +++ b/crates/node/ethstats/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "reth-node-ethstats" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-network-api.workspace = true +reth-transaction-pool.workspace = true +reth-primitives-traits.workspace = true +reth-storage-api.workspace = true +reth-chain-state.workspace = true + +alloy-primitives.workspace = true +alloy-consensus.workspace = true + +tokio.workspace = true +tokio-tungstenite = { workspace = true, features = ["rustls-tls-native-roots"] } +futures-util.workspace = true +tokio-stream.workspace = true + +serde.workspace = true +serde_json.workspace = true + +tracing.workspace = true +url.workspace = true +chrono.workspace = true +thiserror = { workspace = true, features = ["std"] } diff --git a/crates/node/ethstats/src/connection.rs b/crates/node/ethstats/src/connection.rs new file mode 100644 index 00000000000..049788dccc3 --- /dev/null +++ b/crates/node/ethstats/src/connection.rs @@ -0,0 +1,67 @@ +/// Abstractions for managing `WebSocket` connections in the ethstats service. +use crate::error::ConnectionError; +use futures_util::{ + stream::{SplitSink, SplitStream}, + SinkExt, StreamExt, +}; +use serde_json::Value; +use std::sync::Arc; +use tokio::{net::TcpStream, sync::Mutex}; +use tokio_tungstenite::{ + tungstenite::protocol::{frame::Utf8Bytes, Message}, + MaybeTlsStream, WebSocketStream, +}; + +/// Type alias for a `WebSocket` stream that may be TLS or plain TCP +pub(crate) type WsStream = WebSocketStream>; + +/// Wrapper for a thread-safe, asynchronously accessible `WebSocket` connection +#[derive(Debug, Clone)] +pub(crate) struct ConnWrapper { + /// Write-only part of the `WebSocket` stream + writer: Arc>>, + /// Read-only part of the `WebSocket` stream + reader: Arc>>, +} + +impl ConnWrapper { + /// Create a new connection wrapper from a `WebSocket` stream + pub(crate) fn new(stream: WsStream) -> Self { + let (writer, reader) = stream.split(); + + Self { writer: Arc::new(Mutex::new(writer)), reader: Arc::new(Mutex::new(reader)) } + } + + /// Write a JSON string as a text message to the `WebSocket` + pub(crate) async fn write_json(&self, value: &str) -> Result<(), ConnectionError> { + let mut writer = self.writer.lock().await; + writer.send(Message::Text(Utf8Bytes::from(value))).await?; + + Ok(()) + } + + /// Read the next JSON text message from the `WebSocket` + /// + /// Waits for the next text message, parses it as JSON, and returns the value. + /// Ignores non-text messages. Returns an error if the connection is closed or if parsing fails. + pub(crate) async fn read_json(&self) -> Result { + let mut reader = self.reader.lock().await; + while let Some(msg) = reader.next().await { + match msg? 
{ + Message::Text(text) => return Ok(serde_json::from_str(&text)?), + Message::Close(_) => return Err(ConnectionError::ConnectionClosed), + _ => {} // Ignore non-text messages + } + } + + Err(ConnectionError::ConnectionClosed) + } + + /// Close the `WebSocket` connection gracefully + pub(crate) async fn close(&self) -> Result<(), ConnectionError> { + let mut writer = self.writer.lock().await; + writer.close().await?; + + Ok(()) + } +} diff --git a/crates/node/ethstats/src/credentials.rs b/crates/node/ethstats/src/credentials.rs new file mode 100644 index 00000000000..cf2adb785e8 --- /dev/null +++ b/crates/node/ethstats/src/credentials.rs @@ -0,0 +1,47 @@ +use crate::error::EthStatsError; +use std::str::FromStr; + +/// Credentials for connecting to an `EthStats` server +/// +/// Contains the node identifier, authentication secret, and server host +/// information needed to establish a connection with the `EthStats` service. +#[derive(Debug, Clone)] +pub(crate) struct EthstatsCredentials { + /// Unique identifier for this node in the `EthStats` network + pub node_id: String, + /// Authentication secret for the `EthStats` server + pub secret: String, + /// Host address of the `EthStats` server + pub host: String, +} + +impl FromStr for EthstatsCredentials { + type Err = EthStatsError; + + /// Parse credentials from a string in the format "`node_id:secret@host`" + /// + /// # Arguments + /// * `s` - String containing credentials in the format "`node_id:secret@host`" + /// + /// # Returns + /// * `Ok(EthstatsCredentials)` - Successfully parsed credentials + /// * `Err(EthStatsError::InvalidUrl)` - Invalid format or missing separators + fn from_str(s: &str) -> Result { + let parts: Vec<&str> = s.split('@').collect(); + if parts.len() != 2 { + return Err(EthStatsError::InvalidUrl("Missing '@' separator".to_string())); + } + let creds = parts[0]; + let host = parts[1].to_string(); + let creds_parts: Vec<&str> = creds.split(':').collect(); + if creds_parts.len() != 2 { + return Err(EthStatsError::InvalidUrl( + "Missing ':' separator in credentials".to_string(), + )); + } + let node_id = creds_parts[0].to_string(); + let secret = creds_parts[1].to_string(); + + Ok(Self { node_id, secret, host }) + } +} diff --git a/crates/node/ethstats/src/error.rs b/crates/node/ethstats/src/error.rs new file mode 100644 index 00000000000..fff9bf5306a --- /dev/null +++ b/crates/node/ethstats/src/error.rs @@ -0,0 +1,69 @@ +use thiserror::Error; + +/// Errors that can occur during `WebSocket` connection handling +#[derive(Debug, Error)] +pub enum ConnectionError { + /// The `WebSocket` connection was closed unexpectedly + #[error("Connection closed")] + ConnectionClosed, + + /// Error occurred during JSON serialization/deserialization + #[error("Serialization error: {0}")] + Serialization(#[from] serde_json::Error), + + /// Error occurred during `WebSocket` communication + #[error("WebSocket error: {0}")] + WebSocket(#[from] tokio_tungstenite::tungstenite::Error), +} + +/// Main error type for the `EthStats` client +/// +/// This enum covers all possible errors that can occur when interacting +/// with an `EthStats` server, including connection issues, authentication +/// problems, data fetching errors, and various I/O operations. 
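+///
+/// A handling sketch (hedged; `reconnect` is a hypothetical caller-side routine, not
+/// part of this crate):
+///
+/// ```ignore
+/// match err {
+///     EthStatsError::Timeout | EthStatsError::ConnectionError(_) => reconnect().await,
+///     other => tracing::debug!(target: "ethstats", "unrecoverable: {other}"),
+/// }
+/// ```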
+#[derive(Debug, Error)]
+pub enum EthStatsError {
+    /// The provided URL is invalid or malformed
+    #[error("Invalid URL: {0}")]
+    InvalidUrl(String),
+
+    /// Error occurred during connection establishment or maintenance
+    #[error("Connection error: {0}")]
+    ConnectionError(#[from] ConnectionError),
+
+    /// Authentication failed with the `EthStats` server
+    #[error("Authentication error: {0}")]
+    AuthError(String),
+
+    /// Attempted to perform an operation while not connected to the server
+    #[error("Not connected to server")]
+    NotConnected,
+
+    /// Error occurred during JSON serialization or deserialization
+    #[error("Serialization error: {0}")]
+    Serialization(#[from] serde_json::Error),
+
+    /// Error occurred during `WebSocket` communication
+    #[error("WebSocket error: {0}")]
+    WebSocket(#[from] tokio_tungstenite::tungstenite::Error),
+
+    /// Operation timed out
+    #[error("Timeout error")]
+    Timeout,
+
+    /// Error occurred while parsing a URL
+    #[error("URL parsing error: {0}")]
+    Url(#[from] url::ParseError),
+
+    /// Requested block was not found in the blockchain
+    #[error("Block not found: {0}")]
+    BlockNotFound(u64),
+
+    /// Error occurred while fetching data from the blockchain or server
+    #[error("Data fetch error: {0}")]
+    DataFetchError(String),
+
+    /// The request sent to the server was invalid or malformed
+    #[error("Invalid request")]
+    InvalidRequest,
+}
diff --git a/crates/node/ethstats/src/ethstats.rs b/crates/node/ethstats/src/ethstats.rs
new file mode 100644
index 00000000000..aea8a160fc0
--- /dev/null
+++ b/crates/node/ethstats/src/ethstats.rs
@@ -0,0 +1,823 @@
+use crate::{
+    connection::ConnWrapper,
+    credentials::EthstatsCredentials,
+    error::EthStatsError,
+    events::{
+        AuthMsg, BlockMsg, BlockStats, HistoryMsg, LatencyMsg, NodeInfo, NodeStats, PendingMsg,
+        PendingStats, PingMsg, StatsMsg, TxStats, UncleStats,
+    },
+};
+use alloy_consensus::{BlockHeader, Sealable};
+use alloy_primitives::U256;
+use reth_chain_state::{CanonStateNotification, CanonStateSubscriptions};
+use reth_network_api::{NetworkInfo, Peers};
+use reth_primitives_traits::{Block, BlockBody};
+use reth_storage_api::{BlockReader, BlockReaderIdExt, NodePrimitivesProvider};
+use reth_transaction_pool::TransactionPool;
+
+use chrono::Local;
+use serde_json::Value;
+use std::{
+    str::FromStr,
+    sync::Arc,
+    time::{Duration, Instant},
+};
+use tokio::{
+    sync::{mpsc, Mutex, RwLock},
+    time::{interval, sleep, timeout},
+};
+use tokio_stream::StreamExt;
+use tokio_tungstenite::connect_async;
+use tracing::{debug, info};
+use url::Url;
+
+/// Number of historical blocks to include in a history update sent to the `EthStats` server
+const HISTORY_UPDATE_RANGE: u64 = 50;
+/// Duration to wait before attempting to reconnect to the `EthStats` server
+const RECONNECT_INTERVAL: Duration = Duration::from_secs(5);
+/// Maximum time to wait for a ping response from the server
+const PING_TIMEOUT: Duration = Duration::from_secs(5);
+/// Interval between regular stats reports to the server
+const REPORT_INTERVAL: Duration = Duration::from_secs(15);
+/// Maximum time to wait for initial connection establishment
+const CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
+/// Maximum time to wait for reading messages from the server
+const READ_TIMEOUT: Duration = Duration::from_secs(30);
+
+/// Main service for interacting with an `EthStats` server
+///
+/// This service handles all communication with the `EthStats` server including
+/// authentication, stats reporting, block notifications, and connection management.
+/// It maintains a persistent `WebSocket` connection and automatically reconnects +/// when the connection is lost. +#[derive(Debug)] +pub struct EthStatsService { + /// Authentication credentials for the `EthStats` server + credentials: EthstatsCredentials, + /// `WebSocket` connection wrapper, wrapped in `Arc` for shared access + conn: Arc>>, + /// Timestamp of the last ping sent to the server + last_ping: Arc>>, + /// Network interface for getting peer and sync information + network: Network, + /// Blockchain provider for reading block data and state + provider: Provider, + /// Transaction pool for getting pending transaction statistics + pool: Pool, +} + +impl EthStatsService +where + Network: NetworkInfo + Peers, + Provider: BlockReaderIdExt + CanonStateSubscriptions, + Pool: TransactionPool, +{ + /// Create a new `EthStats` service and establish initial connection + /// + /// # Arguments + /// * `url` - Connection string in format "`node_id:secret@host`" + /// * `network` - Network interface implementation + /// * `provider` - Blockchain provider implementation + /// * `pool` - Transaction pool implementation + pub async fn new( + url: &str, + network: Network, + provider: Provider, + pool: Pool, + ) -> Result { + let credentials = EthstatsCredentials::from_str(url)?; + let service = Self { + credentials, + conn: Arc::new(RwLock::new(None)), + last_ping: Arc::new(Mutex::new(None)), + network, + provider, + pool, + }; + service.connect().await?; + + Ok(service) + } + + /// Establish `WebSocket` connection to the `EthStats` server + /// + /// Attempts to connect to the server using the credentials and handles + /// connection timeouts and errors. + async fn connect(&self) -> Result<(), EthStatsError> { + debug!( + target: "ethstats", + "Attempting to connect to EthStats server at {}", self.credentials.host + ); + let full_url = format!("ws://{}/api", self.credentials.host); + let url = Url::parse(&full_url) + .map_err(|e| EthStatsError::InvalidUrl(format!("Invalid URL: {full_url} - {e}")))?; + + match timeout(CONNECT_TIMEOUT, connect_async(url.to_string())).await { + Ok(Ok((ws_stream, _))) => { + debug!( + target: "ethstats", + "Successfully connected to EthStats server at {}", self.credentials.host + ); + let conn: ConnWrapper = ConnWrapper::new(ws_stream); + *self.conn.write().await = Some(conn.clone()); + self.login().await?; + Ok(()) + } + Ok(Err(e)) => Err(EthStatsError::InvalidUrl(e.to_string())), + Err(_) => { + debug!(target: "ethstats", "Connection to EthStats server timed out"); + Err(EthStatsError::Timeout) + } + } + } + + /// Authenticate with the `EthStats` server + /// + /// Sends authentication credentials and node information to the server + /// and waits for a successful acknowledgment. 
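+    /// Wire-format sketch (hedged; the exact JSON comes from
+    /// `AuthMsg::generate_login_message`, and the `hello` frame name is an assumption
+    /// based on the public ethstats protocol — only the `ready` acknowledgment is
+    /// checked below):
+    ///
+    /// ```text
+    /// -> {"emit": ["hello", {"id": "...", "secret": "...", "info": {...}}]}
+    /// <- {"emit": ["ready"]}
+    /// ```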
+ async fn login(&self) -> Result<(), EthStatsError> { + debug!( + target: "ethstats", + "Attempting to login to EthStats server as node_id {}", self.credentials.node_id + ); + let conn = self.conn.read().await; + let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?; + + let network_status = self + .network + .network_status() + .await + .map_err(|e| EthStatsError::AuthError(e.to_string()))?; + let id = &self.credentials.node_id; + let secret = &self.credentials.secret; + let protocol = network_status + .capabilities + .iter() + .map(|cap| format!("{}/{}", cap.name, cap.version)) + .collect::>() + .join(", "); + let port = self.network.local_addr().port() as u64; + + let auth = AuthMsg { + id: id.clone(), + secret: secret.clone(), + info: NodeInfo { + name: id.clone(), + node: network_status.client_version.clone(), + port, + network: self.network.chain_id().to_string(), + protocol, + api: "No".to_string(), + os: std::env::consts::OS.into(), + os_ver: std::env::consts::ARCH.into(), + client: "0.1.1".to_string(), + history: true, + }, + }; + + let message = auth.generate_login_message(); + conn.write_json(&message).await?; + + let response = + timeout(READ_TIMEOUT, conn.read_json()).await.map_err(|_| EthStatsError::Timeout)??; + + if let Some(ack) = response.get("emit") { + if ack.get(0) == Some(&Value::String("ready".to_string())) { + info!( + target: "ethstats", + "Login successful to EthStats server as node_id {}", self.credentials.node_id + ); + return Ok(()); + } + } + + debug!(target: "ethstats", "Login failed: Unauthorized or unexpected login response"); + Err(EthStatsError::AuthError("Unauthorized or unexpected login response".into())) + } + + /// Report current node statistics to the `EthStats` server + /// + /// Sends information about the node's current state including sync status, + /// peer count, and uptime. + async fn report_stats(&self) -> Result<(), EthStatsError> { + let conn = self.conn.read().await; + let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?; + + let stats_msg = StatsMsg { + id: self.credentials.node_id.clone(), + stats: NodeStats { + active: true, + syncing: self.network.is_syncing(), + peers: self.network.num_connected_peers() as u64, + gas_price: 0, // TODO + uptime: 100, + }, + }; + + let message = stats_msg.generate_stats_message(); + conn.write_json(&message).await?; + + Ok(()) + } + + /// Send a ping message to the `EthStats` server + /// + /// Records the ping time and starts a timeout task to detect if the server + /// doesn't respond within the expected timeframe. 
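+    /// Timing sketch (names from this module; the latency halving happens in
+    /// `report_latency` below):
+    ///
+    /// ```text
+    /// send_ping()              t0 = Instant::now(), emit ping with client time
+    /// pong received        ->  report_latency(): latency = elapsed(t0) / 2
+    /// no pong in PING_TIMEOUT -> spawned task closes the connection (forces reconnect)
+    /// ```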
+    async fn send_ping(&self) -> Result<(), EthStatsError> {
+        let conn = self.conn.read().await;
+        let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?;
+
+        let ping_time = Instant::now();
+        *self.last_ping.lock().await = Some(ping_time);
+
+        let client_time = Local::now().format("%Y-%m-%d %H:%M:%S%.f %:z %Z").to_string();
+        let ping_msg = PingMsg { id: self.credentials.node_id.clone(), client_time };
+
+        let message = ping_msg.generate_ping_message();
+        conn.write_json(&message).await?;
+
+        // Start ping timeout
+        let active_ping = self.last_ping.clone();
+        let conn_ref = self.conn.clone();
+        tokio::spawn(async move {
+            sleep(PING_TIMEOUT).await;
+            let mut active = active_ping.lock().await;
+            if active.is_some() {
+                debug!(target: "ethstats", "Ping timeout");
+                *active = None;
+                // Clear connection to trigger reconnect
+                if let Some(conn) = conn_ref.write().await.take() {
+                    let _ = conn.close().await;
+                }
+            }
+        });
+
+        Ok(())
+    }
+
+    /// Report latency measurement to the `EthStats` server
+    ///
+    /// Calculates the round-trip time from the last ping and sends it to
+    /// the server. This is called when a pong response is received.
+    async fn report_latency(&self) -> Result<(), EthStatsError> {
+        let conn = self.conn.read().await;
+        let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?;
+
+        let mut active = self.last_ping.lock().await;
+        if let Some(start) = active.take() {
+            let latency = start.elapsed().as_millis() as u64 / 2;
+
+            debug!(target: "ethstats", "Reporting latency: {}ms", latency);
+
+            let latency_msg = LatencyMsg { id: self.credentials.node_id.clone(), latency };
+
+            let message = latency_msg.generate_latency_message();
+            conn.write_json(&message).await?
+        }
+
+        Ok(())
+    }
+
+    /// Report pending transaction count to the `EthStats` server
+    ///
+    /// Gets the current number of pending transactions from the pool and
+    /// sends this information to the server.
+    async fn report_pending(&self) -> Result<(), EthStatsError> {
+        let conn = self.conn.read().await;
+        let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?;
+        let pending = self.pool.pool_size().pending as u64;
+
+        debug!(target: "ethstats", "Reporting pending txs: {}", pending);
+
+        let pending_msg =
+            PendingMsg { id: self.credentials.node_id.clone(), stats: PendingStats { pending } };
+
+        let message = pending_msg.generate_pending_message();
+        conn.write_json(&message).await?;
+
+        Ok(())
+    }
+
+    /// Report block information to the `EthStats` server
+    ///
+    /// Fetches block data either from a canonical state notification or
+    /// the current best block, converts it to stats format, and sends
+    /// it to the server.
+    ///
+    /// # Arguments
+    /// * `head` - Optional canonical state notification containing new block info
+    async fn report_block(
+        &self,
+        head: Option<CanonStateNotification<<Provider as NodePrimitivesProvider>::Primitives>>,
+    ) -> Result<(), EthStatsError> {
+        let conn = self.conn.read().await;
+        let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?;
+
+        let block_number = if let Some(head) = head {
+            head.tip().header().number()
+        } else {
+            self.provider
+                .best_block_number()
+                .map_err(|e| EthStatsError::DataFetchError(e.to_string()))?
+        };
+
+        match self.provider.block_by_id(block_number.into()) {
+            Ok(Some(block)) => {
+                let block_msg = BlockMsg {
+                    id: self.credentials.node_id.clone(),
+                    block: self.block_to_stats(&block)?,
+                };
+
+                debug!(target: "ethstats", "Reporting block: {}", block_number);
+
+                let message = block_msg.generate_block_message();
+                conn.write_json(&message).await?;
+            }
+            Ok(None) => {
+                // Block not found, stop fetching
+                debug!(target: "ethstats", "Block {} not found", block_number);
+                return Err(EthStatsError::BlockNotFound(block_number));
+            }
+            Err(e) => {
+                debug!(target: "ethstats", "Error fetching block {}: {}", block_number, e);
+                return Err(EthStatsError::DataFetchError(e.to_string()));
+            }
+        };
+
+        Ok(())
+    }
+
+    /// Convert a block to `EthStats` block statistics format
+    ///
+    /// Extracts relevant information from a block and formats it according
+    /// to the `EthStats` protocol specification.
+    ///
+    /// # Arguments
+    /// * `block` - The block to convert
+    fn block_to_stats(
+        &self,
+        block: &<Provider as BlockReader>::Block,
+    ) -> Result<BlockStats, EthStatsError> {
+        let body = block.body();
+        let header = block.header();
+
+        let txs = body.transaction_hashes_iter().copied().map(|hash| TxStats { hash }).collect();
+
+        Ok(BlockStats {
+            number: U256::from(header.number()),
+            hash: header.hash_slow(),
+            parent_hash: header.parent_hash(),
+            timestamp: U256::from(header.timestamp()),
+            miner: header.beneficiary(),
+            gas_used: header.gas_used(),
+            gas_limit: header.gas_limit(),
+            diff: header.difficulty().to_string(),
+            total_diff: "0".into(),
+            txs,
+            tx_root: header.transactions_root(),
+            root: header.state_root(),
+            uncles: UncleStats(vec![]),
+        })
+    }
+
+    /// Report historical block data to the `EthStats` server
+    ///
+    /// Fetches multiple blocks by their numbers and sends their statistics
+    /// to the server. This is typically called in response to a history
+    /// request from the server.
+    ///
+    /// # Arguments
+    /// * `list` - Vector of block numbers to fetch and report
+    async fn report_history(&self, list: Option<&Vec<u64>>) -> Result<(), EthStatsError> {
+        let conn = self.conn.read().await;
+        let conn = conn.as_ref().ok_or(EthStatsError::NotConnected)?;
+
+        let indexes = if let Some(list) = list {
+            list
+        } else {
+            let best_block_number = self
+                .provider
+                .best_block_number()
+                .map_err(|e| EthStatsError::DataFetchError(e.to_string()))?;
+
+            let start = best_block_number.saturating_sub(HISTORY_UPDATE_RANGE);
+
+            &(start..=best_block_number).collect()
+        };
+
+        let mut blocks = Vec::with_capacity(indexes.len());
+        for &block_number in indexes {
+            match self.provider.block_by_id(block_number.into()) {
+                Ok(Some(block)) => {
+                    blocks.push(block);
+                }
+                Ok(None) => {
+                    // Block not found, stop fetching
+                    debug!(target: "ethstats", "Block {} not found", block_number);
+                    break;
+                }
+                Err(e) => {
+                    debug!(target: "ethstats", "Error fetching block {}: {}", block_number, e);
+                    break;
+                }
+            }
+        }
+
+        let history: Vec<BlockStats> =
+            blocks.iter().map(|block| self.block_to_stats(block)).collect::<Result<_, _>>()?;
+
+        if history.is_empty() {
+            debug!(target: "ethstats", "No history to send to stats server");
+        } else {
+            debug!(
+                target: "ethstats",
+                "Sending historical blocks to ethstats, first: {}, last: {}",
+                history.first().unwrap().number,
+                history.last().unwrap().number
+            );
+        }
+
+        let history_msg = HistoryMsg { id: self.credentials.node_id.clone(), history };
+
+        let message = history_msg.generate_history_message();
+        conn.write_json(&message).await?;
+
+        Ok(())
+    }
+
+    /// Send a complete status report to the `EthStats` server
+    ///
+    /// Performs all regular reporting tasks: ping, block info, pending
+    /// transactions, and general statistics.
+    async fn report(&self) -> Result<(), EthStatsError> {
+        self.send_ping().await?;
+        self.report_block(None).await?;
+        self.report_pending().await?;
+        self.report_stats().await?;
+
+        Ok(())
+    }
+
+    /// Handle incoming messages from the `EthStats` server
+    ///
+    /// # Expected Message Variants
+    ///
+    /// This function expects messages in the following format:
+    ///
+    /// ```json
+    /// { "emit": [<command>, <payload>] }
+    /// ```
+    ///
+    /// ## Supported Commands:
+    ///
+    /// - `"node-pong"`: Indicates a pong response to a previously sent ping. The payload is
+    ///   ignored. Triggers a latency report to the server.
+    ///   - Example: ```json { "emit": [ "node-pong", { "clientTime": "2025-07-10 12:00:00.123
+    ///     +00:00 UTC", "serverTime": "2025-07-10 12:00:01.456 +00:00 UTC" } ] } ```
+    ///
+    /// - `"history"`: Requests historical block data. The payload may contain a `list` field with
+    ///   block numbers to fetch. If `list` is not present, the default range is used.
+    ///   - Example with list: `{ "emit": ["history", {"list": [1, 2, 3], "min": 1, "max": 3}] }`
+    ///   - Example without list: `{ "emit": ["history", {}] }`
+    ///
+    /// ## Other Commands:
+    ///
+    /// Any other command is logged as unhandled and ignored.
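A small self-contained sketch of pulling the command name out of such an envelope, mirroring the first steps of `handle_message` below:

```rust
use serde_json::{json, Value};

// Mirrors the start of `handle_message`: the command is the first element
// of the array under the "emit" key.
fn command_of(msg: &Value) -> Option<&str> {
    msg.get("emit")?.get(0)?.as_str()
}

fn main() {
    let msg = json!({ "emit": ["history", { "list": [1, 2, 3] }] });
    assert_eq!(command_of(&msg), Some("history"));
}
```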
+    async fn handle_message(&self, msg: Value) -> Result<(), EthStatsError> {
+        let emit = match msg.get("emit") {
+            Some(emit) => emit,
+            None => {
+                debug!(target: "ethstats", "Stats server sent non-broadcast, msg {}", msg);
+                return Err(EthStatsError::InvalidRequest);
+            }
+        };
+
+        let command = match emit.get(0) {
+            Some(Value::String(command)) => command.as_str(),
+            _ => {
+                debug!(target: "ethstats", "Invalid stats server message type, msg {}", msg);
+                return Err(EthStatsError::InvalidRequest);
+            }
+        };
+
+        match command {
+            "node-pong" => {
+                self.report_latency().await?;
+            }
+            "history" => {
+                let block_numbers = emit
+                    .get(1)
+                    .and_then(|v| v.as_object())
+                    .and_then(|obj| obj.get("list"))
+                    .and_then(|v| v.as_array());
+
+                if block_numbers.is_none() {
+                    self.report_history(None).await?;
+
+                    return Ok(());
+                }
+
+                let block_numbers = block_numbers
+                    .unwrap()
+                    .iter()
+                    .map(|val| {
+                        val.as_u64().ok_or_else(|| {
+                            debug!(
+                                target: "ethstats",
+                                "Invalid stats history block number, msg {}", msg
+                            );
+                            EthStatsError::InvalidRequest
+                        })
+                    })
+                    .collect::<Result<Vec<_>, _>>()?;
+
+                self.report_history(Some(&block_numbers)).await?;
+            }
+            other => debug!(target: "ethstats", "Unhandled command: {}", other),
+        }
+
+        Ok(())
+    }
+
+    /// Main service loop that handles all `EthStats` communication
+    ///
+    /// This method runs the main event loop that:
+    /// - Maintains the `WebSocket` connection
+    /// - Handles incoming messages from the server
+    /// - Reports statistics at regular intervals
+    /// - Processes new block notifications
+    /// - Automatically reconnects when the connection is lost
+    ///
+    /// The service runs until explicitly shut down or an unrecoverable
+    /// error occurs.
+    pub async fn run(self) {
+        // Create channels for internal communication
+        let (shutdown_tx, mut shutdown_rx) = mpsc::channel(1);
+        let (message_tx, mut message_rx) = mpsc::channel(32);
+        let (head_tx, mut head_rx) = mpsc::channel(10);
+
+        // Start the read loop in a separate task
+        let read_handle = {
+            let conn = self.conn.clone();
+            let message_tx = message_tx.clone();
+            let shutdown_tx = shutdown_tx.clone();
+
+            tokio::spawn(async move {
+                loop {
+                    let conn = conn.read().await;
+                    if let Some(conn) = conn.as_ref() {
+                        match conn.read_json().await {
+                            Ok(msg) => {
+                                if message_tx.send(msg).await.is_err() {
+                                    break;
+                                }
+                            }
+                            Err(e) => {
+                                debug!(target: "ethstats", "Read error: {}", e);
+                                break;
+                            }
+                        }
+                    } else {
+                        sleep(RECONNECT_INTERVAL).await;
+                    }
+                }
+
+                let _ = shutdown_tx.send(()).await;
+            })
+        };
+
+        let canonical_stream_handle = {
+            let mut canonical_stream = self.provider.canonical_state_stream();
+            let head_tx = head_tx.clone();
+            let shutdown_tx = shutdown_tx.clone();
+
+            tokio::spawn(async move {
+                loop {
+                    let head = canonical_stream.next().await;
+                    if let Some(head) = head {
+                        if head_tx.send(head).await.is_err() {
+                            break;
+                        }
+                    }
+                }
+
+                let _ = shutdown_tx.send(()).await;
+            })
+        };
+
+        let mut pending_tx_receiver = self.pool.pending_transactions_listener();
+
+        // Set up intervals
+        let mut report_interval = interval(REPORT_INTERVAL);
+        let mut reconnect_interval = interval(RECONNECT_INTERVAL);
+
+        // Main event loop using select!
+        loop {
+            tokio::select!
{ + // Handle shutdown signal + _ = shutdown_rx.recv() => { + info!(target: "ethstats", "Shutting down ethstats service"); + break; + } + + // Handle messages from the read loop + Some(msg) = message_rx.recv() => { + if let Err(e) = self.handle_message(msg).await { + debug!(target: "ethstats", "Error handling message: {}", e); + self.disconnect().await; + } + } + + // Handle new block + Some(head) = head_rx.recv() => { + if let Err(e) = self.report_block(Some(head)).await { + debug!(target: "ethstats", "Failed to report block: {}", e); + self.disconnect().await; + } + + if let Err(e) = self.report_pending().await { + debug!(target: "ethstats", "Failed to report pending: {}", e); + self.disconnect().await; + } + } + + // Handle new pending tx + _= pending_tx_receiver.recv() => { + if let Err(e) = self.report_pending().await { + debug!(target: "ethstats", "Failed to report pending: {}", e); + self.disconnect().await; + } + } + + // Handle stats reporting + _ = report_interval.tick() => { + if let Err(e) = self.report().await { + debug!(target: "ethstats", "Failed to report: {}", e); + self.disconnect().await; + } + } + + // Handle reconnection + _ = reconnect_interval.tick(), if self.conn.read().await.is_none() => { + match self.connect().await { + Ok(_) => info!(target: "ethstats", "Reconnected successfully"), + Err(e) => debug!(target: "ethstats", "Reconnect failed: {}", e), + } + } + } + } + + // Cleanup + self.disconnect().await; + + // Cancel background tasks + read_handle.abort(); + canonical_stream_handle.abort(); + } + + /// Gracefully close the `WebSocket` connection + /// + /// Attempts to close the connection cleanly and logs any errors + /// that occur during the process. + async fn disconnect(&self) { + if let Some(conn) = self.conn.write().await.take() { + if let Err(e) = conn.close().await { + debug!(target: "ethstats", "Error closing connection: {}", e); + } + } + } + + /// Test helper to check connection status + #[cfg(test)] + pub async fn is_connected(&self) -> bool { + self.conn.read().await.is_some() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use futures_util::{SinkExt, StreamExt}; + use reth_network_api::noop::NoopNetwork; + use reth_storage_api::noop::NoopProvider; + use reth_transaction_pool::noop::NoopTransactionPool; + use serde_json::json; + use tokio::net::TcpListener; + use tokio_tungstenite::tungstenite::protocol::{frame::Utf8Bytes, Message}; + + const TEST_HOST: &str = "127.0.0.1"; + const TEST_PORT: u16 = 0; // Let OS choose port + + async fn setup_mock_server() -> (String, tokio::task::JoinHandle<()>) { + let listener = TcpListener::bind((TEST_HOST, TEST_PORT)).await.unwrap(); + let addr = listener.local_addr().unwrap(); + + let handle = tokio::spawn(async move { + let (stream, _) = listener.accept().await.unwrap(); + let mut ws_stream = tokio_tungstenite::accept_async(stream).await.unwrap(); + + // Handle login + if let Some(Ok(Message::Text(text))) = ws_stream.next().await { + let value: serde_json::Value = serde_json::from_str(&text).unwrap(); + if value["emit"][0] == "hello" { + let response = json!({ + "emit": ["ready", []] + }); + ws_stream + .send(Message::Text(Utf8Bytes::from(response.to_string()))) + .await + .unwrap(); + } + } + + // Handle ping + while let Some(Ok(msg)) = ws_stream.next().await { + if let Message::Text(text) = msg { + if text.contains("node-ping") { + let pong = json!({ + "emit": ["node-pong", {"id": "test-node"}] + }); + ws_stream + .send(Message::Text(Utf8Bytes::from(pong.to_string()))) + .await + .unwrap(); + } + } + 
}
+        });
+
+        (addr.to_string(), handle)
+    }
+
+    #[tokio::test]
+    async fn test_connection_and_login() {
+        let (server_url, server_handle) = setup_mock_server().await;
+        let ethstats_url = format!("test-node:test-secret@{server_url}");
+
+        let network = NoopNetwork::default();
+        let provider = NoopProvider::default();
+        let pool = NoopTransactionPool::default();
+
+        let service = EthStatsService::new(&ethstats_url, network, provider, pool)
+            .await
+            .expect("Service should connect");
+
+        // Verify connection was established
+        assert!(service.is_connected().await, "Service should be connected");
+
+        // Clean up server
+        server_handle.abort();
+    }
+
+    #[tokio::test]
+    async fn test_history_command_handling() {
+        let (server_url, server_handle) = setup_mock_server().await;
+        let ethstats_url = format!("test-node:test-secret@{server_url}");
+
+        let network = NoopNetwork::default();
+        let provider = NoopProvider::default();
+        let pool = NoopTransactionPool::default();
+
+        let service = EthStatsService::new(&ethstats_url, network, provider, pool)
+            .await
+            .expect("Service should connect");
+
+        // Simulate receiving a history command
+        let history_cmd = json!({
+            "emit": ["history", {"list": [1, 2, 3]}]
+        });
+
+        service.handle_message(history_cmd).await.expect("History command should be handled");
+
+        // Clean up server
+        server_handle.abort();
+    }
+
+    #[tokio::test]
+    async fn test_invalid_url_handling() {
+        let network = NoopNetwork::default();
+        let provider = NoopProvider::default();
+        let pool = NoopTransactionPool::default();
+
+        // Test missing secret
+        let result = EthStatsService::new(
+            "test-node@localhost",
+            network.clone(),
+            provider.clone(),
+            pool.clone(),
+        )
+        .await;
+        assert!(
+            matches!(result, Err(EthStatsError::InvalidUrl(_))),
+            "Should detect invalid URL format"
+        );
+
+        // Test invalid URL format
+        let result = EthStatsService::new("invalid-url", network, provider, pool).await;
+        assert!(
+            matches!(result, Err(EthStatsError::InvalidUrl(_))),
+            "Should detect invalid URL format"
+        );
+    }
+}
diff --git a/crates/node/ethstats/src/events.rs b/crates/node/ethstats/src/events.rs
new file mode 100644
index 00000000000..08d0c90feb6
--- /dev/null
+++ b/crates/node/ethstats/src/events.rs
@@ -0,0 +1,283 @@
+//! Types for ethstats event reporting.
+//! These structures define the data format used to report blockchain events to ethstats servers.
+
+use alloy_consensus::Header;
+use alloy_primitives::{Address, B256, U256};
+use serde::{Deserialize, Serialize};
+
+/// Collection of meta information about a node that is displayed on the monitoring page.
+/// This information is used to identify and display node details in the ethstats monitoring
+/// interface.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct NodeInfo {
+    /// The display name of the node in the monitoring interface
+    pub name: String,
+
+    /// The node's unique identifier
+    pub node: String,
+
+    /// The port number the node is listening on for P2P connections
+    pub port: u64,
+
+    /// The network ID the node is connected to (e.g. "1" for mainnet)
+    #[serde(rename = "net")]
+    pub network: String,
+
+    /// Comma-separated list of supported protocols and their versions
+    pub protocol: String,
+
+    /// API availability indicator ("Yes" or "No")
+    pub api: String,
+
+    /// Operating system the node is running on
+    pub os: String,
+
+    /// Operating system version/architecture
+    #[serde(rename = "os_v")]
+    pub os_ver: String,
+
+    /// Client software version
+    pub client: String,
+
+    /// Whether the node can provide historical block data
+    #[serde(rename = "canUpdateHistory")]
+    pub history: bool,
+}
+
+/// Authentication message used to login to the ethstats monitoring server.
+/// Contains node identification and authentication information.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct AuthMsg {
+    /// The node's unique identifier
+    pub id: String,
+
+    /// Detailed information about the node
+    pub info: NodeInfo,
+
+    /// Secret password for authentication with the monitoring server
+    pub secret: String,
+}
+
+impl AuthMsg {
+    /// Generate a login message for the ethstats monitoring server.
+    pub fn generate_login_message(&self) -> String {
+        serde_json::json!({
+            "emit": ["hello", self]
+        })
+        .to_string()
+    }
+}
+
+/// Simplified transaction info, containing only the hash.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct TxStats {
+    /// Transaction hash
+    pub hash: B256,
+}
+
+/// Wrapper for uncle block headers.
+/// This ensures empty lists serialize as `[]` instead of `null`.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(transparent)]
+pub struct UncleStats(pub Vec<Header>);
+
+/// Information to report about individual blocks.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BlockStats {
+    /// Block number (height in the chain).
+    pub number: U256,
+
+    /// Hash of this block.
+    pub hash: B256,
+
+    /// Hash of the parent block.
+    #[serde(rename = "parentHash")]
+    pub parent_hash: B256,
+
+    /// Timestamp of the block (Unix time).
+    pub timestamp: U256,
+
+    /// Address of the miner who produced this block.
+    pub miner: Address,
+
+    /// Total gas used by all transactions in the block.
+    #[serde(rename = "gasUsed")]
+    pub gas_used: u64,
+
+    /// Maximum gas allowed for this block.
+    #[serde(rename = "gasLimit")]
+    pub gas_limit: u64,
+
+    /// Difficulty for mining this block (as a decimal string).
+    #[serde(rename = "difficulty")]
+    pub diff: String,
+
+    /// Cumulative difficulty up to this block (as a decimal string).
+    #[serde(rename = "totalDifficulty")]
+    pub total_diff: String,
+
+    /// Simplified list of transactions in the block.
+    #[serde(rename = "transactions")]
+    pub txs: Vec<TxStats>,
+
+    /// Root hash of all transactions (Merkle root).
+    #[serde(rename = "transactionsRoot")]
+    pub tx_root: B256,
+
+    /// State root after applying this block.
+    #[serde(rename = "stateRoot")]
+    pub root: B256,
+
+    /// List of uncle block headers.
+    pub uncles: UncleStats,
+}
+
+/// Message containing a block to be reported to the ethstats monitoring server.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct BlockMsg {
+    /// The node's unique identifier
+    pub id: String,
+
+    /// The block to report
+    pub block: BlockStats,
+}
+
+impl BlockMsg {
+    /// Generate a block message for the ethstats monitoring server.
+    pub fn generate_block_message(&self) -> String {
+        serde_json::json!({
+            "emit": ["block", self]
+        })
+        .to_string()
+    }
+}
+
+/// Message containing historical block data to be reported to the ethstats monitoring server.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct HistoryMsg {
+    /// The node's unique identifier
+    pub id: String,
+
+    /// The historical block data to report
+    pub history: Vec<BlockStats>,
+}
+
+impl HistoryMsg {
+    /// Generate a history message for the ethstats monitoring server.
+    pub fn generate_history_message(&self) -> String {
+        serde_json::json!({
+            "emit": ["history", self]
+        })
+        .to_string()
+    }
+}
+
+/// Message containing pending transaction statistics to be reported to the ethstats monitoring
+/// server.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct PendingStats {
+    /// Number of pending transactions
+    pub pending: u64,
+}
+
+/// Message containing pending transaction statistics to be reported to the ethstats monitoring
+/// server.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct PendingMsg {
+    /// The node's unique identifier
+    pub id: String,
+
+    /// The pending transaction statistics to report
+    pub stats: PendingStats,
+}
+
+impl PendingMsg {
+    /// Generate a pending message for the ethstats monitoring server.
+    pub fn generate_pending_message(&self) -> String {
+        serde_json::json!({
+            "emit": ["pending", self]
+        })
+        .to_string()
+    }
+}
+
+/// Information reported about the local node.
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct NodeStats { + /// Whether the node is active + pub active: bool, + + /// Whether the node is currently syncing + pub syncing: bool, + + /// Number of connected peers + pub peers: u64, + + /// Current gas price in wei + #[serde(rename = "gasPrice")] + pub gas_price: u64, + + /// Node uptime percentage + pub uptime: u64, +} + +/// Message containing node statistics to be reported to the ethstats monitoring server. +#[derive(Debug, Serialize, Deserialize)] +pub struct StatsMsg { + /// The node's unique identifier + pub id: String, + + /// The stats to report + pub stats: NodeStats, +} + +impl StatsMsg { + /// Generate a stats message for the ethstats monitoring server. + pub fn generate_stats_message(&self) -> String { + serde_json::json!({ + "emit": ["stats", self] + }) + .to_string() + } +} + +/// Latency report message used to report network latency to the ethstats monitoring server. +#[derive(Serialize, Deserialize, Debug)] +pub struct LatencyMsg { + /// The node's unique identifier + pub id: String, + + /// The latency to report in milliseconds + pub latency: u64, +} + +impl LatencyMsg { + /// Generate a latency message for the ethstats monitoring server. + pub fn generate_latency_message(&self) -> String { + serde_json::json!({ + "emit": ["latency", self] + }) + .to_string() + } +} + +/// Ping message sent to the ethstats monitoring server to initiate latency measurement. +#[derive(Serialize, Deserialize, Debug)] +pub struct PingMsg { + /// The node's unique identifier + pub id: String, + + /// Client timestamp when the ping was sent + #[serde(rename = "clientTime")] + pub client_time: String, +} + +impl PingMsg { + /// Generate a ping message for the ethstats monitoring server. + pub fn generate_ping_message(&self) -> String { + serde_json::json!({ + "emit": ["node-ping", self] + }) + .to_string() + } +} diff --git a/crates/node/ethstats/src/lib.rs b/crates/node/ethstats/src/lib.rs new file mode 100644 index 00000000000..b2cd03243a0 --- /dev/null +++ b/crates/node/ethstats/src/lib.rs @@ -0,0 +1,30 @@ +//! +//! `EthStats` client support for Reth. +//! +//! This crate provides the necessary components to connect to, authenticate with, and report +//! node and network statistics to an `EthStats` server. It includes abstractions for `WebSocket` +//! connections, error handling, event/message types, and the main `EthStats` service logic. +//! +//! - `connection`: `WebSocket` connection management and utilities +//! - `error`: Error types for connection and `EthStats` operations +//! - `ethstats`: Main service logic for `EthStats` client +//! 
- `events`: Data structures for `EthStats` protocol messages

+#![doc(
+    html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png",
+    html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256",
+    issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/"
+)]
+#![cfg_attr(not(test), warn(unused_crate_dependencies))]
+#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
+
+mod connection;
+mod credentials;
+
+mod error;
+
+mod ethstats;
+pub use ethstats::*;
+
+mod events;
+pub use events::*;
diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx
index 2b033b88ac7..6eba046f921 100644
--- a/docs/vocs/docs/pages/cli/reth/node.mdx
+++ b/docs/vocs/docs/pages/cli/reth/node.mdx
@@ -635,6 +635,9 @@ Debug:
           compare them against local execution when a bad block is encountered,
           helping identify discrepancies in state execution.
 
+      --ethstats <ETHSTATS>
+          The URL of the ethstats server to connect to. Example: `nodename:secret@host:port`
+
 Database:
       --db.log-level <LOG_LEVEL>
           Database logging level. Levels higher than "notice" require a debug build

From fe1d2d2425b59da199df15228272bb3ef5b31925 Mon Sep 17 00:00:00 2001
From: Aliaksei Misiukevich
Date: Tue, 15 Jul 2025 14:40:52 +0200
Subject: [PATCH 172/305] refactor: `BlindedProvider` rename (#17208)

Signed-off-by: Aliaksei Misiukevich
Co-authored-by: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com>
---
 .../configured_sparse_trie.rs                 |   6 +-
 .../tree/src/tree/payload_processor/mod.rs    |  20 +--
 .../src/tree/payload_processor/sparse_trie.rs |  22 +--
 crates/stateless/src/trie.rs                  |   8 +-
 crates/trie/parallel/src/proof_task.rs        |  40 +++---
 crates/trie/sparse-parallel/src/trie.rs       | 136 +++++++++---------
 crates/trie/sparse/benches/rlp_node.rs        |   4 +-
 crates/trie/sparse/benches/root.rs            |   6 +-
 crates/trie/sparse/benches/update.rs          |   6 +-
 crates/trie/sparse/src/lib.rs                 |   2 +-
 .../sparse/src/{blinded.rs => provider.rs}    |  38 ++---
 crates/trie/sparse/src/state.rs               |  32 ++---
 crates/trie/sparse/src/traits.rs              |  10 +-
 crates/trie/sparse/src/trie.rs                |  78 +++++-----
 crates/trie/trie/src/proof/mod.rs             |   4 +-
 .../src/proof/{blinded.rs => trie_node.rs}    |  18 +--
 crates/trie/trie/src/witness.rs               |  30 ++--
 17 files changed, 230 insertions(+), 230 deletions(-)
 rename crates/trie/sparse/src/{blinded.rs => provider.rs} (58%)
 rename crates/trie/trie/src/proof/{blinded.rs => trie_node.rs} (90%)

diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs
index 61ad88b67fb..411d8da238e 100644
--- a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs
+++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs
@@ -3,7 +3,7 @@
 use alloy_primitives::B256;
 use reth_trie::{Nibbles, TrieNode};
 use reth_trie_sparse::{
-    blinded::BlindedProvider, errors::SparseTrieResult, LeafLookup, LeafLookupError,
+    errors::SparseTrieResult, provider::TrieNodeProvider, LeafLookup, LeafLookupError,
     SerialSparseTrie, SparseTrieInterface, SparseTrieUpdates, TrieMasks,
 };
@@ -77,7 +77,7 @@ impl SparseTrieInterface for ConfiguredSparseTrie {
         }
     }
 
-    fn update_leaf<P: BlindedProvider>(
+    fn update_leaf<P: TrieNodeProvider>(
         &mut self,
         full_path: Nibbles,
         value: Vec<u8>,
@@ -89,7 +89,7 @@
         }
     }
 
-    fn remove_leaf<P: BlindedProvider>(
+    fn remove_leaf<P: TrieNodeProvider>(
         &mut self,
         full_path: &Nibbles,
         provider: P,
diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs
b/crates/engine/tree/src/tree/payload_processor/mod.rs index cc4d291912c..3210780ec60 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -29,7 +29,7 @@ use reth_trie_parallel::{ root::ParallelStateRootError, }; use reth_trie_sparse::{ - blinded::{BlindedProvider, BlindedProviderFactory}, + provider::{TrieNodeProvider, TrieNodeProviderFactory}, SerialSparseTrie, SparseTrie, SparseTrieInterface, }; use reth_trie_sparse_parallel::ParallelSparseTrie; @@ -329,9 +329,9 @@ where state_root_tx: mpsc::Sender>, sparse_trie: Option>, ) where - BPF: BlindedProviderFactory + Clone + Send + Sync + 'static, - BPF::AccountNodeProvider: BlindedProvider + Send + Sync, - BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + BPF: TrieNodeProviderFactory + Clone + Send + Sync + 'static, + BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, + BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, A: SparseTrieInterface + Send + Sync + Default + 'static, ConfiguredSparseTrie: From, { @@ -371,9 +371,9 @@ where state_root_tx: mpsc::Sender>, is_revealed: bool, ) where - BPF: BlindedProviderFactory + Clone + Send + Sync + 'static, - BPF::AccountNodeProvider: BlindedProvider + Send + Sync, - BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + BPF: TrieNodeProviderFactory + Clone + Send + Sync + 'static, + BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, + BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, { match configured_trie { ConfiguredSparseTrie::Serial(boxed_serial) => { @@ -407,9 +407,9 @@ where stored_accounts_trie: Option>, use_parallel_for_new: bool, ) where - BPF: BlindedProviderFactory + Clone + Send + Sync + 'static, - BPF::AccountNodeProvider: BlindedProvider + Send + Sync, - BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + BPF: TrieNodeProviderFactory + Clone + Send + Sync + 'static, + BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, + BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, { let is_revealed = stored_accounts_trie.as_ref().is_some_and(|trie| trie.is_revealed()); match stored_accounts_trie { diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index 458ba1b08b4..929e4d1de30 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -9,8 +9,8 @@ use rayon::iter::{ParallelBridge, ParallelIterator}; use reth_trie::{updates::TrieUpdates, Nibbles}; use reth_trie_parallel::root::ParallelStateRootError; use reth_trie_sparse::{ - blinded::{BlindedProvider, BlindedProviderFactory}, errors::{SparseStateTrieResult, SparseTrieErrorKind}, + provider::{TrieNodeProvider, TrieNodeProviderFactory}, SerialSparseTrie, SparseStateTrie, SparseTrie, SparseTrieInterface, }; use std::{ @@ -22,9 +22,9 @@ use tracing::{debug, trace, trace_span}; /// A task responsible for populating the sparse trie. pub(super) struct SparseTrieTask where - BPF: BlindedProviderFactory + Send + Sync, - BPF::AccountNodeProvider: BlindedProvider + Send + Sync, - BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + BPF: TrieNodeProviderFactory + Send + Sync, + BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, + BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, { /// Executor used to spawn subtasks. 
#[expect(unused)] // TODO use this for spawning trie tasks @@ -36,15 +36,15 @@ where /// It's kept as a field on the struct to prevent blocking on de-allocation in [`Self::run`]. pub(super) trie: SparseStateTrie, pub(super) metrics: MultiProofTaskMetrics, - /// Blinded node provider factory. + /// Trie node provider factory. blinded_provider_factory: BPF, } impl SparseTrieTask where - BPF: BlindedProviderFactory + Send + Sync + Clone, - BPF::AccountNodeProvider: BlindedProvider + Send + Sync, - BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + BPF: TrieNodeProviderFactory + Send + Sync + Clone, + BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, + BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, A: SparseTrieInterface + Send + Sync + Default, S: SparseTrieInterface + Send + Sync + Default, { @@ -191,9 +191,9 @@ pub(crate) fn update_sparse_trie( blinded_provider_factory: &BPF, ) -> SparseStateTrieResult where - BPF: BlindedProviderFactory + Send + Sync, - BPF::AccountNodeProvider: BlindedProvider + Send + Sync, - BPF::StorageNodeProvider: BlindedProvider + Send + Sync, + BPF: TrieNodeProviderFactory + Send + Sync, + BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, + BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, A: SparseTrieInterface + Send + Sync + Default, S: SparseTrieInterface + Send + Sync + Default, { diff --git a/crates/stateless/src/trie.rs b/crates/stateless/src/trie.rs index 9cc95ff5848..582323614ef 100644 --- a/crates/stateless/src/trie.rs +++ b/crates/stateless/src/trie.rs @@ -9,8 +9,8 @@ use reth_errors::ProviderError; use reth_revm::state::Bytecode; use reth_trie_common::{HashedPostState, Nibbles, TRIE_ACCOUNT_RLP_MAX_SIZE}; use reth_trie_sparse::{ - blinded::{DefaultBlindedProvider, DefaultBlindedProviderFactory}, errors::SparseStateTrieResult, + provider::{DefaultTrieNodeProvider, DefaultTrieNodeProviderFactory}, SparseStateTrie, SparseTrie, SparseTrieInterface, }; @@ -175,7 +175,7 @@ fn verify_execution_witness( witness: &ExecutionWitness, pre_state_root: B256, ) -> Result<(SparseStateTrie, B256Map), StatelessValidationError> { - let provider_factory = DefaultBlindedProviderFactory; + let provider_factory = DefaultTrieNodeProviderFactory; let mut trie = SparseStateTrie::new(); let mut state_witness = B256Map::default(); let mut bytecode = B256Map::default(); @@ -239,8 +239,8 @@ fn calculate_state_root( // In `verify_execution_witness` a `DefaultBlindedProviderFactory` is used, so we use the same // again in here. 
- let provider_factory = DefaultBlindedProviderFactory; - let storage_provider = DefaultBlindedProvider; + let provider_factory = DefaultTrieNodeProviderFactory; + let storage_provider = DefaultTrieNodeProvider; for (address, storage) in state.storages.into_iter().sorted_unstable_by_key(|(addr, _)| *addr) { // Take the existing storage trie (or create an empty, “revealed” one) diff --git a/crates/trie/parallel/src/proof_task.rs b/crates/trie/parallel/src/proof_task.rs index 4dc78106963..2e5813d55b0 100644 --- a/crates/trie/parallel/src/proof_task.rs +++ b/crates/trie/parallel/src/proof_task.rs @@ -19,14 +19,14 @@ use reth_provider::{ use reth_trie::{ hashed_cursor::HashedPostStateCursorFactory, prefix_set::TriePrefixSetsMut, - proof::{ProofBlindedProviderFactory, StorageProof}, + proof::{ProofTrieNodeProviderFactory, StorageProof}, trie_cursor::InMemoryTrieCursorFactory, updates::TrieUpdatesSorted, DecodedStorageMultiProof, HashedPostStateSorted, Nibbles, }; use reth_trie_common::prefix_set::{PrefixSet, PrefixSetMut}; use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; -use reth_trie_sparse::blinded::{BlindedProvider, BlindedProviderFactory, RevealedNode}; +use reth_trie_sparse::provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory}; use std::{ collections::VecDeque, sync::{ @@ -40,7 +40,7 @@ use tokio::runtime::Handle; use tracing::debug; type StorageProofResult = Result; -type BlindedNodeResult = Result, SparseTrieError>; +type TrieNodeProviderResult = Result, SparseTrieError>; /// A task that manages sending multiproof requests to a number of tasks that have longer-running /// database transactions @@ -291,7 +291,7 @@ where fn blinded_account_node( self, path: Nibbles, - result_sender: Sender, + result_sender: Sender, tx_sender: Sender>, ) { debug!( @@ -302,14 +302,14 @@ where let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - let blinded_provider_factory = ProofBlindedProviderFactory::new( + let blinded_provider_factory = ProofTrieNodeProviderFactory::new( trie_cursor_factory, hashed_cursor_factory, self.task_ctx.prefix_sets.clone(), ); let start = Instant::now(); - let result = blinded_provider_factory.account_node_provider().blinded_node(&path); + let result = blinded_provider_factory.account_node_provider().trie_node(&path); debug!( target: "trie::proof_task", ?path, @@ -335,7 +335,7 @@ where self, account: B256, path: Nibbles, - result_sender: Sender, + result_sender: Sender, tx_sender: Sender>, ) { debug!( @@ -347,14 +347,14 @@ where let (trie_cursor_factory, hashed_cursor_factory) = self.create_factories(); - let blinded_provider_factory = ProofBlindedProviderFactory::new( + let blinded_provider_factory = ProofTrieNodeProviderFactory::new( trie_cursor_factory, hashed_cursor_factory, self.task_ctx.prefix_sets.clone(), ); let start = Instant::now(); - let result = blinded_provider_factory.storage_node_provider(account).blinded_node(&path); + let result = blinded_provider_factory.storage_node_provider(account).trie_node(&path); debug!( target: "trie::proof_task", ?account, @@ -449,9 +449,9 @@ pub enum ProofTaskKind { /// A storage proof request. StorageProof(StorageProofInput, Sender), /// A blinded account node request. - BlindedAccountNode(Nibbles, Sender), + BlindedAccountNode(Nibbles, Sender), /// A blinded storage node request. 
- BlindedStorageNode(B256, Nibbles, Sender), + BlindedStorageNode(B256, Nibbles, Sender), } /// A handle that wraps a single proof task sender that sends a terminate message on `Drop` if the @@ -498,22 +498,22 @@ impl Drop for ProofTaskManagerHandle { } } -impl BlindedProviderFactory for ProofTaskManagerHandle { - type AccountNodeProvider = ProofTaskBlindedNodeProvider; - type StorageNodeProvider = ProofTaskBlindedNodeProvider; +impl TrieNodeProviderFactory for ProofTaskManagerHandle { + type AccountNodeProvider = ProofTaskTrieNodeProvider; + type StorageNodeProvider = ProofTaskTrieNodeProvider; fn account_node_provider(&self) -> Self::AccountNodeProvider { - ProofTaskBlindedNodeProvider::AccountNode { sender: self.sender.clone() } + ProofTaskTrieNodeProvider::AccountNode { sender: self.sender.clone() } } fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider { - ProofTaskBlindedNodeProvider::StorageNode { account, sender: self.sender.clone() } + ProofTaskTrieNodeProvider::StorageNode { account, sender: self.sender.clone() } } } -/// Blinded node provider for retrieving trie nodes by path. +/// Trie node provider for retrieving trie nodes by path. #[derive(Debug)] -pub enum ProofTaskBlindedNodeProvider { +pub enum ProofTaskTrieNodeProvider { /// Blinded account trie node provider. AccountNode { /// Sender to the proof task. @@ -528,8 +528,8 @@ pub enum ProofTaskBlindedNodeProvider { }, } -impl BlindedProvider for ProofTaskBlindedNodeProvider { - fn blinded_node(&self, path: &Nibbles) -> Result, SparseTrieError> { +impl TrieNodeProvider for ProofTaskTrieNodeProvider { + fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { let (tx, rx) = channel(); match self { Self::AccountNode { sender } => { diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 4c0a02d0102..9b123ef0f67 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -12,7 +12,7 @@ use reth_trie_common::{ BranchNodeRef, ExtensionNodeRef, LeafNodeRef, Nibbles, RlpNode, TrieNode, CHILD_INDEX_RANGE, }; use reth_trie_sparse::{ - blinded::{BlindedProvider, RevealedNode}, + provider::{RevealedNode, TrieNodeProvider}, LeafLookup, LeafLookupError, RlpNodeStackItem, SparseNode, SparseNodeType, SparseTrieInterface, SparseTrieUpdates, TrieMasks, }; @@ -153,7 +153,7 @@ impl SparseTrieInterface for ParallelSparseTrie { Ok(()) } - fn update_leaf( + fn update_leaf( &mut self, full_path: Nibbles, value: Vec, @@ -199,7 +199,7 @@ impl SparseTrieInterface for ParallelSparseTrie { let subtrie = self.subtrie_for_path_mut(&reveal_path); if subtrie.nodes.get(&reveal_path).expect("node must exist").is_hash() { if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.blinded_node(&reveal_path)? + provider.trie_node(&reveal_path)? { let decoded = TrieNode::decode(&mut &node[..])?; trace!( @@ -300,7 +300,7 @@ impl SparseTrieInterface for ParallelSparseTrie { Ok(()) } - fn remove_leaf( + fn remove_leaf( &mut self, full_path: &Nibbles, provider: P, @@ -482,7 +482,7 @@ impl SparseTrieInterface for ParallelSparseTrie { "Retrieving remaining blinded branch child", ); if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.blinded_node(&remaining_child_path)? + provider.trie_node(&remaining_child_path)? 
{ let decoded = TrieNode::decode(&mut &node[..])?; trace!( @@ -1299,7 +1299,7 @@ impl SparseSubtrie { &mut self, full_path: Nibbles, value: Vec, - provider: impl BlindedProvider, + provider: impl TrieNodeProvider, retain_updates: bool, ) -> SparseTrieResult<()> { debug_assert!(full_path.starts_with(&self.path)); @@ -1320,7 +1320,7 @@ impl SparseSubtrie { if let Some(reveal_path) = reveal_path { if self.nodes.get(&reveal_path).expect("node must exist").is_hash() { if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.blinded_node(&reveal_path)? + provider.trie_node(&reveal_path)? { let decoded = TrieNode::decode(&mut &node[..])?; trace!( @@ -2271,7 +2271,7 @@ mod tests { }; use reth_trie_db::DatabaseTrieCursorFactory; use reth_trie_sparse::{ - blinded::{BlindedProvider, DefaultBlindedProvider, RevealedNode}, + provider::{DefaultTrieNodeProvider, RevealedNode, TrieNodeProvider}, LeafLookup, LeafLookupError, SerialSparseTrie, SparseNode, SparseTrieInterface, SparseTrieUpdates, TrieMasks, }; @@ -2286,17 +2286,17 @@ mod tests { nibbles } - /// Mock blinded provider for testing that allows pre-setting nodes at specific paths. + /// Mock trie node provider for testing that allows pre-setting nodes at specific paths. /// - /// This provider can be used in tests to simulate blinded nodes that need to be revealed + /// This provider can be used in tests to simulate trie nodes that need to be revealed /// during trie operations, particularly when collapsing branch nodes during leaf removal. #[derive(Debug, Clone)] - struct MockBlindedProvider { + struct MockTrieNodeProvider { /// Mapping from path to revealed node data nodes: HashMap, } - impl MockBlindedProvider { + impl MockTrieNodeProvider { /// Creates a new empty mock provider fn new() -> Self { Self { nodes: HashMap::with_hasher(RandomState::default()) } @@ -2308,8 +2308,8 @@ mod tests { } } - impl BlindedProvider for MockBlindedProvider { - fn blinded_node(&self, path: &Nibbles) -> Result, SparseTrieError> { + impl TrieNodeProvider for MockTrieNodeProvider { + fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { Ok(self.nodes.get(path).cloned()) } } @@ -2395,7 +2395,7 @@ mod tests { leaves: impl IntoIterator)>, ) { for (path, value) in leaves { - trie.update_leaf(path, value, DefaultBlindedProvider).unwrap(); + trie.update_leaf(path, value, DefaultTrieNodeProvider).unwrap(); } } @@ -3270,7 +3270,7 @@ mod tests { .into_iter(), ); - let provider = MockBlindedProvider::new(); + let provider = MockTrieNodeProvider::new(); // Remove the leaf with a full path of 0x537 let leaf_full_path = Nibbles::from_nibbles([0x5, 0x3, 0x7]); @@ -3330,7 +3330,7 @@ mod tests { .insert(Nibbles::default(), BranchNodeCompact::new(0b11, 0, 0, vec![], None)); } - let provider = MockBlindedProvider::new(); + let provider = MockTrieNodeProvider::new(); // Remove the leaf with a full path of 0x012 let leaf_full_path = Nibbles::from_nibbles([0x0, 0x1, 0x2]); @@ -3389,7 +3389,7 @@ mod tests { .into_iter(), ); - let provider = MockBlindedProvider::new(); + let provider = MockTrieNodeProvider::new(); // Remove the leaf with a full path of 0x5012 let leaf_full_path = Nibbles::from_nibbles([0x5, 0x0, 0x1, 0x2]); @@ -3449,7 +3449,7 @@ mod tests { .into_iter(), ); - let provider = MockBlindedProvider::new(); + let provider = MockTrieNodeProvider::new(); // Remove the leaf with a full path of 0x2034 let leaf_full_path = Nibbles::from_nibbles([0x2, 0x0, 0x3, 0x4]); @@ -3528,7 +3528,7 @@ mod tests { .into_iter(), ); - let provider = 
MockBlindedProvider::new(); + let provider = MockTrieNodeProvider::new(); // Verify initial state - the lower subtrie's path should be 0x123 let lower_subtrie_root_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); @@ -3586,7 +3586,7 @@ mod tests { ); // Create a mock provider that will reveal the blinded leaf - let mut provider = MockBlindedProvider::new(); + let mut provider = MockTrieNodeProvider::new(); let revealed_leaf = create_leaf_node([0x3, 0x4], 42); let mut encoded = Vec::new(); revealed_leaf.encode(&mut encoded); @@ -3627,7 +3627,7 @@ mod tests { SparseNode::new_leaf(Nibbles::from_nibbles([0x1, 0x2, 0x3])), ))); - let provider = MockBlindedProvider::new(); + let provider = MockTrieNodeProvider::new(); // Remove the leaf with a full key of 0x123 let leaf_full_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); @@ -3716,7 +3716,7 @@ mod tests { .into_iter(), ); - let provider = MockBlindedProvider::new(); + let provider = MockTrieNodeProvider::new(); // Remove a leaf which does not exist; this should have no effect. trie.remove_leaf(&Nibbles::from_nibbles([0x0, 0x1, 0x2, 0x3, 0x4, 0xF]), &provider) @@ -3942,7 +3942,7 @@ mod tests { paths.clone(), ); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::default().with_updates(true); for path in &paths { sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); @@ -4057,7 +4057,7 @@ mod tests { #[test] fn sparse_trie_remove_leaf() { let ctx = ParallelSparseTrieTestContext; - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -4325,7 +4325,7 @@ mod tests { TrieMask::new(0b11), )); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::from_root( branch.clone(), TrieMasks { hash_mask: Some(TrieMask::new(0b01)), tree_mask: None }, @@ -4370,7 +4370,7 @@ mod tests { TrieMask::new(0b11), )); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::from_root( branch.clone(), TrieMasks { hash_mask: Some(TrieMask::new(0b01)), tree_mask: None }, @@ -4410,7 +4410,7 @@ mod tests { fn test(updates: Vec<(BTreeMap, BTreeSet)>) { { let mut state = BTreeMap::default(); - let default_provider = DefaultBlindedProvider; + let default_provider = DefaultTrieNodeProvider; let provider_factory = create_test_provider_factory(); let mut sparse = ParallelSparseTrie::default().with_updates(true); @@ -4553,7 +4553,7 @@ mod tests { const KEY_NIBBLES_LEN: usize = 3; fn test(updates: Vec<(BTreeMap, BTreeSet)>) { - let default_provider = DefaultBlindedProvider; + let default_provider = DefaultTrieNodeProvider; let mut serial = SerialSparseTrie::default().with_updates(true); let mut parallel = ParallelSparseTrie::default().with_updates(true); @@ -4649,7 +4649,7 @@ mod tests { #[test] fn sparse_trie_two_leaves_at_lower_roots() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut trie = ParallelSparseTrie::default().with_updates(true); let key_50 = Nibbles::unpack(hex!( "0x5000000000000000000000000000000000000000000000000000000000000000" @@ -4708,7 +4708,7 @@ mod tests { [Nibbles::default()], ); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::from_root( TrieNode::decode(&mut 
&hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { @@ -4818,7 +4818,7 @@ mod tests { [Nibbles::default()], ); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { @@ -4921,7 +4921,7 @@ mod tests { [Nibbles::default()], ); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { @@ -5002,7 +5002,7 @@ mod tests { // First add leaf 0x1345 - this should create a leaf in upper trie at 0x let (leaf1_path, value1) = ctx.create_test_leaf([0x1, 0x3, 0x4, 0x5], 1); - trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf1_path, value1.clone(), DefaultTrieNodeProvider).unwrap(); // Verify upper trie has a leaf at the root with key 1345 ctx.assert_upper_subtrie(&trie) @@ -5011,7 +5011,7 @@ mod tests { // Add leaf 0x1234 - this should go first in the upper subtrie let (leaf2_path, value2) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 2); - trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf2_path, value2.clone(), DefaultTrieNodeProvider).unwrap(); // Upper trie should now have a branch at 0x1 ctx.assert_upper_subtrie(&trie) @@ -5021,7 +5021,7 @@ mod tests { // Add leaf 0x1245 - this should cause a branch and create the 0x12 subtrie let (leaf3_path, value3) = ctx.create_test_leaf([0x1, 0x2, 0x4, 0x5], 3); - trie.update_leaf(leaf3_path, value3.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf3_path, value3.clone(), DefaultTrieNodeProvider).unwrap(); // Verify lower subtrie at 0x12 exists with correct structure ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) @@ -5033,7 +5033,7 @@ mod tests { // Add leaf 0x1334 - this should create another lower subtrie let (leaf4_path, value4) = ctx.create_test_leaf([0x1, 0x3, 0x3, 0x4], 4); - trie.update_leaf(leaf4_path, value4.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf4_path, value4.clone(), DefaultTrieNodeProvider).unwrap(); // Verify lower subtrie at 0x13 exists with correct values ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x3])) @@ -5077,7 +5077,7 @@ mod tests { // First insert a leaf that ends exactly at the boundary (2 nibbles) let (first_leaf_path, first_value) = ctx.create_test_leaf([0x1, 0x2, 0x2, 0x4], 1); - trie.update_leaf(first_leaf_path, first_value.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(first_leaf_path, first_value.clone(), DefaultTrieNodeProvider).unwrap(); // In an empty trie, the first leaf becomes the root, regardless of path length ctx.assert_upper_subtrie(&trie) @@ -5087,7 +5087,7 @@ mod tests { // Now insert another leaf that shares the same 2-nibble prefix let (second_leaf_path, second_value) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4], 2); - trie.update_leaf(second_leaf_path, second_value.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(second_leaf_path, second_value.clone(), DefaultTrieNodeProvider).unwrap(); // Now both leaves should be in a lower subtrie at index [0x1, 0x2] ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) @@ -5150,7 +5150,7 @@ mod tests { let updated_path = Nibbles::from_nibbles([0x1, 0x2, 0x3, 0x4]); let (_, updated_value) = ctx.create_test_leaf([0x1, 0x2, 0x3, 
0x4], 100); - trie.update_leaf(updated_path, updated_value.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(updated_path, updated_value.clone(), DefaultTrieNodeProvider).unwrap(); // Verify the subtrie structure is maintained and value is updated // The branch structure should remain the same and all values should be present @@ -5164,7 +5164,7 @@ mod tests { // Add a new leaf that extends an existing branch let (new_leaf_path, new_leaf_value) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x6], 200); - trie.update_leaf(new_leaf_path, new_leaf_value.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(new_leaf_path, new_leaf_value.clone(), DefaultTrieNodeProvider).unwrap(); // Verify the branch at [0x1, 0x2, 0x3] now has an additional child ctx.assert_subtrie(&trie, Nibbles::from_nibbles([0x1, 0x2])) @@ -5507,7 +5507,7 @@ mod tests { // └── 0xAB2: Hash // Create a mock provider that will provide the hash node - let mut provider = MockBlindedProvider::new(); + let mut provider = MockTrieNodeProvider::new(); // Create revealed branch which will get revealed and add it to the mock provider let child_hashes = [ @@ -5568,8 +5568,8 @@ mod tests { let (leaf1_path, value1) = ctx.create_test_leaf([0xA, 0xB, 0xC, 0xD, 0xE, 0xF], 1); let (leaf2_path, value2) = ctx.create_test_leaf([0xA, 0xB, 0xD, 0xE, 0xF, 0x0], 2); - trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap(); - trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf1_path, value1.clone(), DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(leaf2_path, value2.clone(), DefaultTrieNodeProvider).unwrap(); // Verify upper trie structure ctx.assert_upper_subtrie(&trie) @@ -5625,9 +5625,9 @@ mod tests { let (leaf2_path, value2) = ctx.create_test_leaf([0x2, 0x3, 0x4, 0x5], 2); let (leaf3_path, value3) = ctx.create_test_leaf([0x2, 0x3, 0x5, 0x6], 3); - trie.update_leaf(leaf1_path, value1, DefaultBlindedProvider).unwrap(); - trie.update_leaf(leaf2_path, value2, DefaultBlindedProvider).unwrap(); - trie.update_leaf(leaf3_path, value3, DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf1_path, value1, DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(leaf2_path, value2, DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(leaf3_path, value3, DefaultTrieNodeProvider).unwrap(); // Verify initial structure has branch at root ctx.assert_upper_subtrie(&trie).has_branch(&Nibbles::default(), &[0x1, 0x2]); @@ -5641,9 +5641,9 @@ mod tests { // Clear and add new leaves let mut trie = ParallelSparseTrie::from_root(TrieNode::EmptyRoot, TrieMasks::none(), true).unwrap(); - trie.update_leaf(new_leaf1_path, new_value1.clone(), DefaultBlindedProvider).unwrap(); - trie.update_leaf(new_leaf2_path, new_value2.clone(), DefaultBlindedProvider).unwrap(); - trie.update_leaf(new_leaf3_path, new_value3.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(new_leaf1_path, new_value1.clone(), DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(new_leaf2_path, new_value2.clone(), DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(new_leaf3_path, new_value3.clone(), DefaultTrieNodeProvider).unwrap(); // Verify new structure has extension ctx.assert_upper_subtrie(&trie) @@ -5688,9 +5688,9 @@ mod tests { let (leaf2_path, value2) = ctx.create_test_leaf([0x2, 0x3, 0x4, 0x5], 2); let (leaf3_path, value3) = ctx.create_test_leaf([0x2, 0x3, 0x5, 0x6], 3); - trie.update_leaf(leaf1_path, value1, DefaultBlindedProvider).unwrap(); - trie.update_leaf(leaf2_path, value2.clone(), 
DefaultBlindedProvider).unwrap(); - trie.update_leaf(leaf3_path, value3.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf1_path, value1, DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(leaf2_path, value2.clone(), DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(leaf3_path, value3.clone(), DefaultTrieNodeProvider).unwrap(); // Verify upper trie structure ctx.assert_upper_subtrie(&trie) @@ -5749,7 +5749,7 @@ mod tests { // Step 1: Add first leaf - initially stored as leaf in upper trie let (leaf1_path, value1) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4, 0x5], 1); - trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf1_path, value1.clone(), DefaultTrieNodeProvider).unwrap(); // Verify leaf node in upper trie (optimized single-leaf case) ctx.assert_upper_subtrie(&trie) @@ -5758,7 +5758,7 @@ mod tests { // Step 2: Add leaf at 0x12346 - creates branch at 0x1234 let (leaf2_path, value2) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x4, 0x6], 2); - trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf2_path, value2.clone(), DefaultTrieNodeProvider).unwrap(); // Verify extension now goes to 0x1234 ctx.assert_upper_subtrie(&trie) @@ -5776,7 +5776,7 @@ mod tests { // Step 3: Add leaf at 0x1235 - creates branch at 0x123 let (leaf3_path, value3) = ctx.create_test_leaf([0x1, 0x2, 0x3, 0x5], 3); - trie.update_leaf(leaf3_path, value3.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf3_path, value3.clone(), DefaultTrieNodeProvider).unwrap(); // Verify extension now goes to 0x123 ctx.assert_upper_subtrie(&trie) @@ -5795,7 +5795,7 @@ mod tests { // Step 4: Add leaf at 0x124 - creates branch at 0x12 (subtrie root) let (leaf4_path, value4) = ctx.create_test_leaf([0x1, 0x2, 0x4], 4); - trie.update_leaf(leaf4_path, value4.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf4_path, value4.clone(), DefaultTrieNodeProvider).unwrap(); // Verify extension now goes to 0x12 ctx.assert_upper_subtrie(&trie) @@ -5841,8 +5841,8 @@ mod tests { let (leaf1_path, value1) = ctx.create_test_leaf(&path1_nibbles, 1); let (leaf2_path, value2) = ctx.create_test_leaf(&path2_nibbles, 2); - trie.update_leaf(leaf1_path, value1.clone(), DefaultBlindedProvider).unwrap(); - trie.update_leaf(leaf2_path, value2.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf1_path, value1.clone(), DefaultTrieNodeProvider).unwrap(); + trie.update_leaf(leaf2_path, value2.clone(), DefaultTrieNodeProvider).unwrap(); // The common prefix of 63 F's will create a very long extension let extension_key = vec![0xF; 63]; @@ -5965,7 +5965,7 @@ mod tests { 218, 223, 145, 158, 225, 240, 227, 203, 155, 98, 211, 244, 176, 44, ]; - trie.update_leaf(leaf_full_path, leaf_new_value.clone(), DefaultBlindedProvider).unwrap(); + trie.update_leaf(leaf_full_path, leaf_new_value.clone(), DefaultTrieNodeProvider).unwrap(); // Sanity checks before calculating the root assert_eq!( @@ -5983,7 +5983,7 @@ mod tests { #[test] fn find_leaf_existing_leaf() { // Create a simple trie with one leaf - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::default(); let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); let value = b"test_value".to_vec(); @@ -6002,7 +6002,7 @@ mod tests { #[test] fn find_leaf_value_mismatch() { // Create a simple trie with one leaf - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = 
ParallelSparseTrie::default(); let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); let value = b"test_value".to_vec(); @@ -6040,7 +6040,7 @@ mod tests { #[test] fn find_leaf_exists_no_value_check() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); sparse.update_leaf(path, encode_account_value(0), &provider).unwrap(); @@ -6051,7 +6051,7 @@ mod tests { #[test] fn find_leaf_exists_with_value_check_ok() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let value = encode_account_value(0); @@ -6063,7 +6063,7 @@ mod tests { #[test] fn find_leaf_exclusion_branch_divergence() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::default(); let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); // Belongs to same branch @@ -6078,7 +6078,7 @@ mod tests { #[test] fn find_leaf_exclusion_extension_divergence() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::default(); // This will create an extension node at root with key 0x12 let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); @@ -6093,7 +6093,7 @@ mod tests { #[test] fn find_leaf_exclusion_leaf_divergence() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::default(); let existing_leaf_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); @@ -6106,7 +6106,7 @@ mod tests { #[test] fn find_leaf_exclusion_path_ends_at_branch() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = ParallelSparseTrie::default(); let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); diff --git a/crates/trie/sparse/benches/rlp_node.rs b/crates/trie/sparse/benches/rlp_node.rs index 97dac845bc0..9f2337f31b8 100644 --- a/crates/trie/sparse/benches/rlp_node.rs +++ b/crates/trie/sparse/benches/rlp_node.rs @@ -7,7 +7,7 @@ use proptest::{prelude::*, test_runner::TestRunner}; use rand::{seq::IteratorRandom, Rng}; use reth_testing_utils::generators; use reth_trie::Nibbles; -use reth_trie_sparse::{blinded::DefaultBlindedProvider, SerialSparseTrie, SparseTrieInterface}; +use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrieInterface}; fn update_rlp_node_level(c: &mut Criterion) { let mut rng = generators::rng(); @@ -22,7 +22,7 @@ fn update_rlp_node_level(c: &mut Criterion) { .current(); // Create a sparse trie with `size` leaves - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); for (key, value) in &state { sparse diff --git a/crates/trie/sparse/benches/root.rs b/crates/trie/sparse/benches/root.rs index 121d3350eb3..ed88921ecf2 100644 --- a/crates/trie/sparse/benches/root.rs +++ b/crates/trie/sparse/benches/root.rs @@ -13,7 +13,7 @@ use reth_trie::{ HashedStorage, }; use reth_trie_common::{HashBuilder, 
Nibbles}; -use reth_trie_sparse::{blinded::DefaultBlindedProvider, SerialSparseTrie, SparseTrie}; +use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie}; fn calculate_root_from_leaves(c: &mut Criterion) { let mut group = c.benchmark_group("calculate root from leaves"); @@ -40,7 +40,7 @@ fn calculate_root_from_leaves(c: &mut Criterion) { }); // sparse trie - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; group.bench_function(BenchmarkId::new("sparse trie", size), |b| { b.iter_with_setup(SparseTrie::::revealed_empty, |mut sparse| { for (key, value) in &state { @@ -179,7 +179,7 @@ fn calculate_root_from_leaves_repeated(c: &mut Criterion) { }); // sparse trie - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let benchmark_id = BenchmarkId::new( "sparse trie", format!( diff --git a/crates/trie/sparse/benches/update.rs b/crates/trie/sparse/benches/update.rs index 66669e0d161..dff0260a9a4 100644 --- a/crates/trie/sparse/benches/update.rs +++ b/crates/trie/sparse/benches/update.rs @@ -5,7 +5,7 @@ use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criteri use proptest::{prelude::*, strategy::ValueTree}; use rand::seq::IteratorRandom; use reth_trie_common::Nibbles; -use reth_trie_sparse::{blinded::DefaultBlindedProvider, SerialSparseTrie, SparseTrie}; +use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie}; const LEAF_COUNTS: [usize; 2] = [1_000, 5_000]; @@ -16,7 +16,7 @@ fn update_leaf(c: &mut Criterion) { group.bench_function(BenchmarkId::from_parameter(leaf_count), |b| { let leaves = generate_leaves(leaf_count); // Start with an empty trie - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; b.iter_batched( || { @@ -60,7 +60,7 @@ fn remove_leaf(c: &mut Criterion) { group.bench_function(BenchmarkId::from_parameter(leaf_count), |b| { let leaves = generate_leaves(leaf_count); // Start with an empty trie - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; b.iter_batched( || { diff --git a/crates/trie/sparse/src/lib.rs b/crates/trie/sparse/src/lib.rs index 220a712d8c8..6b175970481 100644 --- a/crates/trie/sparse/src/lib.rs +++ b/crates/trie/sparse/src/lib.rs @@ -14,7 +14,7 @@ pub use trie::*; mod traits; pub use traits::*; -pub mod blinded; +pub mod provider; #[cfg(feature = "metrics")] mod metrics; diff --git a/crates/trie/sparse/src/blinded.rs b/crates/trie/sparse/src/provider.rs similarity index 58% rename from crates/trie/sparse/src/blinded.rs rename to crates/trie/sparse/src/provider.rs index b42012eb8ea..405b3a84747 100644 --- a/crates/trie/sparse/src/blinded.rs +++ b/crates/trie/sparse/src/provider.rs @@ -4,13 +4,13 @@ use alloy_primitives::{Bytes, B256}; use reth_execution_errors::SparseTrieError; use reth_trie_common::{Nibbles, TrieMask}; -/// Factory for instantiating blinded node providers. +/// Factory for instantiating trie node providers. #[auto_impl::auto_impl(&)] -pub trait BlindedProviderFactory { +pub trait TrieNodeProviderFactory { /// Type capable of fetching blinded account nodes. - type AccountNodeProvider: BlindedProvider; + type AccountNodeProvider: TrieNodeProvider; /// Type capable of fetching blinded storage nodes. - type StorageNodeProvider: BlindedProvider; + type StorageNodeProvider: TrieNodeProvider; /// Returns blinded account node provider. 
fn account_node_provider(&self) -> Self::AccountNodeProvider;
@@ -30,36 +30,36 @@ pub struct RevealedNode {
     pub hash_mask: Option<TrieMask>,
 }
 
-/// Trie node provider for retrieving blinded nodes.
+/// Trie node provider for retrieving trie nodes.
 #[auto_impl::auto_impl(&)]
-pub trait BlindedProvider {
-    /// Retrieve blinded node by path.
-    fn blinded_node(&self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError>;
+pub trait TrieNodeProvider {
+    /// Retrieve trie node by path.
+    fn trie_node(&self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError>;
 }
 
-/// Default blinded node provider factory that creates [`DefaultBlindedProvider`].
+/// Default trie node provider factory that creates [`DefaultTrieNodeProvider`].
 #[derive(PartialEq, Eq, Clone, Default, Debug)]
-pub struct DefaultBlindedProviderFactory;
+pub struct DefaultTrieNodeProviderFactory;
 
-impl BlindedProviderFactory for DefaultBlindedProviderFactory {
-    type AccountNodeProvider = DefaultBlindedProvider;
-    type StorageNodeProvider = DefaultBlindedProvider;
+impl TrieNodeProviderFactory for DefaultTrieNodeProviderFactory {
+    type AccountNodeProvider = DefaultTrieNodeProvider;
+    type StorageNodeProvider = DefaultTrieNodeProvider;
 
     fn account_node_provider(&self) -> Self::AccountNodeProvider {
-        DefaultBlindedProvider
+        DefaultTrieNodeProvider
    }
 
     fn storage_node_provider(&self, _account: B256) -> Self::StorageNodeProvider {
-        DefaultBlindedProvider
+        DefaultTrieNodeProvider
    }
 }
 
-/// Default blinded node provider that always returns `Ok(None)`.
+/// Default trie node provider that always returns `Ok(None)`.
 #[derive(PartialEq, Eq, Clone, Default, Debug)]
-pub struct DefaultBlindedProvider;
+pub struct DefaultTrieNodeProvider;
 
-impl BlindedProvider for DefaultBlindedProvider {
-    fn blinded_node(&self, _path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
+impl TrieNodeProvider for DefaultTrieNodeProvider {
+    fn trie_node(&self, _path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
         Ok(None)
     }
 }
diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs
index d80813b2e3a..60f7dab6c3b 100644
--- a/crates/trie/sparse/src/state.rs
+++ b/crates/trie/sparse/src/state.rs
@@ -1,5 +1,5 @@
 use crate::{
-    blinded::{BlindedProvider, BlindedProviderFactory},
+    provider::{TrieNodeProvider, TrieNodeProviderFactory},
     traits::SparseTrieInterface,
     SerialSparseTrie, SparseTrie, TrieMasks,
 };
@@ -605,16 +605,16 @@ where
     /// Returns mutable reference to the revealed account sparse trie.
     ///
-    /// If the trie is not revealed yet, its root will be revealed using the blinded node provider.
+    /// If the trie is not revealed yet, its root will be revealed using the trie node provider.
     fn revealed_trie_mut(
         &mut self,
-        provider_factory: impl BlindedProviderFactory,
+        provider_factory: impl TrieNodeProviderFactory,
     ) -> SparseStateTrieResult<&mut A> {
         match self.state {
             SparseTrie::Blind(_) => {
                 let (root_node, hash_mask, tree_mask) = provider_factory
                     .account_node_provider()
-                    .blinded_node(&Nibbles::default())?
+                    .trie_node(&Nibbles::default())?
                     .map(|node| {
                         TrieNode::decode(&mut &node.node[..])
                             .map(|decoded| (decoded, node.hash_mask, node.tree_mask))
@@ -634,7 +634,7 @@ where
     /// If the trie has not been revealed, this function reveals the root node and returns its hash.
pub fn root( &mut self, - provider_factory: impl BlindedProviderFactory, + provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult { // record revealed node metrics #[cfg(feature = "metrics")] @@ -646,7 +646,7 @@ where /// Returns sparse trie root and trie updates if the trie has been revealed. pub fn root_with_updates( &mut self, - provider_factory: impl BlindedProviderFactory, + provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<(B256, TrieUpdates)> { // record revealed node metrics #[cfg(feature = "metrics")] @@ -704,7 +704,7 @@ where &mut self, path: Nibbles, value: Vec, - provider_factory: impl BlindedProviderFactory, + provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<()> { if !self.revealed_account_paths.contains(&path) { self.revealed_account_paths.insert(path); @@ -721,7 +721,7 @@ where address: B256, slot: Nibbles, value: Vec, - provider_factory: impl BlindedProviderFactory, + provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<()> { if !self.revealed_storage_paths.get(&address).is_some_and(|slots| slots.contains(&slot)) { self.revealed_storage_paths.entry(address).or_default().insert(slot); @@ -742,7 +742,7 @@ where &mut self, address: B256, account: Account, - provider_factory: impl BlindedProviderFactory, + provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<()> { let nibbles = Nibbles::unpack(address); @@ -783,7 +783,7 @@ where pub fn update_account_storage_root( &mut self, address: B256, - provider_factory: impl BlindedProviderFactory, + provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<()> { if !self.is_account_revealed(address) { return Err(SparseTrieErrorKind::Blind.into()) @@ -831,7 +831,7 @@ where pub fn remove_account_leaf( &mut self, path: &Nibbles, - provider_factory: impl BlindedProviderFactory, + provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<()> { let provider = provider_factory.account_node_provider(); self.state.remove_leaf(path, provider)?; @@ -843,7 +843,7 @@ where &mut self, address: B256, slot: &Nibbles, - provider_factory: impl BlindedProviderFactory, + provider_factory: impl TrieNodeProviderFactory, ) -> SparseStateTrieResult<()> { let storage_trie = self.storages.get_mut(&address).ok_or(SparseTrieErrorKind::Blind)?; @@ -905,7 +905,7 @@ fn filter_revealed_nodes( #[cfg(test)] mod tests { use super::*; - use crate::blinded::DefaultBlindedProviderFactory; + use crate::provider::DefaultTrieNodeProviderFactory; use alloy_primitives::{ b256, map::{HashMap, HashSet}, @@ -982,7 +982,7 @@ mod tests { #[test] fn reveal_account_path_twice() { - let provider_factory = DefaultBlindedProviderFactory; + let provider_factory = DefaultTrieNodeProviderFactory; let mut sparse = SparseStateTrie::::default(); let leaf_value = alloy_rlp::encode(TrieAccount::default()); @@ -1054,7 +1054,7 @@ mod tests { #[test] fn reveal_storage_path_twice() { - let provider_factory = DefaultBlindedProviderFactory; + let provider_factory = DefaultTrieNodeProviderFactory; let mut sparse = SparseStateTrie::::default(); let leaf_value = alloy_rlp::encode(TrieAccount::default()); @@ -1186,7 +1186,7 @@ mod tests { let root = hash_builder.root(); let proof_nodes = hash_builder.take_proof_nodes(); - let provider_factory = DefaultBlindedProviderFactory; + let provider_factory = DefaultTrieNodeProviderFactory; let mut sparse = SparseStateTrie::::default().with_updates(true); sparse .reveal_decoded_multiproof( diff --git 
a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 2fe1838d777..45c990511db 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -11,7 +11,7 @@ use alloy_trie::{BranchNodeCompact, TrieMask}; use reth_execution_errors::SparseTrieResult; use reth_trie_common::{Nibbles, TrieNode}; -use crate::blinded::BlindedProvider; +use crate::provider::TrieNodeProvider; /// Trait defining common operations for revealed sparse trie implementations. /// @@ -94,12 +94,12 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { /// /// * `full_path` - The full path to the leaf /// * `value` - The new value for the leaf - /// * `provider` - The blinded provider for resolving missing nodes + /// * `provider` - The trie provider for resolving missing nodes /// /// # Returns /// /// `Ok(())` if successful, or an error if the update failed. - fn update_leaf( + fn update_leaf( &mut self, full_path: Nibbles, value: Vec, @@ -114,12 +114,12 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { /// # Arguments /// /// * `full_path` - The full path to the leaf to remove - /// * `provider` - The blinded provider for resolving missing nodes + /// * `provider` - The trie node provider for resolving missing nodes /// /// # Returns /// /// `Ok(())` if successful, or an error if the removal failed. - fn remove_leaf( + fn remove_leaf( &mut self, full_path: &Nibbles, provider: P, diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index 06891a441f5..c8669cca179 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,5 +1,5 @@ use crate::{ - blinded::{BlindedProvider, RevealedNode}, + provider::{RevealedNode, TrieNodeProvider}, LeafLookup, LeafLookupError, SparseTrieInterface, SparseTrieUpdates, TrieMasks, }; use alloc::{ @@ -71,7 +71,7 @@ impl SparseTrie { /// # Examples /// /// ``` - /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, SerialSparseTrie, SparseTrie}; + /// use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie}; /// /// let trie = SparseTrie::::revealed_empty(); /// assert!(!trie.is_blind()); @@ -120,7 +120,7 @@ impl SparseTrie { /// # Examples /// /// ``` - /// use reth_trie_sparse::{blinded::DefaultBlindedProvider, SerialSparseTrie, SparseTrie}; + /// use reth_trie_sparse::{provider::DefaultTrieNodeProvider, SerialSparseTrie, SparseTrie}; /// /// let trie = SparseTrie::::blind(); /// assert!(trie.is_blind()); @@ -226,7 +226,7 @@ impl SparseTrie { &mut self, path: Nibbles, value: Vec, - provider: impl BlindedProvider, + provider: impl TrieNodeProvider, ) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; revealed.update_leaf(path, value, provider)?; @@ -241,7 +241,7 @@ impl SparseTrie { pub fn remove_leaf( &mut self, path: &Nibbles, - provider: impl BlindedProvider, + provider: impl TrieNodeProvider, ) -> SparseTrieResult<()> { let revealed = self.as_revealed_mut().ok_or(SparseTrieErrorKind::Blind)?; revealed.remove_leaf(path, provider)?; @@ -556,7 +556,7 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } - fn update_leaf( + fn update_leaf( &mut self, full_path: Nibbles, value: Vec, @@ -629,7 +629,7 @@ impl SparseTrieInterface for SerialSparseTrie { // Check if the extension node child is a hash that needs to be revealed if self.nodes.get(¤t).unwrap().is_hash() { if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.blinded_node(¤t)? + provider.trie_node(¤t)? 
{ let decoded = TrieNode::decode(&mut &node[..])?; trace!( @@ -687,7 +687,7 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } - fn remove_leaf( + fn remove_leaf( &mut self, full_path: &Nibbles, provider: P, @@ -797,7 +797,7 @@ impl SparseTrieInterface for SerialSparseTrie { if self.nodes.get(&child_path).unwrap().is_hash() { trace!(target: "trie::sparse", ?child_path, "Retrieving remaining blinded branch child"); if let Some(RevealedNode { node, tree_mask, hash_mask }) = - provider.blinded_node(&child_path)? + provider.trie_node(&child_path)? { let decoded = TrieNode::decode(&mut &node[..])?; trace!( @@ -1912,7 +1912,7 @@ impl SparseTrieUpdates { #[cfg(test)] mod find_leaf_tests { use super::*; - use crate::blinded::DefaultBlindedProvider; + use crate::provider::DefaultTrieNodeProvider; use alloy_primitives::map::foldhash::fast::RandomState; // Assuming this exists use alloy_rlp::Encodable; @@ -1935,7 +1935,7 @@ mod find_leaf_tests { #[test] fn find_leaf_existing_leaf() { // Create a simple trie with one leaf - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); let value = b"test_value".to_vec(); @@ -1954,7 +1954,7 @@ mod find_leaf_tests { #[test] fn find_leaf_value_mismatch() { // Create a simple trie with one leaf - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles([0x1, 0x2, 0x3]); let value = b"test_value".to_vec(); @@ -1992,7 +1992,7 @@ mod find_leaf_tests { #[test] fn find_leaf_exists_no_value_check() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); sparse.update_leaf(path, VALUE_A(), &provider).unwrap(); @@ -2003,7 +2003,7 @@ mod find_leaf_tests { #[test] fn find_leaf_exists_with_value_check_ok() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); let path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let value = VALUE_A(); @@ -2015,7 +2015,7 @@ mod find_leaf_tests { #[test] fn find_leaf_exclusion_branch_divergence() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); // Belongs to same branch @@ -2030,7 +2030,7 @@ mod find_leaf_tests { #[test] fn find_leaf_exclusion_extension_divergence() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); // This will create an extension node at root with key 0x12 let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); @@ -2045,7 +2045,7 @@ mod find_leaf_tests { #[test] fn find_leaf_exclusion_leaf_divergence() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); let existing_leaf_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4, 0x5, 0x6]); @@ -2058,7 +2058,7 @@ mod find_leaf_tests { #[test] fn find_leaf_exclusion_path_ends_at_branch() { - let provider = DefaultBlindedProvider; + 
let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); let path1 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Creates branch at 0x12 let path2 = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x5, 0x6]); @@ -2072,7 +2072,7 @@ mod find_leaf_tests { } #[test] - fn find_leaf_error_blinded_node_at_leaf_path() { + fn find_leaf_error_trie_node_at_leaf_path() { // Scenario: The node *at* the leaf path is blinded. let blinded_hash = B256::repeat_byte(0xBB); let leaf_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); @@ -2113,7 +2113,7 @@ mod find_leaf_tests { } #[test] - fn find_leaf_error_blinded_node() { + fn find_leaf_error_trie_node() { let blinded_hash = B256::repeat_byte(0xAA); let path_to_blind = Nibbles::from_nibbles_unchecked([0x1]); let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); @@ -2155,7 +2155,7 @@ mod find_leaf_tests { } #[test] - fn find_leaf_error_blinded_node_via_reveal() { + fn find_leaf_error_trie_node_via_reveal() { let blinded_hash = B256::repeat_byte(0xAA); let path_to_blind = Nibbles::from_nibbles_unchecked([0x1]); // Path of the blinded node itself let search_path = Nibbles::from_nibbles_unchecked([0x1, 0x2, 0x3, 0x4]); // Path we will search for @@ -2216,7 +2216,7 @@ mod find_leaf_tests { #[cfg(test)] mod tests { use super::*; - use crate::blinded::DefaultBlindedProvider; + use crate::provider::DefaultTrieNodeProvider; use alloy_primitives::{map::B256Set, U256}; use alloy_rlp::Encodable; use assert_matches::assert_matches; @@ -2398,7 +2398,7 @@ mod tests { [key], ); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default().with_updates(true); sparse.update_leaf(key, value_encoded(), &provider).unwrap(); let sparse_root = sparse.root(); @@ -2429,7 +2429,7 @@ mod tests { paths.clone(), ); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default().with_updates(true); for path in &paths { sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); @@ -2460,7 +2460,7 @@ mod tests { paths.clone(), ); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default().with_updates(true); for path in &paths { sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); @@ -2499,7 +2499,7 @@ mod tests { paths.clone(), ); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default().with_updates(true); for path in &paths { sparse.update_leaf(*path, value_encoded(), &provider).unwrap(); @@ -2539,7 +2539,7 @@ mod tests { paths.clone(), ); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default().with_updates(true); for path in &paths { sparse.update_leaf(*path, old_value_encoded.clone(), &provider).unwrap(); @@ -2574,7 +2574,7 @@ mod tests { fn sparse_trie_remove_leaf() { reth_tracing::init_test_tracing(); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -2828,7 +2828,7 @@ mod tests { TrieMask::new(0b11), )); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::from_root( branch.clone(), TrieMasks { hash_mask: Some(TrieMask::new(0b01)), tree_mask: None }, @@ 
-2873,7 +2873,7 @@ mod tests { TrieMask::new(0b11), )); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::from_root( branch.clone(), TrieMasks { hash_mask: Some(TrieMask::new(0b01)), tree_mask: None }, @@ -2913,7 +2913,7 @@ mod tests { fn test(updates: Vec<(BTreeMap, BTreeSet)>) { { let mut state = BTreeMap::default(); - let default_provider = DefaultBlindedProvider; + let default_provider = DefaultTrieNodeProvider; let provider_factory = create_test_provider_factory(); let mut sparse = SerialSparseTrie::default().with_updates(true); @@ -3074,7 +3074,7 @@ mod tests { [Nibbles::default()], ); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { @@ -3184,7 +3184,7 @@ mod tests { [Nibbles::default()], ); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { @@ -3287,7 +3287,7 @@ mod tests { [Nibbles::default()], ); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::from_root( TrieNode::decode(&mut &hash_builder_proof_nodes.nodes_sorted()[0].1[..]).unwrap(), TrieMasks { @@ -3342,7 +3342,7 @@ mod tests { #[test] fn sparse_trie_get_changed_nodes_at_depth() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -3457,7 +3457,7 @@ mod tests { [Nibbles::default()], ); - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); sparse.update_leaf(key1(), value_encoded(), &provider).unwrap(); sparse.update_leaf(key2(), value_encoded(), &provider).unwrap(); @@ -3470,7 +3470,7 @@ mod tests { #[test] fn sparse_trie_wipe() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default().with_updates(true); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); @@ -3520,7 +3520,7 @@ mod tests { fn sparse_trie_clear() { // tests that if we fill a sparse trie with some nodes and then clear it, it has the same // contents as an empty sparse trie - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); sparse @@ -3544,7 +3544,7 @@ mod tests { #[test] fn sparse_trie_display() { - let provider = DefaultBlindedProvider; + let provider = DefaultTrieNodeProvider; let mut sparse = SerialSparseTrie::default(); let value = alloy_rlp::encode_fixed_size(&U256::ZERO).to_vec(); diff --git a/crates/trie/trie/src/proof/mod.rs b/crates/trie/trie/src/proof/mod.rs index 266aac19a39..10439b804f6 100644 --- a/crates/trie/trie/src/proof/mod.rs +++ b/crates/trie/trie/src/proof/mod.rs @@ -17,8 +17,8 @@ use reth_trie_common::{ proof::ProofRetainer, AccountProof, MultiProof, MultiProofTargets, StorageMultiProof, }; -mod blinded; -pub use blinded::*; +mod trie_node; +pub use trie_node::*; /// A struct for generating merkle proofs. 
/// diff --git a/crates/trie/trie/src/proof/blinded.rs b/crates/trie/trie/src/proof/trie_node.rs similarity index 90% rename from crates/trie/trie/src/proof/blinded.rs rename to crates/trie/trie/src/proof/trie_node.rs index 363add7116b..3d964cf5e8b 100644 --- a/crates/trie/trie/src/proof/blinded.rs +++ b/crates/trie/trie/src/proof/trie_node.rs @@ -3,15 +3,15 @@ use crate::{hashed_cursor::HashedCursorFactory, trie_cursor::TrieCursorFactory}; use alloy_primitives::{map::HashSet, B256}; use reth_execution_errors::{SparseTrieError, SparseTrieErrorKind}; use reth_trie_common::{prefix_set::TriePrefixSetsMut, MultiProofTargets, Nibbles}; -use reth_trie_sparse::blinded::{ - pad_path_to_key, BlindedProvider, BlindedProviderFactory, RevealedNode, +use reth_trie_sparse::provider::{ + pad_path_to_key, RevealedNode, TrieNodeProvider, TrieNodeProviderFactory, }; use std::{sync::Arc, time::Instant}; use tracing::{enabled, trace, Level}; /// Factory for instantiating providers capable of retrieving blinded trie nodes via proofs. #[derive(Debug, Clone)] -pub struct ProofBlindedProviderFactory { +pub struct ProofTrieNodeProviderFactory { /// The cursor factory for traversing trie nodes. trie_cursor_factory: T, /// The factory for hashed cursors. @@ -20,7 +20,7 @@ pub struct ProofBlindedProviderFactory { prefix_sets: Arc, } -impl ProofBlindedProviderFactory { +impl ProofTrieNodeProviderFactory { /// Create new proof-based blinded provider factory. pub const fn new( trie_cursor_factory: T, @@ -31,7 +31,7 @@ impl ProofBlindedProviderFactory { } } -impl BlindedProviderFactory for ProofBlindedProviderFactory +impl TrieNodeProviderFactory for ProofTrieNodeProviderFactory where T: TrieCursorFactory + Clone + Send + Sync, H: HashedCursorFactory + Clone + Send + Sync, @@ -79,12 +79,12 @@ impl ProofBlindedAccountProvider { } } -impl BlindedProvider for ProofBlindedAccountProvider +impl TrieNodeProvider for ProofBlindedAccountProvider where T: TrieCursorFactory + Clone + Send + Sync, H: HashedCursorFactory + Clone + Send + Sync, { - fn blinded_node(&self, path: &Nibbles) -> Result, SparseTrieError> { + fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { let start = enabled!(target: "trie::proof::blinded", Level::TRACE).then(Instant::now); let targets = MultiProofTargets::from_iter([(pad_path_to_key(path), HashSet::default())]); @@ -136,12 +136,12 @@ impl ProofBlindedStorageProvider { } } -impl BlindedProvider for ProofBlindedStorageProvider +impl TrieNodeProvider for ProofBlindedStorageProvider where T: TrieCursorFactory + Clone + Send + Sync, H: HashedCursorFactory + Clone + Send + Sync, { - fn blinded_node(&self, path: &Nibbles) -> Result, SparseTrieError> { + fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { let start = enabled!(target: "trie::proof::blinded", Level::TRACE).then(Instant::now); let targets = HashSet::from_iter([pad_path_to_key(path)]); diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index b3d8c6d1411..a3f8bbcc348 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -1,7 +1,7 @@ use crate::{ hashed_cursor::{HashedCursor, HashedCursorFactory}, prefix_set::TriePrefixSetsMut, - proof::{Proof, ProofBlindedProviderFactory}, + proof::{Proof, ProofTrieNodeProviderFactory}, trie_cursor::TrieCursorFactory, }; use alloy_rlp::EMPTY_STRING_CODE; @@ -21,7 +21,7 @@ use reth_execution_errors::{ }; use reth_trie_common::{MultiProofTargets, Nibbles}; use reth_trie_sparse::{ - blinded::{BlindedProvider, 
BlindedProviderFactory, RevealedNode},
+    provider::{RevealedNode, TrieNodeProvider, TrieNodeProviderFactory},
     SerialSparseTrie, SparseStateTrie,
 };
 use std::sync::{mpsc, Arc};
@@ -146,8 +146,8 @@ where
         }
 
         let (tx, rx) = mpsc::channel();
-        let blinded_provider_factory = WitnessBlindedProviderFactory::new(
-            ProofBlindedProviderFactory::new(
+        let blinded_provider_factory = WitnessTrieNodeProviderFactory::new(
+            ProofTrieNodeProviderFactory::new(
                 self.trie_cursor_factory,
                 self.hashed_cursor_factory,
                 Arc::new(self.prefix_sets),
@@ -237,24 +237,24 @@ where
 }
 
 #[derive(Debug, Clone)]
-struct WitnessBlindedProviderFactory<F> {
-    /// Blinded node provider factory.
-    provider_factory: F,
-    /// Sender for forwarding fetched blinded node.
-    tx: mpsc::Sender<Bytes>,
-}
-
-impl<F> WitnessBlindedProviderFactory<F> {
+struct WitnessTrieNodeProviderFactory<F> {
+    /// Trie node provider factory.
+    provider_factory: F,
+    /// Sender for forwarding fetched trie node.
+    tx: mpsc::Sender<Bytes>,
+}
+
+impl<F> WitnessTrieNodeProviderFactory<F> {
     const fn new(provider_factory: F, tx: mpsc::Sender<Bytes>) -> Self {
         Self { provider_factory, tx }
     }
 }
 
-impl<F> BlindedProviderFactory for WitnessBlindedProviderFactory<F>
+impl<F> TrieNodeProviderFactory for WitnessTrieNodeProviderFactory<F>
 where
-    F: BlindedProviderFactory,
-    F::AccountNodeProvider: BlindedProvider,
-    F::StorageNodeProvider: BlindedProvider,
+    F: TrieNodeProviderFactory,
+    F::AccountNodeProvider: TrieNodeProvider,
+    F::StorageNodeProvider: TrieNodeProvider,
 {
     type AccountNodeProvider = WitnessBlindedProvider<F::AccountNodeProvider>;
     type StorageNodeProvider = WitnessBlindedProvider<F::StorageNodeProvider>;
@@ -284,9 +284,9 @@ impl<P> WitnessBlindedProvider<P> {
     }
 }
 
-impl<P: BlindedProvider> BlindedProvider for WitnessBlindedProvider<P> {
-    fn blinded_node(&self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
-        let maybe_node = self.provider.blinded_node(path)?;
+impl<P: TrieNodeProvider> TrieNodeProvider for WitnessBlindedProvider<P> {
+    fn trie_node(&self, path: &Nibbles) -> Result<Option<RevealedNode>, SparseTrieError> {
+        let maybe_node = self.provider.trie_node(path)?;
         if let Some(node) = &maybe_node {
             self.tx
                 .send(node.node.clone())

From 55fa57bb111f894c3636241bc3df845ee4a8d973 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Tue, 15 Jul 2025 16:43:51 +0200
Subject: [PATCH 173/305] chore: box import future (#17424)

---
 crates/e2e-test-utils/src/testsuite/setup.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/crates/e2e-test-utils/src/testsuite/setup.rs b/crates/e2e-test-utils/src/testsuite/setup.rs
index 2b4968c1fdb..f79bb6d61cb 100644
--- a/crates/e2e-test-utils/src/testsuite/setup.rs
+++ b/crates/e2e-test-utils/src/testsuite/setup.rs
@@ -179,7 +179,8 @@ where
 {
     // If import_rlp_path is set, use apply_with_import instead
     if let Some(rlp_path) = self.import_rlp_path.take() {
-        return self.apply_with_import::(env, &rlp_path).await;
+        // Note: this future is quite large so we box it
+        return Box::pin(self.apply_with_import::(env, &rlp_path)).await;
     }
 
     let chain_spec =
         self.chain_spec.clone().ok_or_else(|| eyre!("Chain specification is required"))?;

From 4364cd09bc272a97196c6c8ab8cbfaf0a5958ad6 Mon Sep 17 00:00:00 2001
From: maradini77 <140460067+maradini77@users.noreply.github.com>
Date: Tue, 15 Jul 2025 16:57:33 +0200
Subject: [PATCH 174/305] refactor: use DefaultTrieNodeProviderFactory in state root calculation (#17425)

---
 crates/stateless/src/trie.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/stateless/src/trie.rs b/crates/stateless/src/trie.rs
index 582323614ef..f5c570b425d 100644
--- a/crates/stateless/src/trie.rs
+++ b/crates/stateless/src/trie.rs
@@ -237,7 +237,7 @@ fn calculate_state_root(
     // borrowing issues.
     let mut storage_results = Vec::with_capacity(state.storages.len());
 
-    // In `verify_execution_witness` a `DefaultBlindedProviderFactory` is used, so we use the same
+    // In `verify_execution_witness` a `DefaultTrieNodeProviderFactory` is used, so we use the same
     // again in here.
let provider_factory = DefaultTrieNodeProviderFactory; let storage_provider = DefaultTrieNodeProvider; From cd737052c3e062ac8da0f426655fbf0549fa4c94 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Tue, 15 Jul 2025 16:15:06 +0100 Subject: [PATCH 175/305] test(engine): enable parallel sparse trie in e2e tests (#17423) --- crates/engine/tree/tests/e2e-testsuite/main.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/engine/tree/tests/e2e-testsuite/main.rs b/crates/engine/tree/tests/e2e-testsuite/main.rs index cc5240f5f84..0b9162ab8c2 100644 --- a/crates/engine/tree/tests/e2e-testsuite/main.rs +++ b/crates/engine/tree/tests/e2e-testsuite/main.rs @@ -33,7 +33,10 @@ fn default_engine_tree_setup() -> Setup { )) .with_network(NetworkSetup::single_node()) .with_tree_config( - TreeConfig::default().with_legacy_state_root(false).with_has_enough_parallelism(true), + TreeConfig::default() + .with_legacy_state_root(false) + .with_has_enough_parallelism(true) + .with_enable_parallel_sparse_trie(true), ) } From 5d72088ecd99b74e44c97138934d6047517f123a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 15 Jul 2025 17:34:06 +0200 Subject: [PATCH 176/305] chore: add txpool submit examples (#17420) --- Cargo.lock | 2 + examples/txpool-tracing/Cargo.toml | 8 +- examples/txpool-tracing/src/main.rs | 2 + examples/txpool-tracing/src/submit.rs | 133 ++++++++++++++++++++++++++ 4 files changed, 143 insertions(+), 2 deletions(-) create mode 100644 examples/txpool-tracing/src/submit.rs diff --git a/Cargo.lock b/Cargo.lock index ab6558cf6a6..744dee7a2b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3610,9 +3610,11 @@ dependencies = [ name = "example-txpool-tracing" version = "0.0.0" dependencies = [ + "alloy-network", "alloy-primitives", "alloy-rpc-types-trace", "clap", + "eyre", "futures-util", "reth-ethereum", ] diff --git a/examples/txpool-tracing/Cargo.toml b/examples/txpool-tracing/Cargo.toml index 57c93485ccf..df72dd193f9 100644 --- a/examples/txpool-tracing/Cargo.toml +++ b/examples/txpool-tracing/Cargo.toml @@ -6,8 +6,12 @@ edition.workspace = true license.workspace = true [dependencies] -reth-ethereum = { workspace = true, features = ["node", "pool", "cli"] } +reth-ethereum = { workspace = true, features = ["node", "pool", "cli", "rpc"] } + +alloy-primitives.workspace = true alloy-rpc-types-trace.workspace = true +alloy-network.workspace = true + clap = { workspace = true, features = ["derive"] } futures-util.workspace = true -alloy-primitives.workspace = true +eyre.workspace = true diff --git a/examples/txpool-tracing/src/main.rs b/examples/txpool-tracing/src/main.rs index 655b8889f6d..a1b61422cb9 100644 --- a/examples/txpool-tracing/src/main.rs +++ b/examples/txpool-tracing/src/main.rs @@ -21,6 +21,8 @@ use reth_ethereum::{ rpc::eth::primitives::TransactionRequest, }; +mod submit; + fn main() { Cli::::parse() .run(|builder, args| async move { diff --git a/examples/txpool-tracing/src/submit.rs b/examples/txpool-tracing/src/submit.rs new file mode 100644 index 00000000000..04744f37244 --- /dev/null +++ b/examples/txpool-tracing/src/submit.rs @@ -0,0 +1,133 @@ +//! 
Transaction submission functionality for the txpool tracing example +#![allow(unused)] +#![allow(clippy::too_many_arguments)] + +use alloy_network::{Ethereum, EthereumWallet, NetworkWallet, TransactionBuilder}; +use alloy_primitives::{Address, TxHash, U256}; +use futures_util::StreamExt; +use reth_ethereum::{ + node::api::{FullNodeComponents, NodeTypes}, + pool::{PoolTransaction, TransactionEvent, TransactionOrigin, TransactionPool}, + primitives::SignerRecoverable, + rpc::eth::primitives::TransactionRequest, + EthPrimitives, TransactionSigned, +}; + +/// Submit a transaction to the transaction pool +/// +/// This function demonstrates how to create, sign, and submit a transaction +/// to the reth transaction pool. +pub async fn submit_transaction( + node: &FC, + wallet: &EthereumWallet, + to: Address, + data: Vec, + nonce: u64, + chain_id: u64, + gas_limit: u64, + max_priority_fee_per_gas: u128, + max_fee_per_gas: u128, +) -> eyre::Result +where + // This enforces `EthPrimitives` types for this node, this unlocks the proper conversions when + FC: FullNodeComponents>, +{ + // Create the transaction request + let request = TransactionRequest::default() + .with_to(to) + .with_input(data) + .with_nonce(nonce) + .with_chain_id(chain_id) + .with_gas_limit(gas_limit) + .with_max_priority_fee_per_gas(max_priority_fee_per_gas) + .with_max_fee_per_gas(max_fee_per_gas); + + // Sign the transaction + let transaction: TransactionSigned = + NetworkWallet::::sign_request(wallet, request).await?.into(); + // Get the transaction hash before submitting + let tx_hash = *transaction.hash(); + + // Recover the transaction + let transaction = transaction.try_into_recovered()?; + + // Convert to pool transaction type + let pool_transaction = + ::Transaction::try_from_consensus(transaction) + .map_err(|e| eyre::eyre!("Failed to convert to pool transaction: {e}"))?; + + // Submit the transaction to the pool and get event stream + let mut tx_events = node + .pool() + .add_transaction_and_subscribe(TransactionOrigin::Local, pool_transaction) + .await?; + + // Wait for the transaction to be added to the pool + while let Some(event) = tx_events.next().await { + match event { + TransactionEvent::Mined(_) => { + println!("Transaction was mined: {:?}", tx_events.hash()); + break; + } + TransactionEvent::Pending => { + println!("Transaction added to pending pool: {:?}", tx_events.hash()); + break; + } + TransactionEvent::Discarded => { + return Err(eyre::eyre!("Transaction discarded: {:?}", tx_events.hash(),)); + } + _ => { + // Continue waiting for added or rejected event + } + } + } + + Ok(tx_hash) +} + +/// Helper function to submit a simple ETH transfer transaction +/// +/// This will first populate a tx request, sign it then submit to the pool in the required format. 
+pub async fn submit_eth_transfer( + node: &FC, + wallet: &EthereumWallet, + to: Address, + value: U256, + nonce: u64, + chain_id: u64, + gas_limit: u64, + max_priority_fee_per_gas: u128, + max_fee_per_gas: u128, +) -> eyre::Result +where + FC: FullNodeComponents>, +{ + // Create the transaction request for ETH transfer + let request = TransactionRequest::default() + .with_to(to) + .with_value(value) + .with_nonce(nonce) + .with_chain_id(chain_id) + .with_gas_limit(gas_limit) + .with_max_priority_fee_per_gas(max_priority_fee_per_gas) + .with_max_fee_per_gas(max_fee_per_gas); + + // Sign the transaction + let transaction: TransactionSigned = + NetworkWallet::::sign_request(wallet, request).await?.into(); + // Recover the transaction + let transaction = transaction.try_into_recovered()?; + + // Get the transaction hash + let tx_hash = *transaction.hash(); + + // Convert to pool transaction type + let pool_transaction = + ::Transaction::try_from_consensus(transaction) + .map_err(|e| eyre::eyre!("Failed to convert to pool transaction: {e}"))?; + + // Submit the transaction to the pool + node.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; + + Ok(tx_hash) +} From 76b19f37ab2f5ac2933a14ffec85a8e320844f1e Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Tue, 15 Jul 2025 15:17:23 -0400 Subject: [PATCH 177/305] chore(consensus): refactor fork and ommers check into standalone fn (#17406) --- crates/consensus/common/src/validation.rs | 30 +++++++++++++++++++---- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 450817f2705..72389acdce8 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -136,6 +136,31 @@ pub fn validate_block_pre_execution( block: &SealedBlock, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> +where + B: Block, + ChainSpec: EthereumHardforks, +{ + post_merge_hardfork_fields(block, chain_spec)?; + + // Check transaction root + if let Err(error) = block.ensure_transaction_root_valid() { + return Err(ConsensusError::BodyTransactionRootDiff(error.into())) + } + + Ok(()) +} + +/// Validates the ommers hash and other fork-specific fields. +/// +/// These fork-specific validations are: +/// * EIP-4895 withdrawals validation, if shanghai is active based on the given chainspec. See more +/// information about the specific checks in [`validate_shanghai_withdrawals`]. +/// * EIP-4844 blob gas validation, if cancun is active based on the given chainspec. See more +/// information about the specific checks in [`validate_cancun_gas`]. 
+pub fn post_merge_hardfork_fields( + block: &SealedBlock, + chain_spec: &ChainSpec, +) -> Result<(), ConsensusError> where B: Block, ChainSpec: EthereumHardforks, @@ -152,11 +177,6 @@ where )) } - // Check transaction root - if let Err(error) = block.ensure_transaction_root_valid() { - return Err(ConsensusError::BodyTransactionRootDiff(error.into())) - } - // EIP-4895: Beacon chain push withdrawals as operations if chain_spec.is_shanghai_active_at_timestamp(block.timestamp()) { validate_shanghai_withdrawals(block)?; From b0d05b69e2cd002c20d9e0898b8446f201a87a18 Mon Sep 17 00:00:00 2001 From: adust Date: Wed, 16 Jul 2025 17:00:16 +0900 Subject: [PATCH 178/305] refactor: remove unused sparse trie methods (#17433) Co-authored-by: Claude --- crates/trie/sparse/src/state.rs | 166 +------------------------------- 1 file changed, 1 insertion(+), 165 deletions(-) diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 60f7dab6c3b..3e9664581bb 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -167,90 +167,6 @@ where self.storages.insert(address, storage_trie); } - /// Reveal unknown trie paths from provided leaf path and its proof for the account. - /// - /// Panics if trie updates retention is enabled. - /// - /// NOTE: This method does not extensively validate the proof. - pub fn reveal_account( - &mut self, - account: B256, - proof: impl IntoIterator, - ) -> SparseStateTrieResult<()> { - assert!(!self.retain_updates); - - if self.is_account_revealed(account) { - return Ok(()); - } - - let mut proof = proof.into_iter().peekable(); - - let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; - - // Reveal root node if it wasn't already. - let trie = self.state.reveal_root(root_node, TrieMasks::none(), self.retain_updates)?; - - // Reveal the remaining proof nodes. - for (path, bytes) in proof { - if self.revealed_account_paths.contains(&path) { - continue - } - let node = TrieNode::decode(&mut &bytes[..])?; - trie.reveal_node(path, node, TrieMasks::none())?; - - // Track the revealed path. - self.revealed_account_paths.insert(path); - } - - Ok(()) - } - - /// Reveal unknown trie paths from provided leaf path and its proof for the storage slot. - /// - /// Panics if trie updates retention is enabled. - /// - /// NOTE: This method does not extensively validate the proof. - pub fn reveal_storage_slot( - &mut self, - account: B256, - slot: B256, - proof: impl IntoIterator, - ) -> SparseStateTrieResult<()> { - assert!(!self.retain_updates); - - if self.is_storage_slot_revealed(account, slot) { - return Ok(()); - } - - let mut proof = proof.into_iter().peekable(); - - let Some(root_node) = self.validate_root_node(&mut proof)? else { return Ok(()) }; - - // Reveal root node if it wasn't already. - let trie = self.storages.entry(account).or_default().reveal_root( - root_node, - TrieMasks::none(), - self.retain_updates, - )?; - - let revealed_nodes = self.revealed_storage_paths.entry(account).or_default(); - - // Reveal the remaining proof nodes. - for (path, bytes) in proof { - // If the node is already revealed, skip it. - if revealed_nodes.contains(&path) { - continue - } - let node = TrieNode::decode(&mut &bytes[..])?; - trie.reveal_node(path, node, TrieMasks::none())?; - - // Track the revealed path. - revealed_nodes.insert(path); - } - - Ok(()) - } - /// Reveal unknown trie paths from multiproof. /// NOTE: This method does not extensively validate the proof. 
pub fn reveal_multiproof(&mut self, multiproof: MultiProof) -> SparseStateTrieResult<()> { @@ -535,26 +451,6 @@ where Ok(()) } - /// Validates the root node of the proof and returns it if it exists and is valid. - fn validate_root_node>( - &self, - proof: &mut Peekable, - ) -> SparseStateTrieResult> { - // Validate root node. - let Some((path, node)) = proof.next() else { return Ok(None) }; - if !path.is_empty() { - return Err(SparseStateTrieErrorKind::InvalidRootNode { path, node }.into()) - } - - // Decode root node and perform sanity check. - let root_node = TrieNode::decode(&mut &node[..])?; - if matches!(root_node, TrieNode::EmptyRoot) && proof.peek().is_some() { - return Err(SparseStateTrieErrorKind::InvalidRootNode { path, node }.into()) - } - - Ok(Some(root_node)) - } - /// Validates the decoded root node of the proof and returns it if it exists and is valid. fn validate_root_node_decoded>( proof: &mut Peekable, @@ -909,11 +805,9 @@ mod tests { use alloy_primitives::{ b256, map::{HashMap, HashSet}, - Bytes, U256, + U256, }; - use alloy_rlp::EMPTY_STRING_CODE; use arbitrary::Arbitrary; - use assert_matches::assert_matches; use rand::{rngs::StdRng, Rng, SeedableRng}; use reth_primitives_traits::Account; use reth_trie::{updates::StorageTrieUpdates, HashBuilder, MultiProof, EMPTY_ROOT_HASH}; @@ -922,64 +816,6 @@ mod tests { BranchNode, LeafNode, StorageMultiProof, TrieMask, }; - #[test] - fn validate_root_node_first_node_not_root() { - let sparse = SparseStateTrie::::default(); - let proof = [(Nibbles::from_nibbles([0x1]), Bytes::from([EMPTY_STRING_CODE]))]; - assert_matches!( - sparse.validate_root_node(&mut proof.into_iter().peekable()).map_err(|e| e.into_kind()), - Err(SparseStateTrieErrorKind::InvalidRootNode { .. }) - ); - } - - #[test] - fn validate_root_node_invalid_proof_with_empty_root() { - let sparse = SparseStateTrie::::default(); - let proof = [ - (Nibbles::default(), Bytes::from([EMPTY_STRING_CODE])), - (Nibbles::from_nibbles([0x1]), Bytes::new()), - ]; - assert_matches!( - sparse.validate_root_node(&mut proof.into_iter().peekable()).map_err(|e| e.into_kind()), - Err(SparseStateTrieErrorKind::InvalidRootNode { .. 
}) - ); - } - - #[test] - fn reveal_account_empty() { - let retainer = ProofRetainer::from_iter([Nibbles::default()]); - let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); - hash_builder.root(); - let proofs = hash_builder.take_proof_nodes(); - assert_eq!(proofs.len(), 1); - - let mut sparse = SparseStateTrie::::default(); - assert_eq!(sparse.state, SparseTrie::Blind(None)); - - sparse.reveal_account(Default::default(), proofs.into_inner()).unwrap(); - assert_eq!(sparse.state, SparseTrie::revealed_empty()); - } - - #[test] - fn reveal_storage_slot_empty() { - let retainer = ProofRetainer::from_iter([Nibbles::default()]); - let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); - hash_builder.root(); - let proofs = hash_builder.take_proof_nodes(); - assert_eq!(proofs.len(), 1); - - let mut sparse = SparseStateTrie::::default(); - assert!(sparse.storages.is_empty()); - - sparse - .reveal_storage_slot(Default::default(), Default::default(), proofs.into_inner()) - .unwrap(); - assert_eq!( - sparse.storages, - HashMap::from_iter([(Default::default(), SparseTrie::revealed_empty())]) - ); - } - #[test] fn reveal_account_path_twice() { let provider_factory = DefaultTrieNodeProviderFactory; From 26433246682431aba070b5771f90e454296f5e8d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Jul 2025 12:53:48 +0200 Subject: [PATCH 179/305] chore: bump revm 273 (#17412) --- Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 509741c6186..480bcd54c51 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -454,7 +454,7 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { version = "27.0.2", default-features = false } +revm = { version = "27.0.3", default-features = false } revm-bytecode = { version = "6.0.1", default-features = false } revm-database = { version = "7.0.1", default-features = false } revm-state = { version = "7.0.1", default-features = false } @@ -464,7 +464,7 @@ revm-inspector = { version = "8.0.2", default-features = false } revm-context = { version = "8.0.2", default-features = false } revm-context-interface = { version = "8.0.1", default-features = false } revm-database-interface = { version = "7.0.1", default-features = false } -op-revm = { version = "8.0.2", default-features = false } +op-revm = { version = "8.0.3", default-features = false } revm-inspectors = "0.26.5" # eth From 8e5efb36c33749b1adef3dd355609c12e72ec8b4 Mon Sep 17 00:00:00 2001 From: Rez Date: Wed, 16 Jul 2025 20:53:13 +1000 Subject: [PATCH 180/305] feat: make revm_spec generic over header type (#17436) --- crates/ethereum/evm/src/config.rs | 32 ++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/crates/ethereum/evm/src/config.rs b/crates/ethereum/evm/src/config.rs index 676b790edb7..08f42540d08 100644 --- a/crates/ethereum/evm/src/config.rs +++ b/crates/ethereum/evm/src/config.rs @@ -1,14 +1,15 @@ -use alloy_consensus::Header; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_ethereum_forks::{EthereumHardfork, Hardforks}; +use reth_primitives_traits::BlockHeader; use revm::primitives::hardfork::SpecId; /// Map the latest active hardfork at the given header to a revm [`SpecId`]. 
-pub fn revm_spec<C>(chain_spec: &C, header: &Header) -> SpecId
+pub fn revm_spec<C, H>(chain_spec: &C, header: &H) -> SpecId
 where
     C: EthereumHardforks + EthChainSpec + Hardforks,
+    H: BlockHeader,
 {
-    revm_spec_by_timestamp_and_block_number(chain_spec, header.timestamp, header.number)
+    revm_spec_by_timestamp_and_block_number(chain_spec, header.timestamp(), header.number())
 }
 
 /// Map the latest active hardfork at the given timestamp or block number to a revm [`SpecId`].
@@ -99,6 +100,7 @@ where
 mod tests {
     use super::*;
     use crate::U256;
+    use alloy_consensus::Header;
     use reth_chainspec::{ChainSpecBuilder, MAINNET};
 
     #[test]
@@ -129,74 +131,74 @@ mod tests {
     #[test]
     fn test_to_revm_spec() {
         assert_eq!(
-            revm_spec(&ChainSpecBuilder::mainnet().cancun_activated().build(), &Default::default()),
+            revm_spec(&ChainSpecBuilder::mainnet().cancun_activated().build(), &Header::default()),
             SpecId::CANCUN
         );
         assert_eq!(
             revm_spec(
                 &ChainSpecBuilder::mainnet().shanghai_activated().build(),
-                &Default::default()
+                &Header::default()
             ),
             SpecId::SHANGHAI
         );
         assert_eq!(
-            revm_spec(&ChainSpecBuilder::mainnet().paris_activated().build(), &Default::default()),
+            revm_spec(&ChainSpecBuilder::mainnet().paris_activated().build(), &Header::default()),
             SpecId::MERGE
         );
         assert_eq!(
-            revm_spec(&ChainSpecBuilder::mainnet().london_activated().build(), &Default::default()),
+            revm_spec(&ChainSpecBuilder::mainnet().london_activated().build(), &Header::default()),
            SpecId::LONDON
         );
         assert_eq!(
-            revm_spec(&ChainSpecBuilder::mainnet().berlin_activated().build(), &Default::default()),
+            revm_spec(&ChainSpecBuilder::mainnet().berlin_activated().build(), &Header::default()),
             SpecId::BERLIN
         );
         assert_eq!(
             revm_spec(
                 &ChainSpecBuilder::mainnet().istanbul_activated().build(),
-                &Default::default()
+                &Header::default()
             ),
             SpecId::ISTANBUL
         );
         assert_eq!(
             revm_spec(
                 &ChainSpecBuilder::mainnet().petersburg_activated().build(),
-                &Default::default()
+                &Header::default()
            ),
             SpecId::PETERSBURG
         );
         assert_eq!(
             revm_spec(
                 &ChainSpecBuilder::mainnet().byzantium_activated().build(),
-                &Default::default()
+                &Header::default()
             ),
             SpecId::BYZANTIUM
         );
         assert_eq!(
             revm_spec(
                 &ChainSpecBuilder::mainnet().spurious_dragon_activated().build(),
-                &Default::default()
+                &Header::default()
             ),
             SpecId::SPURIOUS_DRAGON
         );
         assert_eq!(
             revm_spec(
                 &ChainSpecBuilder::mainnet().tangerine_whistle_activated().build(),
-                &Default::default()
+                &Header::default()
             ),
             SpecId::TANGERINE
         );
         assert_eq!(
             revm_spec(
                 &ChainSpecBuilder::mainnet().homestead_activated().build(),
-                &Default::default()
+                &Header::default()
             ),
             SpecId::HOMESTEAD
         );
         assert_eq!(
             revm_spec(
                 &ChainSpecBuilder::mainnet().frontier_activated().build(),
-                &Default::default()
+                &Header::default()
             ),
             SpecId::FRONTIER
         );

From 1179da222228188ee952f183a0aba3fd9ff4a8c3 Mon Sep 17 00:00:00 2001
From: cakevm
Date: Wed, 16 Jul 2025 12:56:13 +0200
Subject: [PATCH 181/305] chore: simplify blob count extraction using new blob_count() method (#17439)

---
 crates/transaction-pool/src/validate/eth.rs              | 3 +--
 examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs | 2 +-
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs
index 76d9da17969..80ee4a040b5 100644
--- a/crates/transaction-pool/src/validate/eth.rs
+++ b/crates/transaction-pool/src/validate/eth.rs
@@ -493,8 +493,7 @@ where
             ))
         }
 
-        let blob_count =
-            transaction.blob_versioned_hashes().map(|b| b.len() as u64).unwrap_or(0);
+        let blob_count = transaction.blob_count().unwrap_or(0);
         if blob_count == 0 {
             // no blobs
             return Err(TransactionValidationOutcome::Invalid(
diff --git a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs
index 9bbb198ae12..56755b1e730 100644
--- a/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs
+++ b/examples/beacon-api-sidecar-fetcher/src/mined_sidecar.rs
@@ -103,7 +103,7 @@ where
             .body()
             .transactions()
             .filter(|tx| tx.is_eip4844())
-            .map(|tx| (tx.clone(), tx.blob_versioned_hashes().unwrap().len()))
+            .map(|tx| (tx.clone(), tx.blob_count().unwrap_or(0) as usize))
             .collect();
 
         let mut all_blobs_available = true;

From c01f230ffbf3fd3420ead48b19ecd63e91520d69 Mon Sep 17 00:00:00 2001
From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com>
Date: Wed, 16 Jul 2025 14:11:17 +0100
Subject: [PATCH 182/305] chore(bin): missing `--jwt-secret` message in reth-bench (#17443)

---
 bin/reth-bench/src/bench/context.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bin/reth-bench/src/bench/context.rs b/bin/reth-bench/src/bench/context.rs
index 197af246c19..57b4067999a 100644
--- a/bin/reth-bench/src/bench/context.rs
+++ b/bin/reth-bench/src/bench/context.rs
@@ -61,7 +61,7 @@ impl BenchContext {
         let auth_jwt = bench_args
             .auth_jwtsecret
             .clone()
-            .ok_or_else(|| eyre::eyre!("--jwtsecret must be provided for authenticated RPC"))?;
+            .ok_or_else(|| eyre::eyre!("--jwt-secret must be provided for authenticated RPC"))?;
 
         // fetch jwt from file
         //

From 8cbd119940dcb5c5cfe53ac26ee38c63be70fb13 Mon Sep 17 00:00:00 2001
From: maradini77 <140460067+maradini77@users.noreply.github.com>
Date: Wed, 16 Jul 2025 15:13:04 +0200
Subject: [PATCH 183/305] fix: Rename WitnessBlindedProvider to WitnessTrieNodeProvider (#17426)

---
 crates/trie/trie/src/witness.rs | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs
index a3f8bbcc348..67da561f3d8 100644
--- a/crates/trie/trie/src/witness.rs
+++ b/crates/trie/trie/src/witness.rs
@@ -256,35 +256,35 @@ where
     F::AccountNodeProvider: TrieNodeProvider,
     F::StorageNodeProvider: TrieNodeProvider,
 {
-    type AccountNodeProvider = WitnessBlindedProvider<F::AccountNodeProvider>;
-    type StorageNodeProvider = WitnessBlindedProvider<F::StorageNodeProvider>;
+    type AccountNodeProvider = WitnessTrieNodeProvider<F::AccountNodeProvider>;
+    type StorageNodeProvider = WitnessTrieNodeProvider<F::StorageNodeProvider>;
 
     fn account_node_provider(&self) -> Self::AccountNodeProvider {
         let provider = self.provider_factory.account_node_provider();
-        WitnessBlindedProvider::new(provider, self.tx.clone())
+        WitnessTrieNodeProvider::new(provider, self.tx.clone())
     }
 
     fn storage_node_provider(&self, account: B256) -> Self::StorageNodeProvider {
         let provider = self.provider_factory.storage_node_provider(account);
-        WitnessBlindedProvider::new(provider, self.tx.clone())
+        WitnessTrieNodeProvider::new(provider, self.tx.clone())
     }
 }
 
 #[derive(Debug)]
-struct WitnessBlindedProvider<P> {
+struct WitnessTrieNodeProvider<P> {
     /// Proof-based blinded.
     provider: P,
     /// Sender for forwarding fetched blinded node.
     tx: mpsc::Sender<RevealedNode>,
 }
 
-impl<P> WitnessBlindedProvider<P> {
+impl<P> WitnessTrieNodeProvider<P> {
     const fn new(provider: P, tx: mpsc::Sender<RevealedNode>) -> Self {
         Self { provider, tx }
     }
 }
 
-impl<P: TrieNodeProvider> TrieNodeProvider for WitnessBlindedProvider<P> {
+impl<P: TrieNodeProvider> TrieNodeProvider for WitnessTrieNodeProvider<P>
{ fn trie_node(&self, path: &Nibbles) -> Result, SparseTrieError> { let maybe_node = self.provider.trie_node(path)?; if let Some(node) = &maybe_node { From fdefed3d79270f4593b17c713eb0209c067027d8 Mon Sep 17 00:00:00 2001 From: viktorking7 <140458814+viktorking7@users.noreply.github.com> Date: Wed, 16 Jul 2025 15:44:06 +0200 Subject: [PATCH 184/305] fix: Update Docker Compose Docs Link in etc/README.md (#17414) --- etc/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/etc/README.md b/etc/README.md index 4f4ce7f20e4..6b6cff73e3c 100644 --- a/etc/README.md +++ b/etc/README.md @@ -13,7 +13,7 @@ up to date. ### Docker Compose To run Reth, Grafana or Prometheus with Docker Compose, refer to -the [docker docs](/book/installation/docker.md#using-docker-compose). +the [docker docs](https://reth.rs/installation/docker#using-docker-compose). ### Grafana @@ -75,4 +75,4 @@ If you are running Reth and Grafana outside of docker, and wish to import new Gr 1. Delete the old dashboard If you are running Reth and Grafana using docker, after having pulled the updated dashboards from `main`, restart the -Grafana service. This will update all dashboards. \ No newline at end of file +Grafana service. This will update all dashboards. From 2d1f8cdea116e96b63ed64e6f2327f4a08c4b81f Mon Sep 17 00:00:00 2001 From: anim001k <140460766+anim001k@users.noreply.github.com> Date: Wed, 16 Jul 2025 16:26:39 +0200 Subject: [PATCH 185/305] fix: rename `highest_static_fileted_block` to `highest_static_file_block` (#17427) --- crates/static-file/types/src/lib.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/crates/static-file/types/src/lib.rs b/crates/static-file/types/src/lib.rs index 5d638493643..caa0bd39e9e 100644 --- a/crates/static-file/types/src/lib.rs +++ b/crates/static-file/types/src/lib.rs @@ -110,12 +110,11 @@ impl StaticFileTargets { (self.block_meta.as_ref(), static_files.block_meta), ] .iter() - .all(|(target_block_range, highest_static_fileted_block)| { + .all(|(target_block_range, highest_static_file_block)| { target_block_range.is_none_or(|target_block_range| { *target_block_range.start() == - highest_static_fileted_block.map_or(0, |highest_static_fileted_block| { - highest_static_fileted_block + 1 - }) + highest_static_file_block + .map_or(0, |highest_static_file_block| highest_static_file_block + 1) }) }) } From f86959f4c15dcabd881fa0f283f2f4139debc672 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 16 Jul 2025 16:38:10 +0200 Subject: [PATCH 186/305] docs: enhance direct database access documentation (#17445) Co-authored-by: Claude --- .../sdk/examples/standalone-components.mdx | 81 +++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/docs/vocs/docs/pages/sdk/examples/standalone-components.mdx b/docs/vocs/docs/pages/sdk/examples/standalone-components.mdx index 3c16e1cf123..8b77913f539 100644 --- a/docs/vocs/docs/pages/sdk/examples/standalone-components.mdx +++ b/docs/vocs/docs/pages/sdk/examples/standalone-components.mdx @@ -4,6 +4,87 @@ This guide demonstrates how to use Reth components independently without running ## Direct Database Access +Reth uses MDBX as its primary database backend, storing blockchain data in a structured format. You can access this database directly from external processes for read-only operations, which is useful for analytics, indexing, or building custom tools. + +### Understanding the Database Architecture + +Reth's storage architecture consists of two main components: + +1. 
**MDBX Database**: Primary storage for blockchain state, headers, bodies, receipts, and indices
+2. **Static Files**: Immutable historical data (headers, bodies, receipts, transactions) stored in compressed files for better performance
+
+Both components must be accessed together for complete data access.
+
+### Database Location
+
+The database is stored in the node's data directory:
+- **Default location**: `$HOME/.local/share/reth/mainnet/db` (Linux/macOS) or `%APPDATA%\reth\mainnet\db` (Windows)
+- **Custom location**: Set with `--datadir` flag when running reth
+- **Static files**: Located in `<datadir>/static_files` subdirectory
+
+### Opening the Database from External Processes
+
+When accessing the database while a node is running, you **must** open it in read-only mode to prevent corruption and conflicts.
+
+#### Using the High-Level API
+
+The safest way to access the database is through Reth's provider factory:
+
+```rust
+use reth_ethereum::node::EthereumNode;
+use reth_ethereum::chainspec::MAINNET;
+
+// Open with automatic configuration
+let factory = EthereumNode::provider_factory_builder()
+    .open_read_only(MAINNET.clone(), "path/to/datadir")?;
+
+// Get a provider for queries
+let provider = factory.provider()?;
+let latest_block = provider.last_block_number()?;
+```
+
+### Performance Implications
+
+External reads while the node is syncing or processing blocks:
+
+- **I/O Competition**: May compete with the node for disk I/O
+- **Cache Pollution**: Can evict hot data from OS page cache
+- **CPU Impact**: Complex queries can impact node performance
+
+### Important Considerations
+
+1. **Read-Only Access Only**: Never open the database in write mode while the regular reth process is running.
+
+2. **Consistency**: When reading from an external process:
+   - Data may be slightly behind the latest processed block (if it hasn't been written to disk yet)
+   - Use transactions for consistent views across multiple reads
+   - Be aware of potential reorgs affecting recent blocks
+
+3. **Performance**:
+   - MDBX uses memory-mapped files for efficient access
+   - Multiple readers don't block each other
+   - Consider caching frequently accessed data
+
+### Disabling Long-Lived Read Transactions
+
+By default, long-lived read transactions are terminated after a few minutes. This is because a long-running read transaction can cause the free list to grow while changes are made to the database (i.e. while the reth node is running).
+To opt out, this safety mechanism can be disabled:
+
+```rust
+let factory = EthereumNode::provider_factory_builder()
+    .open_read_only(MAINNET.clone(), ReadOnlyConfig::from_datadir("datadir").disable_long_read_transaction_safety())?;
+```
+
+### Real-time Block Access Configuration
+
+Reth buffers new blocks in memory before persisting them to disk for performance optimization. If your external process needs immediate access to the latest blocks, configure the node to persist blocks immediately:
+
+- `--engine.persistence-threshold 0` - Persists new canonical blocks to disk immediately
+- `--engine.memory-block-buffer-target 0` - Disables in-memory block buffering
+
+Use both flags together to ensure external processes can read new blocks without delay.
+
+As soon as the reth process has persisted the block data, the external reader can read it from the database.
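+
+To make the effect of these flags concrete, here is a minimal polling sketch that reuses the read-only `factory` from the example above. This is an illustrative sketch rather than part of the reth API surface: the polling loop, interval, and `last_seen` variable are assumptions, and the exact provider trait imports may vary between reth versions.
+
+```rust
+use std::{thread::sleep, time::Duration};
+
+// Assumes `factory` was opened with `open_read_only` as shown earlier.
+let mut last_seen = 0u64;
+loop {
+    // Open a fresh read transaction on each iteration so newly
+    // persisted blocks become visible to this external process.
+    let provider = factory.provider()?;
+    let tip = provider.last_block_number()?;
+    if tip > last_seen {
+        println!("new canonical tip: {tip}");
+        last_seen = tip;
+    }
+    // Drop the provider to end the read transaction promptly.
+    drop(provider);
+    sleep(Duration::from_millis(250));
+}
+```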
 ## Next Steps

From 825222f3b0be1ca4db81f91652b20f7f37e0acdb Mon Sep 17 00:00:00 2001
From: maradini77 <140460067+maradini77@users.noreply.github.com>
Date: Wed, 16 Jul 2025 16:46:18 +0200
Subject: [PATCH 187/305] fix: Update JWT Secret Flag in Benchmark Documentation (#17447)

---
 bin/reth-bench/README.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/bin/reth-bench/README.md b/bin/reth-bench/README.md
index 3f7ae7f0377..9d8a04f8deb 100644
--- a/bin/reth-bench/README.md
+++ b/bin/reth-bench/README.md
@@ -49,7 +49,7 @@ reth stage unwind to-block 21000000
 The following `reth-bench` command would then start the benchmark at block 21,000,000:
 
 ```bash
-reth-bench new-payload-fcu --rpc-url <RPC-URL> --from 21000000 --to <END-BLOCK> --jwtsecret <JWT-SECRET-PATH>
+reth-bench new-payload-fcu --rpc-url <RPC-URL> --from 21000000 --to <END-BLOCK> --jwt-secret <JWT-SECRET-PATH>
 ```
 
 Finally, make sure that reth is built using a build profile suitable for what you are trying to measure.
@@ -80,11 +80,11 @@ RUSTFLAGS="-C target-cpu=native" cargo build --profile profiling --no-default-fe
 ### Run the Benchmark:
 First, start the reth node. Here is an example that runs `reth` compiled with the `profiling` profile, runs `samply`, and configures `reth` to run with metrics enabled:
 ```bash
-samply record -p 3001 target/profiling/reth node --metrics localhost:9001 --authrpc.jwtsecret <JWT-PATH>
+samply record -p 3001 target/profiling/reth node --metrics localhost:9001 --authrpc.jwt-secret <JWT-PATH>
 ```
 
 ```bash
-reth-bench new-payload-fcu --rpc-url <RPC-URL> --from <FROM-BLOCK> --to <TO-BLOCK> --jwtsecret <JWT-PATH>
+reth-bench new-payload-fcu --rpc-url <RPC-URL> --from <FROM-BLOCK> --to <TO-BLOCK> --jwt-secret <JWT-PATH>
 ```
 
 Replace `<RPC-URL>`, `<FROM-BLOCK>`, and `<TO-BLOCK>` with the appropriate values for your testing environment. `<RPC-URL>` should be the URL of an RPC endpoint that can provide the blocks that will be used during the execution.

From 802be64ef8ddeffe9fbc5468b1d03a3dc9070717 Mon Sep 17 00:00:00 2001
From: Brian Picciano
Date: Wed, 16 Jul 2025 17:22:32 +0200
Subject: [PATCH 188/305] perf(trie): parallelize ParallelSparseTrie::reveal_nodes (#17372)

---
 .../configured_sparse_trie.rs                 |   9 +-
 crates/trie/sparse-parallel/src/lower.rs      |   8 +
 crates/trie/sparse-parallel/src/trie.rs       | 485 ++++++++++++------
 crates/trie/sparse/src/state.rs               | 233 +++++----
 crates/trie/sparse/src/traits.rs              |  47 +-
 crates/trie/sparse/src/trie.rs                |  16 +-
 6 files changed, 517 insertions(+), 281 deletions(-)

diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs
index 411d8da238e..83f8c82b529 100644
--- a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs
+++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs
@@ -4,7 +4,7 @@ use alloy_primitives::B256;
 use reth_trie::{Nibbles, TrieNode};
 use reth_trie_sparse::{
     errors::SparseTrieResult, provider::TrieNodeProvider, LeafLookup, LeafLookupError,
-    SerialSparseTrie, SparseTrieInterface, SparseTrieUpdates, TrieMasks,
+    RevealedSparseNode, SerialSparseTrie, SparseTrieInterface, SparseTrieUpdates, TrieMasks,
 };
 use reth_trie_sparse_parallel::ParallelSparseTrie;
 use std::borrow::Cow;
@@ -77,6 +77,13 @@ impl SparseTrieInterface for ConfiguredSparseTrie {
         }
     }
 
+    fn reveal_nodes(&mut self, nodes: Vec<RevealedSparseNode>) -> SparseTrieResult<()> {
+        match self {
+            Self::Serial(trie) => trie.reveal_nodes(nodes),
+            Self::Parallel(trie) => trie.reveal_nodes(nodes),
+        }
+    }
+
     fn update_leaf(
         &mut self,
         full_path: Nibbles,
diff --git a/crates/trie/sparse-parallel/src/lower.rs b/crates/trie/sparse-parallel/src/lower.rs
index 047e3a15a16..0a4356426e5 100644
---
a/crates/trie/sparse-parallel/src/lower.rs +++ b/crates/trie/sparse-parallel/src/lower.rs @@ -80,6 +80,14 @@ impl LowerSparseSubtrie { } } + /// Takes ownership of the underlying [`SparseSubtrie`] if revealed, putting this + /// `LowerSparseSubtrie` will be put into the blinded state. + /// + /// Otherwise returns None. + pub(crate) fn take_revealed(&mut self) -> Option> { + self.take_revealed_if(|_| true) + } + /// Takes ownership of the underlying [`SparseSubtrie`] if revealed and the predicate returns /// true. /// diff --git a/crates/trie/sparse-parallel/src/trie.rs b/crates/trie/sparse-parallel/src/trie.rs index 9b123ef0f67..ffc40ded86b 100644 --- a/crates/trie/sparse-parallel/src/trie.rs +++ b/crates/trie/sparse-parallel/src/trie.rs @@ -13,11 +13,14 @@ use reth_trie_common::{ }; use reth_trie_sparse::{ provider::{RevealedNode, TrieNodeProvider}, - LeafLookup, LeafLookupError, RlpNodeStackItem, SparseNode, SparseNodeType, SparseTrieInterface, - SparseTrieUpdates, TrieMasks, + LeafLookup, LeafLookupError, RevealedSparseNode, RlpNodeStackItem, SparseNode, SparseNodeType, + SparseTrieInterface, SparseTrieUpdates, TrieMasks, }; use smallvec::SmallVec; -use std::sync::mpsc; +use std::{ + cmp::{Ord, Ordering, PartialOrd}, + sync::mpsc, +}; use tracing::{instrument, trace}; /// The maximum length of a path, in nibbles, which belongs to the upper subtrie of a @@ -86,7 +89,7 @@ impl SparseTrieInterface for ParallelSparseTrie { self = self.with_updates(retain_updates); - self.reveal_node(Nibbles::default(), root, masks)?; + self.reveal_upper_node(Nibbles::default(), &root, masks)?; Ok(self) } @@ -95,62 +98,136 @@ impl SparseTrieInterface for ParallelSparseTrie { self } - fn reveal_node( - &mut self, - path: Nibbles, - node: TrieNode, - masks: TrieMasks, - ) -> SparseTrieResult<()> { - // Store masks - if let Some(tree_mask) = masks.tree_mask { - self.branch_node_tree_masks.insert(path, tree_mask); + fn reveal_nodes(&mut self, mut nodes: Vec) -> SparseTrieResult<()> { + if nodes.is_empty() { + return Ok(()) } - if let Some(hash_mask) = masks.hash_mask { - self.branch_node_hash_masks.insert(path, hash_mask); + + // Sort nodes first by their subtrie, and secondarily by their path. This allows for + // grouping nodes by their subtrie using `chunk_by`. + nodes.sort_unstable_by( + |RevealedSparseNode { path: path_a, .. }, RevealedSparseNode { path: path_b, .. }| { + let subtrie_type_a = SparseSubtrieType::from_path(path_a); + let subtrie_type_b = SparseSubtrieType::from_path(path_b); + subtrie_type_a.cmp(&subtrie_type_b).then(path_a.cmp(path_b)) + }, + ); + + // Update the top-level branch node masks. This is simple and can't be done in parallel. + for RevealedSparseNode { path, masks, .. } in &nodes { + if let Some(tree_mask) = masks.tree_mask { + self.branch_node_tree_masks.insert(*path, tree_mask); + } + if let Some(hash_mask) = masks.hash_mask { + self.branch_node_hash_masks.insert(*path, hash_mask); + } } - if let Some(subtrie) = self.lower_subtrie_for_path_mut(&path) { - return subtrie.reveal_node(path, &node, masks); + // Due to the sorting all upper subtrie nodes will be at the front of the slice. We split + // them off from the rest to be handled specially by + // `ParallelSparseTrie::reveal_upper_node`. 
+ let num_upper_nodes = nodes + .iter() + .position(|n| !SparseSubtrieType::path_len_is_upper(n.path.len())) + .unwrap_or(nodes.len()); + + let upper_nodes = &nodes[..num_upper_nodes]; + let lower_nodes = &nodes[num_upper_nodes..]; + + // Reserve the capacity of the upper subtrie's `nodes` HashMap before iterating, so we don't + // end up making many small capacity changes as we loop. + self.upper_subtrie.nodes.reserve(upper_nodes.len()); + for node in upper_nodes { + self.reveal_upper_node(node.path, &node.node, node.masks)?; } - // If there is no subtrie for the path it means the path is UPPER_TRIE_MAX_DEPTH or less - // nibbles, and so belongs to the upper trie. - self.upper_subtrie.reveal_node(path, &node, masks)?; + #[cfg(not(feature = "std"))] + // Reveal lower subtrie nodes serially if nostd + { + for node in lower_nodes { + if let Some(subtrie) = self.lower_subtrie_for_path_mut(&node.path) { + subtrie.reveal_node(node.path, &node.node, &node.masks)?; + } else { + panic!("upper subtrie node {node:?} found amongst lower nodes"); + } + } + Ok(()) + } - // The previous upper_trie.reveal_node call will not have revealed any child nodes via - // reveal_node_or_hash if the child node would be found on a lower subtrie. We handle that - // here by manually checking the specific cases where this could happen, and calling - // reveal_node_or_hash for each. - match node { - TrieNode::Branch(branch) => { - // If a branch is at the cutoff level of the trie then it will be in the upper trie, - // but all of its children will be in a lower trie. Check if a child node would be - // in the lower subtrie, and reveal accordingly. - if !SparseSubtrieType::path_len_is_upper(path.len() + 1) { - let mut stack_ptr = branch.as_ref().first_child_index(); - for idx in CHILD_INDEX_RANGE { - if branch.state_mask.is_bit_set(idx) { - let mut child_path = path; - child_path.push_unchecked(idx); - self.lower_subtrie_for_path_mut(&child_path) - .expect("child_path must have a lower subtrie") - .reveal_node_or_hash(child_path, &branch.stack[stack_ptr])?; - stack_ptr += 1; + #[cfg(feature = "std")] + // Reveal lower subtrie nodes in parallel + { + use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; + + // Group the nodes by lower subtrie. This must be collected into a Vec in order for + // rayon's `zip` to be happy. + let node_groups: Vec<_> = lower_nodes + .chunk_by(|node_a, node_b| { + SparseSubtrieType::from_path(&node_a.path) == + SparseSubtrieType::from_path(&node_b.path) + }) + .collect(); + + // Take the lower subtries in the same order that the nodes were grouped into, so that + // the two can be zipped together. This also must be collected into a Vec for rayon's + // `zip` to be happy. + let lower_subtries: Vec<_> = node_groups + .iter() + .map(|nodes| { + // NOTE: chunk_by won't produce empty groups + let node = &nodes[0]; + let idx = + SparseSubtrieType::from_path(&node.path).lower_index().unwrap_or_else( + || panic!("upper subtrie node {node:?} found amongst lower nodes"), + ); + // due to the nodes being sorted secondarily on their path, and chunk_by keeping + // the first element of each group, the `path` here will necessarily be the + // shortest path being revealed for each subtrie. Therefore we can reveal the + // subtrie itself using this path and retain correct behavior. 
+ self.lower_subtries[idx].reveal(&node.path); + (idx, self.lower_subtries[idx].take_revealed().expect("just revealed")) + }) + .collect(); + + let (tx, rx) = mpsc::channel(); + + // Zip the lower subtries and their corresponding node groups, and reveal lower subtrie + // nodes in parallel + lower_subtries + .into_par_iter() + .zip(node_groups.into_par_iter()) + .map(|((subtrie_idx, mut subtrie), nodes)| { + // reserve space in the HashMap ahead of time; doing it on a node-by-node basis + // can cause multiple re-allocations as the hashmap grows. + subtrie.nodes.reserve(nodes.len()); + + for node in nodes { + // Reveal each node in the subtrie, returning early on any errors + let res = subtrie.reveal_node(node.path, &node.node, node.masks); + if res.is_err() { + return (subtrie_idx, subtrie, res) } } + (subtrie_idx, subtrie, Ok(())) + }) + .for_each_init(|| tx.clone(), |tx, result| tx.send(result).unwrap()); + + drop(tx); + + // Take back all lower subtries which were sent to the rayon pool, collecting the last + // seen error in the process and returning that. If we don't fully drain the channel + // then we lose lower sparse tries, putting the whole ParallelSparseTrie in an + // inconsistent state. + let mut any_err = Ok(()); + for (subtrie_idx, subtrie, res) in rx { + self.lower_subtries[subtrie_idx] = LowerSparseSubtrie::Revealed(subtrie); + if res.is_err() { + any_err = res; } } - TrieNode::Extension(ext) => { - let mut child_path = path; - child_path.extend(&ext.key); - if let Some(subtrie) = self.lower_subtrie_for_path_mut(&child_path) { - subtrie.reveal_node_or_hash(child_path, &ext.child)?; - } - } - TrieNode::EmptyRoot | TrieNode::Leaf(_) => (), - } - Ok(()) + any_err + } } fn update_leaf( @@ -1230,6 +1307,68 @@ impl ParallelSparseTrie { nodes.extend(self.upper_subtrie.nodes.iter()); nodes } + + /// Reveals a trie node in the upper trie if it has not been revealed before. When revealing + /// branch/extension nodes this may recurse into a lower trie to reveal a child. + /// + /// This function decodes a trie node and inserts it into the trie structure. It handles + /// different node types (leaf, extension, branch) by appropriately adding them to the trie and + /// recursively revealing their children. + /// + /// # Arguments + /// + /// * `path` - The path where the node should be revealed + /// * `node` - The trie node to reveal + /// * `masks` - Trie masks for branch nodes + /// + /// # Returns + /// + /// `Ok(())` if successful, or an error if the node was not revealed. + fn reveal_upper_node( + &mut self, + path: Nibbles, + node: &TrieNode, + masks: TrieMasks, + ) -> SparseTrieResult<()> { + // If there is no subtrie for the path it means the path is UPPER_TRIE_MAX_DEPTH or less + // nibbles, and so belongs to the upper trie. + self.upper_subtrie.reveal_node(path, node, masks)?; + + // The previous upper_trie.reveal_node call will not have revealed any child nodes via + // reveal_node_or_hash if the child node would be found on a lower subtrie. We handle that + // here by manually checking the specific cases where this could happen, and calling + // reveal_node_or_hash for each. + match node { + TrieNode::Branch(branch) => { + // If a branch is at the cutoff level of the trie then it will be in the upper trie, + // but all of its children will be in a lower trie. Check if a child node would be + // in the lower subtrie, and reveal accordingly. 
+ if !SparseSubtrieType::path_len_is_upper(path.len() + 1) { + let mut stack_ptr = branch.as_ref().first_child_index(); + for idx in CHILD_INDEX_RANGE { + if branch.state_mask.is_bit_set(idx) { + let mut child_path = path; + child_path.push_unchecked(idx); + self.lower_subtrie_for_path_mut(&child_path) + .expect("child_path must have a lower subtrie") + .reveal_node_or_hash(child_path, &branch.stack[stack_ptr])?; + stack_ptr += 1; + } + } + } + } + TrieNode::Extension(ext) => { + let mut child_path = path; + child_path.extend(&ext.key); + if let Some(subtrie) = self.lower_subtrie_for_path_mut(&child_path) { + subtrie.reveal_node_or_hash(child_path, &ext.child)?; + } + } + TrieNode::EmptyRoot | TrieNode::Leaf(_) => (), + } + + Ok(()) + } } /// This is a subtrie of the [`ParallelSparseTrie`] that contains a map from path to sparse trie @@ -2160,6 +2299,26 @@ impl SparseSubtrieType { } } +impl Ord for SparseSubtrieType { + /// Orders two [`SparseSubtrieType`]s such that `Upper` is less than `Lower(_)`, and `Lower`s + /// are ordered by their index. + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + (Self::Upper, Self::Upper) => Ordering::Equal, + (Self::Upper, Self::Lower(_)) => Ordering::Less, + (Self::Lower(_), Self::Upper) => Ordering::Greater, + (Self::Lower(idx_a), Self::Lower(idx_b)) if idx_a == idx_b => Ordering::Equal, + (Self::Lower(idx_a), Self::Lower(idx_b)) => idx_a.cmp(idx_b), + } + } +} + +impl PartialOrd for SparseSubtrieType { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + /// Collection of reusable buffers for calculating subtrie hashes. /// /// These buffers reduce allocations when computing RLP representations during trie updates. @@ -2272,8 +2431,8 @@ mod tests { use reth_trie_db::DatabaseTrieCursorFactory; use reth_trie_sparse::{ provider::{DefaultTrieNodeProvider, RevealedNode, TrieNodeProvider}, - LeafLookup, LeafLookupError, SerialSparseTrie, SparseNode, SparseTrieInterface, - SparseTrieUpdates, TrieMasks, + LeafLookup, LeafLookupError, RevealedSparseNode, SerialSparseTrie, SparseNode, + SparseTrieInterface, SparseTrieUpdates, TrieMasks, }; use std::collections::{BTreeMap, BTreeSet}; @@ -2855,7 +3014,7 @@ mod tests { let node = create_leaf_node([0x2, 0x3], 42); let masks = TrieMasks::none(); - trie.reveal_node(path, node, masks).unwrap(); + trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); assert_matches!( trie.upper_subtrie.nodes.get(&path), @@ -2876,7 +3035,7 @@ mod tests { let node = create_leaf_node([0x3, 0x4], 42); let masks = TrieMasks::none(); - trie.reveal_node(path, node, masks).unwrap(); + trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); // Check that the lower subtrie was created let idx = path_subtrie_index_unchecked(&path); @@ -2900,7 +3059,7 @@ mod tests { let node = create_leaf_node([0x4, 0x5], 42); let masks = TrieMasks::none(); - trie.reveal_node(path, node, masks).unwrap(); + trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); // Check that the lower subtrie's path hasn't changed let idx = path_subtrie_index_unchecked(&path); @@ -2961,7 +3120,7 @@ mod tests { let node = create_extension_node([0x2], child_hash); let masks = TrieMasks::none(); - trie.reveal_node(path, node, masks).unwrap(); + trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); // Extension node should be in upper trie assert_matches!( @@ -3023,7 +3182,7 @@ mod tests { let node = create_branch_node_with_children(&[0x0, 0x7, 0xf], 
child_hashes.clone()); let masks = TrieMasks::none(); - trie.reveal_node(path, node, masks).unwrap(); + trie.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]).unwrap(); // Branch node should be in upper trie assert_matches!( @@ -3052,16 +3211,10 @@ mod tests { #[test] fn test_update_subtrie_hashes() { - // Create a trie with three subtries + // Create a trie and reveal leaf nodes using reveal_nodes let mut trie = ParallelSparseTrie::default(); - let mut subtrie_1 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x0, 0x0]))); - let subtrie_1_index = path_subtrie_index_unchecked(&subtrie_1.path); - let mut subtrie_2 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x1, 0x0]))); - let subtrie_2_index = path_subtrie_index_unchecked(&subtrie_2.path); - let mut subtrie_3 = Box::new(SparseSubtrie::new(Nibbles::from_nibbles([0x3, 0x0]))); - let subtrie_3_index = path_subtrie_index_unchecked(&subtrie_3.path); - // Reveal dummy leaf nodes that form an incorrect trie structure but enough to test the + // Create dummy leaf nodes that form an incorrect trie structure but enough to test the // method let leaf_1_full_path = Nibbles::from_nibbles([0; 64]); let leaf_1_path = leaf_1_full_path.slice(..2); @@ -3075,14 +3228,19 @@ mod tests { let leaf_1 = create_leaf_node(leaf_1_key.to_vec(), 1); let leaf_2 = create_leaf_node(leaf_2_key.to_vec(), 2); let leaf_3 = create_leaf_node(leaf_3_key.to_vec(), 3); - subtrie_1.reveal_node(leaf_1_path, &leaf_1, TrieMasks::none()).unwrap(); - subtrie_2.reveal_node(leaf_2_path, &leaf_2, TrieMasks::none()).unwrap(); - subtrie_3.reveal_node(leaf_3_path, &leaf_3, TrieMasks::none()).unwrap(); - // Add subtries at specific positions - trie.lower_subtries[subtrie_1_index] = LowerSparseSubtrie::Revealed(subtrie_1); - trie.lower_subtries[subtrie_2_index] = LowerSparseSubtrie::Revealed(subtrie_2); - trie.lower_subtries[subtrie_3_index] = LowerSparseSubtrie::Revealed(subtrie_3); + // Reveal nodes using reveal_nodes + trie.reveal_nodes(vec![ + RevealedSparseNode { path: leaf_1_path, node: leaf_1, masks: TrieMasks::none() }, + RevealedSparseNode { path: leaf_2_path, node: leaf_2, masks: TrieMasks::none() }, + RevealedSparseNode { path: leaf_3_path, node: leaf_3, masks: TrieMasks::none() }, + ]) + .unwrap(); + + // Calculate subtrie indexes + let subtrie_1_index = SparseSubtrieType::from_path(&leaf_1_path).lower_index().unwrap(); + let subtrie_2_index = SparseSubtrieType::from_path(&leaf_2_path).lower_index().unwrap(); + let subtrie_3_index = SparseSubtrieType::from_path(&leaf_3_path).lower_index().unwrap(); let unchanged_prefix_set = PrefixSetMut::from([ Nibbles::from_nibbles([0x0]), @@ -3805,9 +3963,12 @@ mod tests { // Step 2: Reveal nodes in the trie let mut trie = ParallelSparseTrie::from_root(extension, TrieMasks::none(), true).unwrap(); - trie.reveal_node(branch_path, branch, TrieMasks::none()).unwrap(); - trie.reveal_node(leaf_1_path, leaf_1, TrieMasks::none()).unwrap(); - trie.reveal_node(leaf_2_path, leaf_2, TrieMasks::none()).unwrap(); + trie.reveal_nodes(vec![ + RevealedSparseNode { path: branch_path, node: branch, masks: TrieMasks::none() }, + RevealedSparseNode { path: leaf_1_path, node: leaf_1, masks: TrieMasks::none() }, + RevealedSparseNode { path: leaf_2_path, node: leaf_2, masks: TrieMasks::none() }, + ]) + .unwrap(); // Step 3: Reset hashes for all revealed nodes to test actual hash calculation // Reset upper subtrie node hashes @@ -4339,14 +4500,18 @@ mod tests { // ├── 0 -> Hash (Path = 0) // └── 1 -> Leaf (Path = 1) sparse - .reveal_node( - 
Nibbles::default(), - branch, - TrieMasks { hash_mask: None, tree_mask: Some(TrieMask::new(0b01)) }, - ) - .unwrap(); - sparse - .reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf), TrieMasks::none()) + .reveal_nodes(vec![ + RevealedSparseNode { + path: Nibbles::default(), + node: branch, + masks: TrieMasks { hash_mask: None, tree_mask: Some(TrieMask::new(0b01)) }, + }, + RevealedSparseNode { + path: Nibbles::from_nibbles([0x1]), + node: TrieNode::Leaf(leaf), + masks: TrieMasks::none(), + }, + ]) .unwrap(); // Removing a blinded leaf should result in an error @@ -4384,14 +4549,18 @@ mod tests { // ├── 0 -> Hash (Path = 0) // └── 1 -> Leaf (Path = 1) sparse - .reveal_node( - Nibbles::default(), - branch, - TrieMasks { hash_mask: None, tree_mask: Some(TrieMask::new(0b01)) }, - ) - .unwrap(); - sparse - .reveal_node(Nibbles::from_nibbles([0x1]), TrieNode::Leaf(leaf), TrieMasks::none()) + .reveal_nodes(vec![ + RevealedSparseNode { + path: Nibbles::default(), + node: branch, + masks: TrieMasks { hash_mask: None, tree_mask: Some(TrieMask::new(0b01)) }, + }, + RevealedSparseNode { + path: Nibbles::from_nibbles([0x1]), + node: TrieNode::Leaf(leaf), + masks: TrieMasks::none(), + }, + ]) .unwrap(); // Removing a non-existent leaf should be a noop @@ -4727,17 +4896,20 @@ mod tests { Default::default(), [key1()], ); - for (path, node) in hash_builder_proof_nodes.nodes_sorted() { - let hash_mask = branch_node_hash_masks.get(&path).copied(); - let tree_mask = branch_node_tree_masks.get(&path).copied(); - sparse - .reveal_node( + let revealed_nodes: Vec = hash_builder_proof_nodes + .nodes_sorted() + .into_iter() + .map(|(path, node)| { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + RevealedSparseNode { path, - TrieNode::decode(&mut &node[..]).unwrap(), - TrieMasks { hash_mask, tree_mask }, - ) - .unwrap(); - } + node: TrieNode::decode(&mut &node[..]).unwrap(), + masks: TrieMasks { hash_mask, tree_mask }, + } + }) + .collect(); + sparse.reveal_nodes(revealed_nodes).unwrap(); // Check that the branch node exists with only two nibbles set assert_eq!( @@ -4762,17 +4934,20 @@ mod tests { Default::default(), [key3()], ); - for (path, node) in hash_builder_proof_nodes.nodes_sorted() { - let hash_mask = branch_node_hash_masks.get(&path).copied(); - let tree_mask = branch_node_tree_masks.get(&path).copied(); - sparse - .reveal_node( + let revealed_nodes: Vec = hash_builder_proof_nodes + .nodes_sorted() + .into_iter() + .map(|(path, node)| { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + RevealedSparseNode { path, - TrieNode::decode(&mut &node[..]).unwrap(), - TrieMasks { hash_mask, tree_mask }, - ) - .unwrap(); - } + node: TrieNode::decode(&mut &node[..]).unwrap(), + masks: TrieMasks { hash_mask, tree_mask }, + } + }) + .collect(); + sparse.reveal_nodes(revealed_nodes).unwrap(); // Check that nothing changed in the branch node assert_eq!( @@ -4838,17 +5013,20 @@ mod tests { Default::default(), [key1(), Nibbles::from_nibbles_unchecked([0x01])], ); - for (path, node) in hash_builder_proof_nodes.nodes_sorted() { - let hash_mask = branch_node_hash_masks.get(&path).copied(); - let tree_mask = branch_node_tree_masks.get(&path).copied(); - sparse - .reveal_node( + let revealed_nodes: Vec = hash_builder_proof_nodes + .nodes_sorted() + .into_iter() + .map(|(path, node)| { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = 
branch_node_tree_masks.get(&path).copied(); + RevealedSparseNode { path, - TrieNode::decode(&mut &node[..]).unwrap(), - TrieMasks { hash_mask, tree_mask }, - ) - .unwrap(); - } + node: TrieNode::decode(&mut &node[..]).unwrap(), + masks: TrieMasks { hash_mask, tree_mask }, + } + }) + .collect(); + sparse.reveal_nodes(revealed_nodes).unwrap(); // Check that the branch node exists assert_eq!( @@ -4873,17 +5051,20 @@ mod tests { Default::default(), [key2()], ); - for (path, node) in hash_builder_proof_nodes.nodes_sorted() { - let hash_mask = branch_node_hash_masks.get(&path).copied(); - let tree_mask = branch_node_tree_masks.get(&path).copied(); - sparse - .reveal_node( + let revealed_nodes: Vec = hash_builder_proof_nodes + .nodes_sorted() + .into_iter() + .map(|(path, node)| { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + RevealedSparseNode { path, - TrieNode::decode(&mut &node[..]).unwrap(), - TrieMasks { hash_mask, tree_mask }, - ) - .unwrap(); - } + node: TrieNode::decode(&mut &node[..]).unwrap(), + masks: TrieMasks { hash_mask, tree_mask }, + } + }) + .collect(); + sparse.reveal_nodes(revealed_nodes).unwrap(); // Check that nothing changed in the extension node assert_eq!( @@ -4955,17 +5136,20 @@ mod tests { Default::default(), [key1()], ); - for (path, node) in hash_builder_proof_nodes.nodes_sorted() { - let hash_mask = branch_node_hash_masks.get(&path).copied(); - let tree_mask = branch_node_tree_masks.get(&path).copied(); - sparse - .reveal_node( + let revealed_nodes: Vec = hash_builder_proof_nodes + .nodes_sorted() + .into_iter() + .map(|(path, node)| { + let hash_mask = branch_node_hash_masks.get(&path).copied(); + let tree_mask = branch_node_tree_masks.get(&path).copied(); + RevealedSparseNode { path, - TrieNode::decode(&mut &node[..]).unwrap(), - TrieMasks { hash_mask, tree_mask }, - ) - .unwrap(); - } + node: TrieNode::decode(&mut &node[..]).unwrap(), + masks: TrieMasks { hash_mask, tree_mask }, + } + }) + .collect(); + sparse.reveal_nodes(revealed_nodes).unwrap(); // Check that the branch node wasn't overwritten by the extension node in the proof assert_matches!( @@ -5935,13 +6119,6 @@ mod tests { tree_mask: Some(TrieMask::new(0b0100000000000000)), }; - trie.reveal_node( - Nibbles::from_nibbles([0x3]), - TrieNode::Branch(branch_0x3_node), - branch_0x3_masks, - ) - .unwrap(); - // Reveal node at path Nibbles(0x37) - leaf node let leaf_path = Nibbles::from_nibbles([0x3, 0x7]); let leaf_key = Nibbles::unpack( @@ -5952,7 +6129,19 @@ mod tests { let leaf_node = LeafNode::new(leaf_key, leaf_value); let leaf_masks = TrieMasks::none(); - trie.reveal_node(leaf_path, TrieNode::Leaf(leaf_node), leaf_masks).unwrap(); + trie.reveal_nodes(vec![ + RevealedSparseNode { + path: Nibbles::from_nibbles([0x3]), + node: TrieNode::Branch(branch_0x3_node), + masks: branch_0x3_masks, + }, + RevealedSparseNode { + path: leaf_path, + node: TrieNode::Leaf(leaf_node), + masks: leaf_masks, + }, + ]) + .unwrap(); // Update leaf with its new value let mut leaf_full_path = leaf_path; diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 3e9664581bb..133b8dacbef 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -1,7 +1,7 @@ use crate::{ provider::{TrieNodeProvider, TrieNodeProviderFactory}, traits::SparseTrieInterface, - SerialSparseTrie, SparseTrie, TrieMasks, + RevealedSparseNode, SerialSparseTrie, SparseTrie, TrieMasks, }; use alloc::{collections::VecDeque, 
vec::Vec}; use alloy_primitives::{ @@ -10,7 +10,6 @@ use alloy_primitives::{ }; use alloy_rlp::{Decodable, Encodable}; use alloy_trie::proof::DecodedProofNodes; -use core::iter::Peekable; use reth_execution_errors::{SparseStateTrieErrorKind, SparseStateTrieResult, SparseTrieErrorKind}; use reth_primitives_traits::Account; use reth_trie_common::{ @@ -228,47 +227,36 @@ where branch_node_hash_masks: HashMap, branch_node_tree_masks: HashMap, ) -> SparseStateTrieResult<()> { - let FilteredProofNodes { + let FilterMappedProofNodes { + root_node, nodes, - new_nodes: _, + new_nodes, total_nodes: _total_nodes, skipped_nodes: _skipped_nodes, - } = filter_revealed_nodes(account_subtree, &self.revealed_account_paths)?; + } = filter_map_revealed_nodes( + account_subtree, + &mut self.revealed_account_paths, + &branch_node_hash_masks, + &branch_node_tree_masks, + )?; #[cfg(feature = "metrics")] { self.metrics.increment_total_account_nodes(_total_nodes as u64); self.metrics.increment_skipped_account_nodes(_skipped_nodes as u64); } - let mut account_nodes = nodes.into_iter().peekable(); - if let Some(root_node) = Self::validate_root_node_decoded(&mut account_nodes)? { + if let Some(root_node) = root_node { // Reveal root node if it wasn't already. - let trie = self.state.reveal_root( - root_node, - TrieMasks { - hash_mask: branch_node_hash_masks.get(&Nibbles::default()).copied(), - tree_mask: branch_node_tree_masks.get(&Nibbles::default()).copied(), - }, - self.retain_updates, - )?; - - // Reveal the remaining proof nodes. - for (path, node) in account_nodes { - let (hash_mask, tree_mask) = if let TrieNode::Branch(_) = node { - ( - branch_node_hash_masks.get(&path).copied(), - branch_node_tree_masks.get(&path).copied(), - ) - } else { - (None, None) - }; + trace!(target: "trie::sparse", ?root_node, "Revealing root account node"); + let trie = + self.state.reveal_root(root_node.node, root_node.masks, self.retain_updates)?; - trace!(target: "trie::sparse", ?path, ?node, ?hash_mask, ?tree_mask, "Revealing account node"); - trie.reveal_node(path, node, TrieMasks { hash_mask, tree_mask })?; + // Reserve the capacity for new nodes ahead of time, if the trie implementation + // supports doing so. + trie.reserve_nodes(new_nodes); - // Track the revealed path. - self.revealed_account_paths.insert(path); - } + trace!(target: "trie::sparse", total_nodes = ?nodes.len(), "Revealing account nodes"); + trie.reveal_nodes(nodes)?; } Ok(()) @@ -293,56 +281,39 @@ where ) -> SparseStateTrieResult<()> { let revealed_nodes = self.revealed_storage_paths.entry(account).or_default(); - let FilteredProofNodes { + let FilterMappedProofNodes { + root_node, nodes, new_nodes, total_nodes: _total_nodes, skipped_nodes: _skipped_nodes, - } = filter_revealed_nodes(storage_subtree.subtree, revealed_nodes)?; + } = filter_map_revealed_nodes( + storage_subtree.subtree, + revealed_nodes, + &storage_subtree.branch_node_hash_masks, + &storage_subtree.branch_node_tree_masks, + )?; #[cfg(feature = "metrics")] { self.metrics.increment_total_storage_nodes(_total_nodes as u64); self.metrics.increment_skipped_storage_nodes(_skipped_nodes as u64); } - let mut nodes = nodes.into_iter().peekable(); - if let Some(root_node) = Self::validate_root_node_decoded(&mut nodes)? { + if let Some(root_node) = root_node { // Reveal root node if it wasn't already. 
+ trace!(target: "trie::sparse", ?account, ?root_node, "Revealing root storage node"); let trie = self.storages.entry(account).or_default().reveal_root( - root_node, - TrieMasks { - hash_mask: storage_subtree - .branch_node_hash_masks - .get(&Nibbles::default()) - .copied(), - tree_mask: storage_subtree - .branch_node_tree_masks - .get(&Nibbles::default()) - .copied(), - }, + root_node.node, + root_node.masks, self.retain_updates, )?; - // Reserve the capacity for new nodes ahead of time. + // Reserve the capacity for new nodes ahead of time, if the trie implementation + // supports doing so. trie.reserve_nodes(new_nodes); - // Reveal the remaining proof nodes. - for (path, node) in nodes { - let (hash_mask, tree_mask) = if let TrieNode::Branch(_) = node { - ( - storage_subtree.branch_node_hash_masks.get(&path).copied(), - storage_subtree.branch_node_tree_masks.get(&path).copied(), - ) - } else { - (None, None) - }; - - trace!(target: "trie::sparse", ?account, ?path, ?node, ?hash_mask, ?tree_mask, "Revealing storage node"); - trie.reveal_node(path, node, TrieMasks { hash_mask, tree_mask })?; - - // Track the revealed path. - revealed_nodes.insert(path); - } + trace!(target: "trie::sparse", ?account, total_nodes = ?nodes.len(), "Revealing storage nodes"); + trie.reveal_nodes(nodes)?; } Ok(()) @@ -451,32 +422,6 @@ where Ok(()) } - /// Validates the decoded root node of the proof and returns it if it exists and is valid. - fn validate_root_node_decoded>( - proof: &mut Peekable, - ) -> SparseStateTrieResult> { - // Validate root node. - let Some((path, root_node)) = proof.next() else { return Ok(None) }; - if !path.is_empty() { - return Err(SparseStateTrieErrorKind::InvalidRootNode { - path, - node: alloy_rlp::encode(&root_node).into(), - } - .into()) - } - - // Perform sanity check. - if matches!(root_node, TrieNode::EmptyRoot) && proof.peek().is_some() { - return Err(SparseStateTrieErrorKind::InvalidRootNode { - path, - node: alloy_rlp::encode(&root_node).into(), - } - .into()) - } - - Ok(Some(root_node)) - } - /// Wipe the storage trie at the provided address. pub fn wipe_storage(&mut self, address: B256) -> SparseStateTrieResult<()> { if let Some(trie) = self.storages.get_mut(&address) { @@ -749,11 +694,13 @@ where } } -/// Result of [`filter_revealed_nodes`]. +/// Result of [`filter_map_revealed_nodes`]. #[derive(Debug, PartialEq, Eq)] -struct FilteredProofNodes { - /// Filtered, decoded and sorted proof nodes. - nodes: Vec<(Nibbles, TrieNode)>, +struct FilterMappedProofNodes { + /// Root node which was pulled out of the original node set to be handled specially. + root_node: Option, + /// Filtered, decoded and unsorted proof nodes. Root node is removed. + nodes: Vec, /// Number of nodes in the proof. total_nodes: usize, /// Number of nodes that were skipped because they were already revealed. @@ -763,38 +710,78 @@ struct FilteredProofNodes { new_nodes: usize, } -/// Filters the decoded nodes that are already revealed and returns additional information about the -/// number of total, skipped, and new nodes. -fn filter_revealed_nodes( +/// Filters the decoded nodes that are already revealed, maps them to `RevealedSparseNodes`, +/// separates the root node if present, and returns additional information about the number of +/// total, skipped, and new nodes. 
+fn filter_map_revealed_nodes( proof_nodes: DecodedProofNodes, - revealed_nodes: &HashSet, -) -> alloy_rlp::Result { - let mut result = FilteredProofNodes { + revealed_nodes: &mut HashSet, + branch_node_hash_masks: &HashMap, + branch_node_tree_masks: &HashMap, +) -> SparseStateTrieResult { + let mut result = FilterMappedProofNodes { + root_node: None, nodes: Vec::with_capacity(proof_nodes.len()), total_nodes: 0, skipped_nodes: 0, new_nodes: 0, }; - for (path, node) in proof_nodes.into_inner() { + let proof_nodes_len = proof_nodes.len(); + for (path, proof_node) in proof_nodes.into_inner() { result.total_nodes += 1; - // If the node is already revealed, skip it. - if revealed_nodes.contains(&path) { + + let is_root = path.is_empty(); + + // If the node is already revealed, skip it. We don't ever skip the root node, nor do we add + // it to `revealed_nodes`. + if !is_root && !revealed_nodes.insert(path) { result.skipped_nodes += 1; continue } result.new_nodes += 1; - // If it's a branch node, increase the number of new nodes by the number of children - // according to the state mask. - if let TrieNode::Branch(branch) = &node { - result.new_nodes += branch.state_mask.count_ones() as usize; + + // Extract hash/tree masks based on the node type (only branch nodes have masks). At the + // same time increase the new_nodes counter if the node is a type which has children. + let masks = match &proof_node { + TrieNode::Branch(branch) => { + // If it's a branch node, increase the number of new nodes by the number of children + // according to the state mask. + result.new_nodes += branch.state_mask.count_ones() as usize; + TrieMasks { + hash_mask: branch_node_hash_masks.get(&path).copied(), + tree_mask: branch_node_tree_masks.get(&path).copied(), + } + } + TrieNode::Extension(_) => { + // There is always exactly one child of an extension node. + result.new_nodes += 1; + TrieMasks::none() + } + _ => TrieMasks::none(), + }; + + let node = RevealedSparseNode { path, node: proof_node, masks }; + + if is_root { + // Perform sanity check. 
+ if matches!(node.node, TrieNode::EmptyRoot) && proof_nodes_len > 1 { + return Err(SparseStateTrieErrorKind::InvalidRootNode { + path, + node: alloy_rlp::encode(&node.node).into(), + } + .into()) + } + + result.root_node = Some(node); + + continue } - result.nodes.push((path, node)); + result.nodes.push(node); } - result.nodes.sort_unstable_by(|a, b| a.0.cmp(&b.0)); Ok(result) } @@ -1123,8 +1110,8 @@ mod tests { } #[test] - fn test_filter_revealed_nodes() { - let revealed_nodes = HashSet::from_iter([Nibbles::from_nibbles([0x0])]); + fn test_filter_map_revealed_nodes() { + let mut revealed_nodes = HashSet::from_iter([Nibbles::from_nibbles([0x0])]); let leaf = TrieNode::Leaf(LeafNode::new(Nibbles::default(), alloy_rlp::encode([]))); let leaf_encoded = alloy_rlp::encode(&leaf); let branch = TrieNode::Branch(BranchNode::new( @@ -1137,12 +1124,30 @@ mod tests { (Nibbles::from_nibbles([0x1]), leaf.clone()), ]); - let decoded = filter_revealed_nodes(proof_nodes, &revealed_nodes).unwrap(); + let branch_node_hash_masks = HashMap::default(); + let branch_node_tree_masks = HashMap::default(); + + let decoded = filter_map_revealed_nodes( + proof_nodes, + &mut revealed_nodes, + &branch_node_hash_masks, + &branch_node_tree_masks, + ) + .unwrap(); assert_eq!( decoded, - FilteredProofNodes { - nodes: vec![(Nibbles::default(), branch), (Nibbles::from_nibbles([0x1]), leaf)], + FilterMappedProofNodes { + root_node: Some(RevealedSparseNode { + path: Nibbles::default(), + node: branch, + masks: TrieMasks::none(), + }), + nodes: vec![RevealedSparseNode { + path: Nibbles::from_nibbles([0x1]), + node: leaf, + masks: TrieMasks::none(), + }], // Branch, leaf, leaf total_nodes: 3, // Revealed leaf node with path 0x1 diff --git a/crates/trie/sparse/src/traits.rs b/crates/trie/sparse/src/traits.rs index 45c990511db..300ac39c1b6 100644 --- a/crates/trie/sparse/src/traits.rs +++ b/crates/trie/sparse/src/traits.rs @@ -2,7 +2,7 @@ use core::fmt::Debug; -use alloc::{borrow::Cow, vec::Vec}; +use alloc::{borrow::Cow, vec, vec::Vec}; use alloy_primitives::{ map::{HashMap, HashSet}, B256, @@ -63,17 +63,7 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { /// * `additional` - The number of additional trie nodes to reserve capacity for. fn reserve_nodes(&mut self, _additional: usize) {} - /// Reveals a trie node if it has not been revealed before. - /// - /// This function decodes a trie node and inserts it into the trie structure. - /// It handles different node types (leaf, extension, branch) by appropriately - /// adding them to the trie and recursively revealing their children. - /// - /// # Arguments - /// - /// * `path` - The path where the node should be revealed - /// * `node` - The trie node to reveal - /// * `masks` - Trie masks for branch nodes + /// The single-node version of `reveal_nodes`. /// /// # Returns /// @@ -83,7 +73,25 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { path: Nibbles, node: TrieNode, masks: TrieMasks, - ) -> SparseTrieResult<()>; + ) -> SparseTrieResult<()> { + self.reveal_nodes(vec![RevealedSparseNode { path, node, masks }]) + } + + /// Reveals one or more trie nodes if they have not been revealed before. + /// + /// This function decodes trie nodes and inserts them into the trie structure. It handles + /// different node types (leaf, extension, branch) by appropriately adding them to the trie and + /// recursively revealing their children. 
+ /// + /// # Arguments + /// + /// * `nodes` - The nodes to be revealed, each having a path and optional set of branch node + /// masks. The nodes will be unsorted. + /// + /// # Returns + /// + /// `Ok(())` if successful, or an error if any of the nodes was not revealed. + fn reveal_nodes(&mut self, nodes: Vec) -> SparseTrieResult<()>; /// Updates the value of a leaf node at the specified path. /// @@ -225,7 +233,7 @@ pub trait SparseTrieInterface: Sized + Debug + Send + Sync { /// /// These masks are essential for efficient trie traversal and serialization, as they /// determine how nodes should be encoded and stored on disk. -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] pub struct TrieMasks { /// Branch node hash mask, if any. /// @@ -291,3 +299,14 @@ pub enum LeafLookup { /// Leaf does not exist (exclusion proof found). NonExistent, } + +/// Carries all information needed by a sparse trie to reveal a particular node. +#[derive(Debug, PartialEq, Eq)] +pub struct RevealedSparseNode { + /// Path of the node. + pub path: Nibbles, + /// The node itself. + pub node: TrieNode, + /// Tree and hash masks for the node, if known. + pub masks: TrieMasks, +} diff --git a/crates/trie/sparse/src/trie.rs b/crates/trie/sparse/src/trie.rs index c8669cca179..3189a8c3b66 100644 --- a/crates/trie/sparse/src/trie.rs +++ b/crates/trie/sparse/src/trie.rs @@ -1,6 +1,7 @@ use crate::{ provider::{RevealedNode, TrieNodeProvider}, - LeafLookup, LeafLookupError, SparseTrieInterface, SparseTrieUpdates, TrieMasks, + LeafLookup, LeafLookupError, RevealedSparseNode, SparseTrieInterface, SparseTrieUpdates, + TrieMasks, }; use alloc::{ borrow::Cow, @@ -412,7 +413,6 @@ impl SparseTrieInterface for SerialSparseTrie { fn reserve_nodes(&mut self, additional: usize) { self.nodes.reserve(additional); } - fn reveal_node( &mut self, path: Nibbles, @@ -523,7 +523,7 @@ impl SparseTrieInterface for SerialSparseTrie { SparseNode::Hash(hash) => { let mut full = *entry.key(); full.extend(&leaf.key); - self.values.insert(full, leaf.value); + self.values.insert(full, leaf.value.clone()); entry.insert(SparseNode::Leaf { key: leaf.key, // Memoize the hash of a previously blinded node in a new leaf @@ -548,7 +548,7 @@ impl SparseTrieInterface for SerialSparseTrie { let mut full = *entry.key(); full.extend(&leaf.key); entry.insert(SparseNode::new_leaf(leaf.key)); - self.values.insert(full, leaf.value); + self.values.insert(full, leaf.value.clone()); } }, } @@ -556,6 +556,14 @@ impl SparseTrieInterface for SerialSparseTrie { Ok(()) } + fn reveal_nodes(&mut self, mut nodes: Vec) -> SparseTrieResult<()> { + nodes.sort_unstable_by_key(|node| node.path); + for node in nodes { + self.reveal_node(node.path, node.node, node.masks)?; + } + Ok(()) + } + fn update_leaf( &mut self, full_path: Nibbles, From 1e208710432fcb0d3a4b367a3c3977661305a1d1 Mon Sep 17 00:00:00 2001 From: Tomass <155266802+zeroprooff@users.noreply.github.com> Date: Wed, 16 Jul 2025 18:40:52 +0300 Subject: [PATCH 189/305] docs: fix typo in NetworkManager diagram (#17448) --- crates/net/network/docs/mermaid/network-manager.mmd | 2 +- crates/net/network/src/manager.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/net/network/docs/mermaid/network-manager.mmd b/crates/net/network/docs/mermaid/network-manager.mmd index e34dbb17777..aa2514a54d5 100644 --- a/crates/net/network/docs/mermaid/network-manager.mmd +++ b/crates/net/network/docs/mermaid/network-manager.mmd @@ -9,7 +9,7 @@ graph TB subgraph Swarm direction TB B1[(Session Manager)] 
- B2[(Connection Lister)] + B2[(Connection Listener)] B3[(Network State)] end end diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index 7107faaf588..ce8cda2b259 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -89,7 +89,7 @@ use tracing::{debug, error, trace, warn}; /// subgraph Swarm /// direction TB /// B1[(Session Manager)] -/// B2[(Connection Lister)] +/// B2[(Connection Listener)] /// B3[(Network State)] /// end /// end From 824e099055aeb0c33d1a25a7235000503d9ea4cc Mon Sep 17 00:00:00 2001 From: Rez Date: Thu, 17 Jul 2025 21:48:46 +1000 Subject: [PATCH 190/305] feat: make engine API metered methods and utilities public (#17460) --- crates/rpc/rpc-engine-api/src/engine_api.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 3066b440a45..ad708b75da3 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -147,7 +147,7 @@ where } /// Metered version of `new_payload_v1`. - async fn new_payload_v1_metered( + pub async fn new_payload_v1_metered( &self, payload: PayloadT::ExecutionData, ) -> EngineApiResult { @@ -271,6 +271,11 @@ where self.inner.metrics.new_payload_response.update_response_metrics(&res, gas_used, elapsed); Ok(res?) } + + /// Returns whether the engine accepts execution requests hash. + pub fn accept_execution_requests_hash(&self) -> bool { + self.inner.accept_execution_requests_hash + } } impl @@ -754,7 +759,8 @@ where .map_err(|err| EngineApiError::Internal(Box::new(err))) } - fn get_blobs_v1_metered( + /// Metered version of `get_blobs_v1`. + pub fn get_blobs_v1_metered( &self, versioned_hashes: Vec, ) -> EngineApiResult>> { @@ -788,7 +794,8 @@ where .map_err(|err| EngineApiError::Internal(Box::new(err))) } - fn get_blobs_v2_metered( + /// Metered version of `get_blobs_v2`. + pub fn get_blobs_v2_metered( &self, versioned_hashes: Vec, ) -> EngineApiResult>> { From 2afd1098166e4947820efd7766ab04c4435070fc Mon Sep 17 00:00:00 2001 From: cakevm Date: Thu, 17 Jul 2025 15:19:19 +0200 Subject: [PATCH 191/305] chore: correct spelling errors (#17462) --- crates/engine/tree/src/tree/payload_processor/mod.rs | 2 +- crates/engine/tree/src/tree/payload_processor/sparse_trie.rs | 2 +- .../testdata/rpc-compat/eth_getLogs/topic-exact-match.io | 2 +- crates/trie/sparse-parallel/src/lower.rs | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 3210780ec60..2078df8088a 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -397,7 +397,7 @@ where /// Helper method that handles sparse trie task spawning. /// - /// If we have a stored trie, we will re-use it for spawning. If we do not have a stored trie, + /// If we have a stored trie, we will reuse it for spawning. If we do not have a stored trie, /// we will create a new trie based on the configured trie type (parallel or serial). 
fn spawn_sparse_trie_task( &self, diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index 929e4d1de30..4242752867b 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -112,7 +112,7 @@ where /// # Returns /// /// - State root computation outcome. - /// - Accounts trie that needs to be cleared and re-used to avoid reallocations. + /// - Accounts trie that needs to be cleared and reused to avoid reallocations. pub(super) fn run( &mut self, ) -> (Result, SparseTrie) { diff --git a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-exact-match.io b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-exact-match.io index 4795cc4116b..30366e8005e 100644 --- a/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-exact-match.io +++ b/crates/rpc/rpc-e2e-tests/testdata/rpc-compat/eth_getLogs/topic-exact-match.io @@ -1,3 +1,3 @@ -// queries for logs with two topics, with both topics set explictly +// queries for logs with two topics, with both topics set explicitly >> {"jsonrpc":"2.0","id":1,"method":"eth_getLogs","params":[{"address":null,"fromBlock":"0x3","toBlock":"0x6","topics":[["0x00000000000000000000000000000000000000000000000000000000656d6974"],["0x4238ace0bf7e66fd40fea01bdf43f4f30423f48432efd0da3af5fcb17a977fd4"]]}]} << {"jsonrpc":"2.0","id":1,"result":[{"address":"0x7dcd17433742f4c0ca53122ab541d0ba67fc27df","topics":["0x00000000000000000000000000000000000000000000000000000000656d6974","0x4238ace0bf7e66fd40fea01bdf43f4f30423f48432efd0da3af5fcb17a977fd4"],"data":"0x0000000000000000000000000000000000000000000000000000000000000001","blockNumber":"0x4","transactionHash":"0xf047c5133c96c405a79d01038b4ccf8208c03e296dd9f6bea083727c9513f805","transactionIndex":"0x0","blockHash":"0x94540b21748e45497c41518ed68b2a0c16d728e917b665ae50d51f6895242e53","logIndex":"0x0","removed":false}]} diff --git a/crates/trie/sparse-parallel/src/lower.rs b/crates/trie/sparse-parallel/src/lower.rs index 0a4356426e5..449c3a7b29b 100644 --- a/crates/trie/sparse-parallel/src/lower.rs +++ b/crates/trie/sparse-parallel/src/lower.rs @@ -5,7 +5,7 @@ use reth_trie_common::Nibbles; /// /// When a [`crate::ParallelSparseTrie`] is initialized/cleared then its `LowerSparseSubtrie`s are /// all blinded, meaning they have no nodes. A blinded `LowerSparseSubtrie` may hold onto a cleared -/// [`SparseSubtrie`] in order to re-use allocations. +/// [`SparseSubtrie`] in order to reuse allocations. 
#[derive(Clone, Debug, Eq, PartialEq)] pub(crate) enum LowerSparseSubtrie { Blind(Option>), From 7ccb37ebe3ab1082a1bfd9cbcc94484f955cbd21 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 17 Jul 2025 15:19:30 +0200 Subject: [PATCH 192/305] refactor: move receipt conversions to `RpcConverter` (#17450) --- Cargo.lock | 5 +- crates/ethereum/node/Cargo.toml | 1 + crates/ethereum/node/src/node.rs | 24 ++- crates/optimism/node/Cargo.toml | 7 - crates/optimism/node/src/node.rs | 37 ++-- crates/optimism/rpc/src/eth/block.rs | 84 +-------- crates/optimism/rpc/src/eth/call.rs | 12 +- crates/optimism/rpc/src/eth/mod.rs | 115 +++++++----- crates/optimism/rpc/src/eth/pending_block.rs | 3 +- crates/optimism/rpc/src/eth/receipt.rs | 171 +++++++++++------- crates/optimism/rpc/src/eth/transaction.rs | 48 +++-- crates/rpc/rpc-builder/src/lib.rs | 66 ++++--- crates/rpc/rpc-builder/tests/it/utils.rs | 13 +- crates/rpc/rpc-convert/src/rpc.rs | 3 + crates/rpc/rpc-convert/src/transaction.rs | 137 +++++++++----- crates/rpc/rpc-eth-api/src/helpers/block.rs | 62 ++++++- crates/rpc/rpc-eth-api/src/helpers/receipt.rs | 64 ++++++- crates/rpc/rpc-eth-api/src/types.rs | 2 +- crates/rpc/rpc-eth-types/src/lib.rs | 1 - crates/rpc/rpc-eth-types/src/receipt.rs | 124 ++++++------- crates/rpc/rpc/Cargo.toml | 2 +- crates/rpc/rpc/src/eth/builder.rs | 82 +++++++-- crates/rpc/rpc/src/eth/core.rs | 125 ++++++++----- crates/rpc/rpc/src/eth/filter.rs | 12 +- crates/rpc/rpc/src/eth/helpers/block.rs | 71 ++------ crates/rpc/rpc/src/eth/helpers/call.rs | 25 ++- crates/rpc/rpc/src/eth/helpers/fees.rs | 6 +- .../rpc/rpc/src/eth/helpers/pending_block.rs | 8 +- crates/rpc/rpc/src/eth/helpers/receipt.rs | 64 ++----- crates/rpc/rpc/src/eth/helpers/signer.rs | 4 +- crates/rpc/rpc/src/eth/helpers/spec.rs | 9 +- crates/rpc/rpc/src/eth/helpers/state.rs | 29 ++- crates/rpc/rpc/src/eth/helpers/trace.rs | 4 +- crates/rpc/rpc/src/eth/helpers/transaction.rs | 18 +- crates/rpc/rpc/src/eth/helpers/types.rs | 14 +- examples/exex-hello-world/src/main.rs | 6 +- 36 files changed, 820 insertions(+), 638 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 744dee7a2b6..0e882206bf0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8950,6 +8950,7 @@ dependencies = [ "alloy-contract", "alloy-eips", "alloy-genesis", + "alloy-network", "alloy-primitives", "alloy-provider", "alloy-rpc-types-beacon", @@ -9273,7 +9274,6 @@ dependencies = [ "futures", "op-alloy-consensus", "op-alloy-rpc-types-engine", - "op-revm", "reth-chainspec", "reth-consensus", "reth-db", @@ -9302,15 +9302,12 @@ dependencies = [ "reth-revm", "reth-rpc-api", "reth-rpc-engine-api", - "reth-rpc-eth-api", - "reth-rpc-eth-types", "reth-rpc-server-types", "reth-tasks", "reth-tracing", "reth-transaction-pool", "reth-trie-common", "reth-trie-db", - "revm", "serde", "serde_json", "tokio", diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 128ca756190..b9cedc660a4 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -41,6 +41,7 @@ reth-payload-primitives.workspace = true # ethereum alloy-eips.workspace = true +alloy-network.workspace = true alloy-rpc-types-eth.workspace = true alloy-rpc-types-engine.workspace = true # revm with required ethereum features diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 9585e8abf8b..8938f6e8690 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -3,6 +3,7 @@ pub use crate::{payload::EthereumPayloadBuilder, 
EthereumEngineValidator}; use crate::{EthEngineTypes, EthEvmConfig}; use alloy_eips::{eip7840::BlobParams, merge::EPOCH_SLOTS}; +use alloy_network::Ethereum; use alloy_rpc_types_engine::ExecutionData; use reth_chainspec::{ChainSpec, EthChainSpec, EthereumHardforks, Hardforks}; use reth_consensus::{ConsensusError, FullConsensus}; @@ -15,6 +16,7 @@ use reth_ethereum_engine_primitives::{ use reth_ethereum_primitives::{EthPrimitives, TransactionSigned}; use reth_evm::{ eth::spec::EthExecutorSpec, ConfigureEvm, EvmFactory, EvmFactoryFor, NextBlockEnvAttributes, + TxEnvFor, }; use reth_network::{primitives::BasicNetworkPrimitives, NetworkHandle, PeersInfo}; use reth_node_api::{ @@ -35,10 +37,13 @@ use reth_node_builder::{ PayloadTypes, }; use reth_provider::{providers::ProviderFactoryBuilder, EthStorage}; -use reth_rpc::{eth::core::EthApiFor, ValidationApi}; -use reth_rpc_api::{eth::FullEthApiServer, servers::BlockSubmissionValidationApiServer}; +use reth_rpc::{ + eth::core::{EthApiFor, EthRpcConverterFor}, + ValidationApi, +}; +use reth_rpc_api::servers::BlockSubmissionValidationApiServer; use reth_rpc_builder::{config::RethRpcServerConfig, middleware::RethRpcMiddleware}; -use reth_rpc_eth_api::helpers::AddDevSigners; +use reth_rpc_eth_api::RpcConvert; use reth_rpc_eth_types::{error::FromEvmError, EthApiError}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; @@ -137,8 +142,17 @@ pub struct EthereumEthApiBuilder; impl EthApiBuilder for EthereumEthApiBuilder where - N: FullNodeComponents, - EthApiFor: FullEthApiServer + AddDevSigners, + N: FullNodeComponents< + Types: NodeTypes, + Evm: ConfigureEvm>, + >, + EthRpcConverterFor: RpcConvert< + Primitives = PrimitivesTy, + TxEnv = TxEnvFor, + Error = EthApiError, + Network = Ethereum, + >, + EthApiError: FromEvmError, { type EthApi = EthApiFor; diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 9bdf4ecb2ea..ee5927de3c6 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -25,8 +25,6 @@ reth-network.workspace = true reth-evm.workspace = true reth-trie-db.workspace = true reth-rpc-server-types.workspace = true -reth-rpc-eth-api.workspace = true -reth-rpc-eth-types.workspace = true reth-tasks = { workspace = true, optional = true } reth-trie-common.workspace = true reth-node-core.workspace = true @@ -45,10 +43,6 @@ reth-optimism-consensus = { workspace = true, features = ["std"] } reth-optimism-forks.workspace = true reth-optimism-primitives = { workspace = true, features = ["serde", "serde-bincode-compat", "reth-codec"] } -# revm with required optimism features -revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } -op-revm.workspace = true - # ethereum alloy-primitives.workspace = true op-alloy-consensus.workspace = true @@ -90,7 +84,6 @@ alloy-eips.workspace = true default = ["reth-codec"] asm-keccak = [ "alloy-primitives/asm-keccak", - "revm/asm-keccak", "reth-optimism-node/asm-keccak", "reth-node-core/asm-keccak", ] diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index adeacfe8ef3..ae41e3d8ee0 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -10,7 +10,7 @@ use op_alloy_consensus::{interop::SafetyLevel, OpPooledTransaction}; use op_alloy_rpc_types_engine::{OpExecutionData, OpPayloadAttributes}; use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks}; use reth_engine_local::LocalPayloadAttributesBuilder; -use reth_evm::{ConfigureEvm, EvmFactory, 
EvmFactoryFor}; +use reth_evm::ConfigureEvm; use reth_network::{ types::BasicNetworkPrimitives, NetworkConfig, NetworkHandle, NetworkManager, NetworkPrimitives, PeersInfo, @@ -47,17 +47,15 @@ use reth_optimism_rpc::{ historical::{HistoricalRpc, HistoricalRpcClient}, miner::{MinerApiExtServer, OpMinerExtApi}, witness::{DebugExecutionWitnessApiServer, OpDebugWitnessApi}, - OpEthApi, OpEthApiError, SequencerClient, + SequencerClient, }; use reth_optimism_storage::OpStorage; use reth_optimism_txpool::{ supervisor::{SupervisorClient, DEFAULT_SUPERVISOR_URL}, OpPooledTx, }; -use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions, ProviderTx}; -use reth_rpc_api::DebugApiServer; -use reth_rpc_eth_api::{ext::L2EthApiExtServer, FullEthApiServer, RpcTypes, SignableTxRequest}; -use reth_rpc_eth_types::error::FromEvmError; +use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions}; +use reth_rpc_api::{DebugApiServer, L2EthApiExtServer}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ @@ -65,7 +63,6 @@ use reth_transaction_pool::{ TransactionPool, TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; -use revm::context::TxEnv; use std::{marker::PhantomData, sync::Arc}; /// Marker trait for Optimism node types with standard engine, chain spec, and primitives. @@ -289,17 +286,17 @@ pub struct OpAddOns, EV, EB, RpcMi min_suggested_priority_fee: u64, } -impl Default +impl Default for OpAddOns< N, - OpEthApiBuilder, + OpEthApiBuilder, OpEngineValidatorBuilder, OpEngineApiBuilder, Identity, > where N: FullNodeComponents, - OpEthApiBuilder: EthApiBuilder, + OpEthApiBuilder: EthApiBuilder, { fn default() -> Self { Self::builder().build() @@ -428,24 +425,20 @@ where } } -impl NodeAddOns - for OpAddOns, EV, EB, RpcMiddleware> +impl NodeAddOns for OpAddOns where N: FullNodeComponents< Types: OpFullNodeTypes, Evm: ConfigureEvm, >, N::Types: NodeTypes, - OpEthApiError: FromEvmError, + EthB: EthApiBuilder, ::Transaction: OpPooledTx, - EvmFactoryFor: EvmFactory>, - OpEthApi: FullEthApiServer, - NetworkT: RpcTypes>>, EV: EngineValidatorBuilder, EB: EngineApiBuilder, RpcMiddleware: RethRpcMiddleware, { - type Handle = RpcHandle>; + type Handle = RpcHandle; async fn launch_add_ons( self, @@ -548,23 +541,19 @@ where } } -impl RethRpcAddOns - for OpAddOns, EV, EB, RpcMiddleware> +impl RethRpcAddOns for OpAddOns where N: FullNodeComponents< Types: OpFullNodeTypes, Evm: ConfigureEvm, >, - OpEthApiError: FromEvmError, <::Pool as TransactionPool>::Transaction: OpPooledTx, - EvmFactoryFor: EvmFactory>, - OpEthApi: FullEthApiServer, - NetworkT: RpcTypes>>, + EthB: EthApiBuilder, EV: EngineValidatorBuilder, EB: EngineApiBuilder, RpcMiddleware: RethRpcMiddleware, { - type EthApi = OpEthApi; + type EthApi = EthB::EthApi; fn hooks_mut(&mut self) -> &mut reth_node_builder::rpc::RpcHooks { self.rpc_add_ons.hooks_mut() diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 6c1053b5f7d..85ed4494cf1 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -1,95 +1,28 @@ //! Loads and formats OP block RPC response. 
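The `OpAddOns` changes above replace the hard-coded `OpEthApi<N, NetworkT>` with a generic `EthB: EthApiBuilder<N>` parameter, so the handle and `EthApi` types fall out of the builder's associated type. A minimal, self-contained sketch of that indirection, using simplified stand-in traits rather than the real reth bounds (the actual `EthApiBuilder` is async and far more constrained):

```rust
use std::marker::PhantomData;

// Simplified stand-in for `reth_node_builder::rpc::EthApiBuilder`.
trait EthApiBuilder<N> {
    type EthApi;
    fn build_eth_api(self, components: N) -> Self::EthApi;
}

// Add-ons generic over the builder: the produced eth API type is derived
// from the builder's associated type instead of being spelled out.
struct AddOns<N, EthB: EthApiBuilder<N>> {
    eth_api_builder: EthB,
    _marker: PhantomData<N>,
}

impl<N, EthB: EthApiBuilder<N>> AddOns<N, EthB> {
    fn launch(self, components: N) -> EthB::EthApi {
        self.eth_api_builder.build_eth_api(components)
    }
}
```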
-use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; -use alloy_rpc_types_eth::BlockId; -use op_alloy_rpc_types::OpTransactionReceipt; use reth_chainspec::ChainSpecProvider; use reth_optimism_forks::OpHardforks; -use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; use reth_rpc_eth_api::{ - helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, - types::RpcTypes, - RpcReceipt, + helpers::{EthBlocks, LoadBlock, LoadPendingBlock, SpawnBlocking}, + RpcConvert, }; -use reth_storage_api::{BlockReader, HeaderProvider, ProviderTx}; +use reth_storage_api::{HeaderProvider, ProviderTx}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError, OpReceiptBuilder}; +use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError}; -impl EthBlocks for OpEthApi +impl EthBlocks for OpEthApi where Self: LoadBlock< Error = OpEthApiError, - NetworkTypes: RpcTypes, - Provider: BlockReader, + RpcConvert: RpcConvert, >, N: OpNodeCore + HeaderProvider>, + Rpc: RpcConvert, { - async fn block_receipts( - &self, - block_id: BlockId, - ) -> Result>>, Self::Error> - where - Self: LoadReceipt, - { - if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? { - let block_number = block.number(); - let base_fee = block.base_fee_per_gas(); - let block_hash = block.hash(); - let excess_blob_gas = block.excess_blob_gas(); - let timestamp = block.timestamp(); - - let mut l1_block_info = match reth_optimism_evm::extract_l1_info(block.body()) { - Ok(l1_block_info) => l1_block_info, - Err(err) => { - // If it is the genesis block (i.e block number is 0), there is no L1 info, so - // we return an empty l1_block_info. - if block_number == 0 { - return Ok(Some(vec![])); - } - return Err(err.into()); - } - }; - - return block - .transactions_recovered() - .zip(receipts.iter()) - .enumerate() - .map(|(idx, (tx, receipt))| -> Result<_, _> { - let meta = TransactionMeta { - tx_hash: tx.tx_hash(), - index: idx as u64, - block_hash, - block_number, - base_fee, - excess_blob_gas, - timestamp, - }; - - // We must clear this cache as different L2 transactions can have different - // L1 costs. A potential improvement here is to only clear the cache if the - // new transaction input has changed, since otherwise the L1 cost wouldn't. - l1_block_info.clear_tx_l1_cost(); - - Ok(OpReceiptBuilder::new( - &self.inner.eth_api.provider().chain_spec(), - tx, - meta, - receipt, - &receipts, - &mut l1_block_info, - )? 
- .build()) - }) - .collect::, Self::Error>>() - .map(Some) - } - - Ok(None) - } } -impl LoadBlock for OpEthApi +impl LoadBlock for OpEthApi where Self: LoadPendingBlock< Pool: TransactionPool< @@ -97,5 +30,6 @@ where >, > + SpawnBlocking, N: OpNodeCore, + Rpc: RpcConvert, { } diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index a988bbf740a..0e644a54667 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -10,22 +10,23 @@ use reth_rpc_eth_api::{ use reth_storage_api::{errors::ProviderError, ProviderHeader, ProviderTx}; use revm::context::TxEnv; -impl EthCall for OpEthApi +impl EthCall for OpEthApi where Self: EstimateCall + LoadBlock + FullEthApiTypes, N: OpNodeCore, + Rpc: RpcConvert, { } -impl EstimateCall for OpEthApi +impl EstimateCall for OpEthApi where - Self: Call, - Self::Error: From, + Self: Call>, N: OpNodeCore, + Rpc: RpcConvert, { } -impl Call for OpEthApi +impl Call for OpEthApi where Self: LoadState< Evm: ConfigureEvm< @@ -44,6 +45,7 @@ where > + SpawnBlocking, Self::Error: From, N: OpNodeCore, + Rpc: RpcConvert, { #[inline] fn call_gas_limit(&self) -> u64 { diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index b5f76539cdc..ec7c865ec6e 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -8,7 +8,10 @@ mod block; mod call; mod pending_block; -use crate::{eth::transaction::OpTxInfoMapper, OpEthApiError, SequencerClient}; +use crate::{ + eth::{receipt::OpReceiptConverter, transaction::OpTxInfoMapper}, + OpEthApiError, SequencerClient, +}; use alloy_primitives::U256; use eyre::WrapErr; use op_alloy_network::Optimism; @@ -16,7 +19,7 @@ pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_node_api::{FullNodeComponents, NodePrimitives}; +use reth_node_api::{FullNodeComponents, FullNodeTypes, NodePrimitives}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ @@ -24,8 +27,8 @@ use reth_rpc_eth_api::{ spec::SignersForApi, AddDevSigners, EthApiSpec, EthFees, EthState, LoadBlock, LoadFee, LoadState, SpawnBlocking, Trace, }, - EthApiTypes, FromEvmError, FullEthApiServer, RpcConverter, RpcNodeCore, RpcNodeCoreExt, - RpcTypes, SignableTxRequest, + EthApiTypes, FromEvmError, FullEthApiServer, RpcConvert, RpcConverter, RpcNodeCore, + RpcNodeCoreExt, RpcTypes, SignableTxRequest, }; use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; use reth_storage_api::{ @@ -62,36 +65,31 @@ impl OpNodeCore for T where T: RpcNodeCore {} /// /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. -pub struct OpEthApi { +pub struct OpEthApi { /// Gateway to node's core components. - inner: Arc>, - /// Converter for RPC types. - tx_resp_builder: RpcConverter>, + inner: Arc>, } -impl Clone for OpEthApi { +impl Clone for OpEthApi { fn clone(&self) -> Self { - Self { inner: self.inner.clone(), tx_resp_builder: self.tx_resp_builder.clone() } + Self { inner: self.inner.clone() } } } -impl OpEthApi { +impl OpEthApi { /// Creates a new `OpEthApi`. 
pub fn new( - eth_api: EthApiNodeBackend, + eth_api: EthApiNodeBackend, sequencer_client: Option, min_suggested_priority_fee: U256, ) -> Self { let inner = Arc::new(OpEthApiInner { eth_api, sequencer_client, min_suggested_priority_fee }); - Self { - inner: inner.clone(), - tx_resp_builder: RpcConverter::with_mapper(OpTxInfoMapper::new(inner)), - } + Self { inner } } /// Returns a reference to the [`EthApiNodeBackend`]. - pub fn eth_api(&self) -> &EthApiNodeBackend { + pub fn eth_api(&self) -> &EthApiNodeBackend { self.inner.eth_api() } /// Returns the configured sequencer client, if any. @@ -100,32 +98,32 @@ impl OpEthApi { } /// Build a [`OpEthApi`] using [`OpEthApiBuilder`]. - pub const fn builder() -> OpEthApiBuilder { + pub const fn builder() -> OpEthApiBuilder { OpEthApiBuilder::new() } } -impl EthApiTypes for OpEthApi +impl EthApiTypes for OpEthApi where Self: Send + Sync + fmt::Debug, N: OpNodeCore, - NetworkT: RpcTypes, + Rpc: RpcConvert, ::Evm: fmt::Debug, ::Primitives: fmt::Debug, { type Error = OpEthApiError; - type NetworkTypes = NetworkT; - type RpcConvert = RpcConverter>; + type NetworkTypes = Rpc::Network; + type RpcConvert = Rpc; fn tx_resp_builder(&self) -> &Self::RpcConvert { - &self.tx_resp_builder + self.inner.eth_api.tx_resp_builder() } } -impl RpcNodeCore for OpEthApi +impl RpcNodeCore for OpEthApi where N: OpNodeCore, - NetworkT: RpcTypes, + Rpc: RpcConvert, { type Primitives = N::Primitives; type Provider = N::Provider; @@ -160,10 +158,10 @@ where } } -impl RpcNodeCoreExt for OpEthApi +impl RpcNodeCoreExt for OpEthApi where N: OpNodeCore, - NetworkT: RpcTypes, + Rpc: RpcConvert, { #[inline] fn cache(&self) -> &EthStateCache, ProviderReceipt> { @@ -171,7 +169,7 @@ where } } -impl EthApiSpec for OpEthApi +impl EthApiSpec for OpEthApi where N: OpNodeCore< Provider: ChainSpecProvider @@ -179,10 +177,10 @@ where + StageCheckpointReader, Network: NetworkInfo, >, - NetworkT: RpcTypes, + Rpc: RpcConvert, { type Transaction = ProviderTx; - type Rpc = NetworkT; + type Rpc = Rpc::Network; #[inline] fn starting_block(&self) -> U256 { @@ -195,11 +193,11 @@ where } } -impl SpawnBlocking for OpEthApi +impl SpawnBlocking for OpEthApi where Self: Send + Sync + Clone + 'static, N: OpNodeCore, - NetworkT: RpcTypes, + Rpc: RpcConvert, ::Evm: fmt::Debug, ::Primitives: fmt::Debug, { @@ -219,7 +217,7 @@ where } } -impl LoadFee for OpEthApi +impl LoadFee for OpEthApi where Self: LoadBlock, N: OpNodeCore< @@ -227,7 +225,7 @@ where + ChainSpecProvider + StateProviderFactory, >, - NetworkT: RpcTypes, + Rpc: RpcConvert, { #[inline] fn gas_oracle(&self) -> &GasPriceOracle { @@ -245,23 +243,23 @@ where } } -impl LoadState for OpEthApi +impl LoadState for OpEthApi where N: OpNodeCore< Provider: StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, >, - NetworkT: RpcTypes, + Rpc: RpcConvert, ::Evm: fmt::Debug, ::Primitives: fmt::Debug, { } -impl EthState for OpEthApi +impl EthState for OpEthApi where Self: LoadState + SpawnBlocking, N: OpNodeCore, - NetworkT: RpcTypes, + Rpc: RpcConvert, { #[inline] fn max_proof_window(&self) -> u64 { @@ -269,7 +267,7 @@ where } } -impl EthFees for OpEthApi +impl EthFees for OpEthApi where Self: LoadFee< Provider: ChainSpecProvider< @@ -277,11 +275,11 @@ where >, >, N: OpNodeCore, - NetworkT: RpcTypes, + Rpc: RpcConvert, { } -impl Trace for OpEthApi +impl Trace for OpEthApi where Self: RpcNodeCore + LoadState< @@ -294,28 +292,30 @@ where Error: FromEvmError, >, N: OpNodeCore, - NetworkT: RpcTypes, + Rpc: RpcConvert, { } -impl AddDevSigners for OpEthApi 
+impl AddDevSigners for OpEthApi where N: OpNodeCore, - NetworkT: RpcTypes>>, + Rpc: RpcConvert< + Network: RpcTypes>>, + >, { fn with_dev_accounts(&self) { *self.inner.eth_api.signers().write() = DevSigner::random_signers(20) } } -impl fmt::Debug for OpEthApi { +impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() } } /// Container type `OpEthApi` -pub struct OpEthApiInner { +pub struct OpEthApiInner { /// Gateway to node's core components. eth_api: EthApiNodeBackend, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP @@ -327,13 +327,13 @@ pub struct OpEthApiInner { min_suggested_priority_fee: U256, } -impl fmt::Debug for OpEthApiInner { +impl fmt::Debug for OpEthApiInner { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApiInner").finish() } } -impl OpEthApiInner { +impl OpEthApiInner { /// Returns a reference to the [`EthApiNodeBackend`]. const fn eth_api(&self) -> &EthApiNodeBackend { &self.eth_api @@ -345,6 +345,14 @@ impl OpEthApiInner { } } +/// Converter for OP RPC types. +pub type OpRpcConvert = RpcConverter< + NetworkT, + ::Evm, + OpReceiptConverter<::Provider>, + OpTxInfoMapper<::Provider>, +>; + /// Builds [`OpEthApi`] for Optimism. #[derive(Debug)] pub struct OpEthApiBuilder { @@ -404,18 +412,25 @@ impl EthApiBuilder for OpEthApiBuilder where N: FullNodeComponents, NetworkT: RpcTypes, - OpEthApi: FullEthApiServer + AddDevSigners, + OpRpcConvert: RpcConvert, + OpEthApi>: + FullEthApiServer + AddDevSigners, { - type EthApi = OpEthApi; + type EthApi = OpEthApi>; async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result { let Self { sequencer_url, sequencer_headers, min_suggested_priority_fee, .. } = self; + let rpc_converter = RpcConverter::new( + OpReceiptConverter::new(ctx.components.provider().clone()), + OpTxInfoMapper::new(ctx.components.provider().clone()), + ); let eth_api = reth_rpc::EthApiBuilder::new( ctx.components.provider().clone(), ctx.components.pool().clone(), ctx.components.network().clone(), ctx.components.evm_config().clone(), ) + .with_rpc_converter(rpc_converter) .eth_cache(ctx.cache) .task_spawner(ctx.components.task_executor().clone()) .gas_cap(ctx.config.rpc_gas_cap.into()) diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 8d6eae8a2f6..fb1c85dabb7 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -24,7 +24,7 @@ use reth_storage_api::{ }; use reth_transaction_pool::{PoolTransaction, TransactionPool}; -impl LoadPendingBlock for OpEthApi +impl LoadPendingBlock for OpEthApi where Self: SpawnBlocking + EthApiTypes< @@ -50,6 +50,7 @@ where Block = ProviderBlock, >, >, + Rpc: RpcConvert, { #[inline] fn pending_block( diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index 81f9702db00..f304305cc8f 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -1,62 +1,108 @@ //! Loads and formats OP receipt RPC response. 
-use crate::{OpEthApi, OpEthApiError}; -use alloy_consensus::transaction::{SignerRecoverable, TransactionMeta}; +use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError}; use alloy_eips::eip2718::Encodable2718; use alloy_rpc_types_eth::{Log, TransactionReceipt}; -use op_alloy_consensus::{OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope}; +use op_alloy_consensus::{ + OpDepositReceipt, OpDepositReceiptWithBloom, OpReceiptEnvelope, OpTransaction, +}; use op_alloy_rpc_types::{L1BlockInfo, OpTransactionReceipt, OpTransactionReceiptFields}; use reth_chainspec::ChainSpecProvider; -use reth_node_api::{FullNodeComponents, NodeTypes}; +use reth_node_api::NodePrimitives; use reth_optimism_evm::RethL1BlockInfo; use reth_optimism_forks::OpHardforks; -use reth_optimism_primitives::{OpReceipt, OpTransactionSigned}; -use reth_primitives_traits::Recovered; -use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; +use reth_optimism_primitives::OpReceipt; +use reth_primitives_traits::Block; +use reth_rpc_eth_api::{ + helpers::LoadReceipt, + transaction::{ConvertReceiptInput, ReceiptConverter}, + EthApiTypes, RpcConvert, RpcNodeCoreExt, +}; use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; -use reth_storage_api::{ReceiptProvider, TransactionsProvider}; -use std::borrow::Cow; +use reth_storage_api::{BlockReader, ProviderReceipt, ProviderTx}; +use std::fmt::Debug; -impl LoadReceipt for OpEthApi +impl LoadReceipt for OpEthApi where - Self: Send + Sync, - N: FullNodeComponents>, - Self::Provider: TransactionsProvider - + ReceiptProvider, + Self: RpcNodeCoreExt< + Primitives: NodePrimitives< + SignedTx = ProviderTx, + Receipt = ProviderReceipt, + >, + > + EthApiTypes< + NetworkTypes = Rpc::Network, + RpcConvert: RpcConvert< + Network = Rpc::Network, + Primitives = Self::Primitives, + Error = Self::Error, + >, + >, + N: OpNodeCore, + Rpc: RpcConvert, { - async fn build_transaction_receipt( +} + +/// Converter for OP receipts. +#[derive(Debug, Clone)] +pub struct OpReceiptConverter { + provider: Provider, +} + +impl OpReceiptConverter { + /// Creates a new [`OpReceiptConverter`]. + pub const fn new(provider: Provider) -> Self { + Self { provider } + } +} + +impl ReceiptConverter for OpReceiptConverter +where + N: NodePrimitives, + Provider: BlockReader + ChainSpecProvider + Debug, +{ + type RpcReceipt = OpTransactionReceipt; + type Error = OpEthApiError; + + fn convert_receipts( &self, - tx: OpTransactionSigned, - meta: TransactionMeta, - receipt: OpReceipt, - ) -> Result, Self::Error> { - let (block, receipts) = self - .inner - .eth_api - .cache() - .get_block_and_receipts(meta.block_hash) - .await - .map_err(Self::Error::from_eth_err)? - .ok_or(Self::Error::from_eth_err(EthApiError::HeaderNotFound( - meta.block_hash.into(), - )))?; + inputs: Vec>, + ) -> Result, Self::Error> { + let Some(block_number) = inputs.first().map(|r| r.meta.block_number) else { + return Ok(Vec::new()); + }; - let mut l1_block_info = - reth_optimism_evm::extract_l1_info(block.body()).map_err(OpEthApiError::from)?; - - let recovered_tx = tx - .try_into_recovered_unchecked() - .map_err(|_| reth_rpc_eth_types::EthApiError::InvalidTransactionSignature)?; - - Ok(OpReceiptBuilder::new( - &self.inner.eth_api.provider().chain_spec(), - recovered_tx.as_recovered_ref(), - meta, - &receipt, - &receipts, - &mut l1_block_info, - )? - .build()) + let block = self + .provider + .block_by_number(block_number)? 
+ .ok_or(EthApiError::HeaderNotFound(block_number.into()))?; + + let mut l1_block_info = match reth_optimism_evm::extract_l1_info(block.body()) { + Ok(l1_block_info) => l1_block_info, + Err(err) => { + // If it is the genesis block (i.e block number is 0), there is no L1 info, so + // we return an empty l1_block_info. + if block_number == 0 { + return Ok(vec![]); + } + return Err(err.into()); + } + }; + + let mut receipts = Vec::with_capacity(inputs.len()); + + for input in inputs { + // We must clear this cache as different L2 transactions can have different + // L1 costs. A potential improvement here is to only clear the cache if the + // new transaction input has changed, since otherwise the L1 cost wouldn't. + l1_block_info.clear_tx_l1_cost(); + + receipts.push( + OpReceiptBuilder::new(&self.provider.chain_spec(), input, &mut l1_block_info)? + .build(), + ); + } + + Ok(receipts) } } @@ -117,10 +163,10 @@ impl OpReceiptFieldsBuilder { } /// Applies [`L1BlockInfo`](op_revm::L1BlockInfo). - pub fn l1_block_info( + pub fn l1_block_info( mut self, chain_spec: &impl OpHardforks, - tx: &OpTransactionSigned, + tx: &T, l1_block_info: &mut op_revm::L1BlockInfo, ) -> Result { let raw_tx = tx.encoded_2718(); @@ -226,24 +272,19 @@ pub struct OpReceiptBuilder { impl OpReceiptBuilder { /// Returns a new builder. - pub fn new( + pub fn new( chain_spec: &impl OpHardforks, - transaction: Recovered<&OpTransactionSigned>, - meta: TransactionMeta, - receipt: &OpReceipt, - all_receipts: &[OpReceipt], + input: ConvertReceiptInput<'_, N>, l1_block_info: &mut op_revm::L1BlockInfo, - ) -> Result { - let timestamp = meta.timestamp; - let block_number = meta.block_number; - let tx_signed = *transaction.inner(); - let core_receipt = build_receipt( - transaction, - meta, - Cow::Borrowed(receipt), - all_receipts, - None, - |receipt_with_bloom| match receipt { + ) -> Result + where + N: NodePrimitives, + { + let timestamp = input.meta.timestamp; + let block_number = input.meta.block_number; + let tx_signed = *input.tx.inner(); + let core_receipt = + build_receipt(&input, None, |receipt_with_bloom| match input.receipt.as_ref() { OpReceipt::Legacy(_) => OpReceiptEnvelope::Legacy(receipt_with_bloom), OpReceipt::Eip2930(_) => OpReceiptEnvelope::Eip2930(receipt_with_bloom), OpReceipt::Eip1559(_) => OpReceiptEnvelope::Eip1559(receipt_with_bloom), @@ -258,8 +299,7 @@ impl OpReceiptBuilder { logs_bloom: receipt_with_bloom.logs_bloom, }) } - }, - ); + }); let op_receipt_fields = OpReceiptFieldsBuilder::new(timestamp, block_number) .l1_block_info(chain_spec, tx_signed, l1_block_info)? @@ -286,6 +326,7 @@ mod test { use alloy_primitives::{hex, U256}; use op_alloy_network::eip2718::Decodable2718; use reth_optimism_chainspec::{BASE_MAINNET, OP_MAINNET}; + use reth_optimism_primitives::OpTransactionSigned; /// OP Mainnet transaction at index 0 in block 124665056. /// diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index b92bd71f994..8127387b420 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,18 +1,14 @@ //! Loads and formats OP transaction RPC response. 
-use crate::{ - eth::{OpEthApiInner, OpNodeCore}, - OpEthApi, OpEthApiError, SequencerClient, -}; +use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError, SequencerClient}; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_eth::TransactionInfo; use op_alloy_consensus::{transaction::OpTransactionInfo, OpTxEnvelope}; -use reth_node_api::FullNodeComponents; use reth_optimism_primitives::DepositReceipt; use reth_rpc_eth_api::{ helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction, SpawnBlocking}, - try_into_op_tx_info, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcNodeCore, - RpcNodeCoreExt, RpcTypes, TxInfoMapper, + try_into_op_tx_info, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcConvert, RpcNodeCore, + RpcNodeCoreExt, TxInfoMapper, }; use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_storage_api::{ @@ -20,17 +16,14 @@ use reth_storage_api::{ TransactionsProvider, }; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; -use std::{ - fmt::{Debug, Formatter}, - sync::Arc, -}; +use std::fmt::{Debug, Formatter}; impl EthTransactions for OpEthApi where Self: LoadTransaction - + EthApiTypes, + + EthApiTypes, N: OpNodeCore>>, - Rpc: RpcTypes, + Rpc: RpcConvert, { fn signers(&self) -> &SignersForRpc { self.inner.eth_api.signers() @@ -77,18 +70,19 @@ where } } -impl LoadTransaction for OpEthApi +impl LoadTransaction for OpEthApi where Self: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt, N: OpNodeCore, Self::Pool: TransactionPool, + Rpc: RpcConvert, { } impl OpEthApi where N: OpNodeCore, - Rpc: RpcTypes, + Rpc: RpcConvert, { /// Returns the [`SequencerClient`] if one is set. pub fn raw_tx_forwarder(&self) -> Option { @@ -100,32 +94,32 @@ where /// /// For deposits, receipt is fetched to extract `deposit_nonce` and `deposit_receipt_version`. /// Otherwise, it works like regular Ethereum implementation, i.e. uses [`TransactionInfo`]. -pub struct OpTxInfoMapper(Arc>); +pub struct OpTxInfoMapper { + provider: Provider, +} -impl Clone for OpTxInfoMapper { +impl Clone for OpTxInfoMapper { fn clone(&self) -> Self { - Self(self.0.clone()) + Self { provider: self.provider.clone() } } } -impl Debug for OpTxInfoMapper { +impl Debug for OpTxInfoMapper { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("OpTxInfoMapper").finish() } } -impl OpTxInfoMapper { +impl OpTxInfoMapper { /// Creates [`OpTxInfoMapper`] that uses [`ReceiptProvider`] borrowed from given `eth_api`. 
- pub const fn new(eth_api: Arc>) -> Self { - Self(eth_api) + pub const fn new(provider: Provider) -> Self { + Self { provider } } } -impl TxInfoMapper<&OpTxEnvelope> for OpTxInfoMapper +impl TxInfoMapper<&OpTxEnvelope> for OpTxInfoMapper where - N: FullNodeComponents, - N::Provider: ReceiptProvider, - Rpc: RpcTypes, + Provider: ReceiptProvider, { type Out = OpTransactionInfo; type Err = ProviderError; @@ -135,6 +129,6 @@ where tx: &OpTxEnvelope, tx_info: TransactionInfo, ) -> Result { - try_into_op_tx_info(self.0.eth_api.provider(), tx, tx_info) + try_into_op_tx_info(&self.provider, tx, tx_info) } } diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 7c5604e2420..4f0f11babee 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -37,15 +37,15 @@ use reth_network_api::{noop::NoopNetwork, NetworkInfo, Peers}; use reth_primitives_traits::NodePrimitives; use reth_rpc::{ AdminApi, DebugApi, EngineEthApi, EthApi, EthApiBuilder, EthBundle, MinerApi, NetApi, - OtterscanApi, RPCApi, RethApi, RpcTypes, TraceApi, TxPoolApi, ValidationApiConfig, Web3Api, + OtterscanApi, RPCApi, RethApi, TraceApi, TxPoolApi, ValidationApiConfig, Web3Api, }; use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ helpers::{Call, EthApiSpec, EthTransactions, LoadPendingBlock, TraceExt}, - EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcHeader, RpcReceipt, RpcTransaction, - RpcTxReq, + EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcConvert, RpcConverter, RpcHeader, + RpcReceipt, RpcTransaction, RpcTxReq, }; -use reth_rpc_eth_types::{EthConfig, EthSubscriptionIdProvider}; +use reth_rpc_eth_types::{receipt::EthReceiptConverter, EthConfig, EthSubscriptionIdProvider}; use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret}; use reth_storage_api::{ AccountReader, BlockReader, BlockReaderIdExt, ChangeSetReader, FullRpcProvider, ProviderBlock, @@ -106,7 +106,7 @@ pub mod rate_limiter; /// /// This is the main entrypoint and the easiest way to configure an RPC server. #[derive(Debug, Clone)] -pub struct RpcModuleBuilder { +pub struct RpcModuleBuilder { /// The Provider type to when creating all rpc handlers provider: Provider, /// The Pool type to when creating all rpc handlers @@ -120,15 +120,13 @@ pub struct RpcModuleBuilder, + _primitives: PhantomData, } // === impl RpcBuilder === -impl - RpcModuleBuilder -where - Rpc: RpcTypes, +impl + RpcModuleBuilder { /// Create a new instance of the builder pub const fn new( @@ -146,7 +144,7 @@ where pub fn with_provider
<P>
( self, provider: P, - ) -> RpcModuleBuilder { + ) -> RpcModuleBuilder { let Self { pool, network, executor, evm_config, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } @@ -155,7 +153,7 @@ where pub fn with_pool
<P>
( self, pool: P, - ) -> RpcModuleBuilder { + ) -> RpcModuleBuilder { let Self { provider, network, executor, evm_config, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } @@ -167,8 +165,7 @@ where /// [`EthApi`] which requires a [`TransactionPool`] implementation. pub fn with_noop_pool( self, - ) -> RpcModuleBuilder - { + ) -> RpcModuleBuilder { let Self { provider, executor, network, evm_config, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, @@ -185,7 +182,7 @@ where pub fn with_network( self, network: Net, - ) -> RpcModuleBuilder { + ) -> RpcModuleBuilder { let Self { provider, pool, executor, evm_config, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } @@ -197,7 +194,7 @@ where /// [`EthApi`] which requires a [`NetworkInfo`] implementation. pub fn with_noop_network( self, - ) -> RpcModuleBuilder { + ) -> RpcModuleBuilder { let Self { provider, pool, executor, evm_config, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, @@ -237,7 +234,7 @@ where pub fn with_evm_config( self, evm_config: E, - ) -> RpcModuleBuilder { + ) -> RpcModuleBuilder { let Self { provider, pool, executor, network, consensus, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } @@ -246,15 +243,24 @@ where pub fn with_consensus( self, consensus: C, - ) -> RpcModuleBuilder { + ) -> RpcModuleBuilder { let Self { provider, network, pool, executor, evm_config, _primitives, .. } = self; RpcModuleBuilder { provider, network, pool, executor, evm_config, consensus, _primitives } } /// Instantiates a new [`EthApiBuilder`] from the configured components. - pub fn eth_api_builder(&self) -> EthApiBuilder + #[expect(clippy::type_complexity)] + pub fn eth_api_builder( + &self, + ) -> EthApiBuilder< + Provider, + Pool, + Network, + EvmConfig, + RpcConverter>, + > where - Provider: BlockReaderIdExt + Clone, + Provider: BlockReaderIdExt + ChainSpecProvider + Clone, Pool: Clone, Network: Clone, EvmConfig: Clone, @@ -272,7 +278,16 @@ where /// Note: This spawns all necessary tasks. /// /// See also [`EthApiBuilder`]. 
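The spelled-out return type of `eth_api_builder` above (an `EthApiBuilder` parameterized with `RpcConverter<Ethereum, EvmConfig, EthReceiptConverter<Provider::ChainSpec>>`) is unwieldy enough that the diff suppresses `clippy::type_complexity`. Downstream code would typically name it once; a sketch of such an alias, with a hypothetical name:

```rust
use alloy_network::Ethereum;
use reth_chainspec::ChainSpecProvider;
use reth_rpc_eth_api::RpcConverter;
use reth_rpc_eth_types::receipt::EthReceiptConverter;

/// Hypothetical alias for the default Ethereum converter assembled by
/// `RpcModuleBuilder::eth_api_builder`.
type DefaultEthConverter<Provider, EvmConfig> = RpcConverter<
    Ethereum,
    EvmConfig,
    EthReceiptConverter<<Provider as ChainSpecProvider>::ChainSpec>,
>;
```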
- pub fn bootstrap_eth_api(&self) -> EthApi + #[expect(clippy::type_complexity)] + pub fn bootstrap_eth_api( + &self, + ) -> EthApi< + Provider, + Pool, + Network, + EvmConfig, + RpcConverter>, + > where N: NodePrimitives, Provider: BlockReaderIdExt @@ -283,15 +298,16 @@ where + Unpin + 'static, Pool: Clone, - EvmConfig: Clone, + EvmConfig: ConfigureEvm, Network: Clone, + RpcConverter>: RpcConvert, { self.eth_api_builder().build() } } -impl - RpcModuleBuilder +impl + RpcModuleBuilder where N: NodePrimitives, Provider: FullRpcProvider @@ -391,7 +407,7 @@ where } } -impl Default for RpcModuleBuilder { +impl Default for RpcModuleBuilder { fn default() -> Self { Self::new((), (), (), Box::new(TokioTaskExecutor::default()), (), ()) } diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index f03d73f01d9..293dd4e1937 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -1,4 +1,3 @@ -use alloy_network::Ethereum; use alloy_rpc_types_engine::{ClientCode, ClientVersionV1}; use reth_chainspec::MAINNET; use reth_consensus::noop::NoopConsensus; @@ -118,15 +117,9 @@ pub async fn launch_http_ws_same_port(modules: impl Into) -> } /// Returns an [`RpcModuleBuilder`] with testing components. -pub fn test_rpc_builder() -> RpcModuleBuilder< - EthPrimitives, - NoopProvider, - TestPool, - NoopNetwork, - EthEvmConfig, - NoopConsensus, - Ethereum, -> { +pub fn test_rpc_builder( +) -> RpcModuleBuilder +{ RpcModuleBuilder::default() .with_provider(NoopProvider::default()) .with_pool(TestPoolBuilder::default().into()) diff --git a/crates/rpc/rpc-convert/src/rpc.rs b/crates/rpc/rpc-convert/src/rpc.rs index 4e052672102..73061d55543 100644 --- a/crates/rpc/rpc-convert/src/rpc.rs +++ b/crates/rpc/rpc-convert/src/rpc.rs @@ -35,6 +35,9 @@ where /// Adapter for network specific transaction response. pub type RpcTransaction = ::TransactionResponse; +/// Adapter for network specific receipt response. +pub type RpcReceipt = ::Receipt; + /// Adapter for network specific transaction request. pub type RpcTxReq = ::TransactionRequest; diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index edb16d341ad..4bc088788fd 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -2,7 +2,7 @@ use crate::{ fees::{CallFees, CallFeesError}, - RpcTransaction, RpcTxReq, RpcTypes, + RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, }; use alloy_consensus::{error::ValueError, transaction::Recovered, EthereumTxEnvelope, TxEip4844}; use alloy_primitives::{Address, TxKind, U256}; @@ -15,11 +15,42 @@ use reth_evm::{ revm::context_interface::{either::Either, Block}, ConfigureEvm, TxEnvFor, }; -use reth_primitives_traits::{NodePrimitives, TxTy}; +use reth_primitives_traits::{NodePrimitives, TransactionMeta, TxTy}; use revm_context::{BlockEnv, CfgEnv, TxEnv}; -use std::{convert::Infallible, error::Error, fmt::Debug, marker::PhantomData}; +use std::{borrow::Cow, convert::Infallible, error::Error, fmt::Debug, marker::PhantomData}; use thiserror::Error; +/// Input for [`RpcConvert::convert_receipts`]. +#[derive(Debug, Clone)] +pub struct ConvertReceiptInput<'a, N: NodePrimitives> { + /// Primitive receipt. + pub receipt: Cow<'a, N::Receipt>, + /// Transaction the receipt corresponds to. + pub tx: Recovered<&'a N::SignedTx>, + /// Gas used by the transaction. + pub gas_used: u64, + /// Number of logs emitted before this transaction. 
+ pub next_log_index: usize, + /// Metadata for the transaction. + pub meta: TransactionMeta, +} + +/// A type that knows how to convert primitive receipts to RPC representations. +pub trait ReceiptConverter: Debug { + /// RPC representation. + type RpcReceipt; + + /// Error that may occur during conversion. + type Error; + + /// Converts a set of primitive receipts to RPC representations. It is guaranteed that all + /// receipts are from the same block. + fn convert_receipts( + &self, + receipts: Vec>, + ) -> Result, Self::Error>; +} + /// Responsible for the conversions from and into RPC requests and responses. /// /// The JSON-RPC schema and the Node primitives are configurable using the [`RpcConvert::Network`] @@ -78,6 +109,13 @@ pub trait RpcConvert: Send + Sync + Unpin + Clone + Debug { cfg_env: &CfgEnv, block_env: &BlockEnv, ) -> Result; + + /// Converts a set of primitive receipts to RPC representations. It is guaranteed that all + /// receipts are from the same block. + fn convert_receipts( + &self, + receipts: Vec>, + ) -> Result>, Self::Error>; } /// Converts `self` into `T`. The opposite of [`FromConsensusTx`]. @@ -323,70 +361,66 @@ pub struct TransactionConversionError(String); /// is [`TransactionInfo`] then `()` can be used as `Map` which trivially passes over the input /// object. #[derive(Debug)] -pub struct RpcConverter { - phantom: PhantomData<(E, Evm, Err)>, +pub struct RpcConverter { + phantom: PhantomData<(E, Evm)>, + receipt_converter: Receipt, mapper: Map, } -impl RpcConverter { - /// Creates a new [`RpcConverter`] with the default mapper. - pub const fn new() -> Self { - Self::with_mapper(()) +impl RpcConverter { + /// Creates a new [`RpcConverter`] with `receipt_converter` and `mapper`. + pub const fn new(receipt_converter: Receipt, mapper: Map) -> Self { + Self { phantom: PhantomData, receipt_converter, mapper } } } -impl RpcConverter { - /// Creates a new [`RpcConverter`] with `mapper`. - pub const fn with_mapper(mapper: Map) -> Self { - Self { phantom: PhantomData, mapper } - } - - /// Converts the generic types. - pub fn convert(self) -> RpcConverter { - RpcConverter::with_mapper(self.mapper) - } - - /// Swaps the inner `mapper`. - pub fn map(self, mapper: Map2) -> RpcConverter { - RpcConverter::with_mapper(mapper) - } - - /// Converts the generic types and swaps the inner `mapper`. 
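Together, `ConvertReceiptInput` (per-transaction context with the gas delta and log offset precomputed by the caller) and `ReceiptConverter` (batch conversion with a same-block guarantee) form the new extension point. A minimal sketch with stand-in receipt types, assuming the same `Cow`-based input shape; the real trait is generic over `NodePrimitives`:

```rust
use std::{borrow::Cow, convert::Infallible};

#[derive(Clone, Debug)]
struct PrimReceipt {
    success: bool,
}

#[derive(Debug)]
struct RpcReceipt {
    success: bool,
    gas_used: u64,
}

// Mirrors `ConvertReceiptInput`: the caller precomputes per-tx gas from
// cumulative gas, so converters never re-derive it.
struct Input<'a> {
    receipt: Cow<'a, PrimReceipt>,
    gas_used: u64,
}

// Mirrors `ReceiptConverter`: all inputs belong to one block, letting
// implementations amortize per-block work (e.g. the OP L1 block info
// extraction earlier in this diff).
trait ReceiptConverter {
    type RpcReceipt;
    type Error;
    fn convert_receipts(
        &self,
        inputs: Vec<Input<'_>>,
    ) -> Result<Vec<Self::RpcReceipt>, Self::Error>;
}

struct BasicConverter;

impl ReceiptConverter for BasicConverter {
    type RpcReceipt = RpcReceipt;
    type Error = Infallible;

    fn convert_receipts(
        &self,
        inputs: Vec<Input<'_>>,
    ) -> Result<Vec<RpcReceipt>, Self::Error> {
        Ok(inputs
            .into_iter()
            .map(|i| RpcReceipt { success: i.receipt.success, gas_used: i.gas_used })
            .collect())
    }
}
```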
- pub fn convert_map( - self, - mapper: Map2, - ) -> RpcConverter { - self.convert().map(mapper) +impl Default for RpcConverter +where + Receipt: Default, + Map: Default, +{ + fn default() -> Self { + Self { + phantom: PhantomData, + receipt_converter: Default::default(), + mapper: Default::default(), + } } } -impl Clone for RpcConverter { +impl Clone for RpcConverter { fn clone(&self) -> Self { - Self::with_mapper(self.mapper.clone()) - } -} - -impl Default for RpcConverter { - fn default() -> Self { - Self::new() + Self { + phantom: PhantomData, + receipt_converter: self.receipt_converter.clone(), + mapper: self.mapper.clone(), + } } } -impl RpcConvert for RpcConverter +impl RpcConvert for RpcConverter where N: NodePrimitives, E: RpcTypes + Send + Sync + Unpin + Clone + Debug, Evm: ConfigureEvm, TxTy: IntoRpcTx + Clone + Debug, RpcTxReq: TryIntoSimTx> + TryIntoTxEnv>, - Err: From - + From< as TryIntoTxEnv>>::Err> - + for<'a> From<>>::Err> - + Error - + Unpin + Receipt: ReceiptConverter< + N, + RpcReceipt = RpcReceipt, + Error: From + + From< as TryIntoTxEnv>>::Err> + + for<'a> From<>>::Err> + + Error + + Unpin + + Sync + + Send + + Into>, + > + Send + Sync - + Send - + Into>, + + Unpin + + Clone + + Debug, Map: for<'a> TxInfoMapper< &'a TxTy, Out = as IntoRpcTx>::TxInfo, @@ -399,7 +433,7 @@ where type Primitives = N; type Network = E; type TxEnv = TxEnvFor; - type Error = Err; + type Error = Receipt::Error; fn fill( &self, @@ -424,6 +458,13 @@ where ) -> Result { Ok(request.try_into_tx_env(cfg_env, block_env)?) } + + fn convert_receipts( + &self, + receipts: Vec>, + ) -> Result>, Self::Error> { + self.receipt_converter.convert_receipts(receipts) + } } /// Optimism specific RPC transaction compatibility implementations. diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index a0503f4946e..ac70a4705b4 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -5,6 +5,7 @@ use crate::{ node::RpcNodeCoreExt, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcBlock, RpcNodeCore, RpcReceipt, }; +use alloy_consensus::TxReceipt; use alloy_eips::BlockId; use alloy_primitives::{Sealable, U256}; use alloy_rlp::Encodable; @@ -12,11 +13,13 @@ use alloy_rpc_types_eth::{Block, BlockTransactions, Header, Index}; use futures::Future; use reth_evm::ConfigureEvm; use reth_node_api::BlockBody; -use reth_primitives_traits::{NodePrimitives, RecoveredBlock}; -use reth_rpc_convert::RpcConvert; +use reth_primitives_traits::{ + AlloyBlockHeader, NodePrimitives, RecoveredBlock, SignedTransaction, TransactionMeta, +}; +use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert}; use reth_storage_api::{BlockIdReader, BlockReader, ProviderHeader, ProviderReceipt, ProviderTx}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use std::sync::Arc; +use std::{borrow::Cow, sync::Arc}; /// Result type of the fetched block receipts. pub type BlockReceiptsResult = Result>>, E>; @@ -31,7 +34,9 @@ pub type BlockAndReceiptsResult = Result< /// Block related functions for the [`EthApiServer`](crate::EthApiServer) trait in the /// `eth_` namespace. -pub trait EthBlocks: LoadBlock { +pub trait EthBlocks: + LoadBlock> +{ /// Returns the block header for the given block id. 
#[expect(clippy::type_complexity)] fn rpc_block_header( @@ -109,7 +114,54 @@ pub trait EthBlocks: LoadBlock { block_id: BlockId, ) -> impl Future> + Send where - Self: LoadReceipt; + Self: LoadReceipt, + { + async move { + if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? { + let block_number = block.number(); + let base_fee = block.base_fee_per_gas(); + let block_hash = block.hash(); + let excess_blob_gas = block.excess_blob_gas(); + let timestamp = block.timestamp(); + let mut gas_used = 0; + let mut next_log_index = 0; + + let inputs = block + .transactions_recovered() + .zip(receipts.iter()) + .enumerate() + .map(|(idx, (tx, receipt))| { + let meta = TransactionMeta { + tx_hash: *tx.tx_hash(), + index: idx as u64, + block_hash, + block_number, + base_fee, + excess_blob_gas, + timestamp, + }; + + let input = ConvertReceiptInput { + receipt: Cow::Borrowed(receipt), + tx, + gas_used: receipt.cumulative_gas_used() - gas_used, + next_log_index, + meta, + }; + + gas_used = receipt.cumulative_gas_used(); + next_log_index = receipt.logs().len(); + + input + }) + .collect::>(); + + return self.tx_resp_builder().convert_receipts(inputs).map(Some) + } + + Ok(None) + } + } /// Helper method that loads a block and all its receipts. fn load_block_and_receipts( diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 4f1b5ebe16a..8db4c9a7199 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -1,17 +1,35 @@ //! Loads a receipt from database. Helper trait for `eth_` block and transaction RPC methods, that //! loads receipt data w.r.t. network. -use alloy_consensus::transaction::TransactionMeta; +use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; +use alloy_consensus::{transaction::TransactionMeta, TxReceipt}; use futures::Future; +use reth_node_api::NodePrimitives; +use reth_primitives_traits::SignerRecoverable; +use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert}; +use reth_rpc_eth_types::{error::FromEthApiError, EthApiError}; use reth_storage_api::{ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider}; - -use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; +use std::borrow::Cow; /// Assembles transaction receipt data w.r.t to network. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` receipts RPC methods. pub trait LoadReceipt: - EthApiTypes + RpcNodeCoreExt + Send + Sync + EthApiTypes< + RpcConvert: RpcConvert< + Primitives = Self::Primitives, + Error = Self::Error, + Network = Self::NetworkTypes, + >, + Error: FromEthApiError, + > + RpcNodeCoreExt< + Provider: TransactionsProvider + ReceiptProvider, + Primitives: NodePrimitives< + Receipt = ProviderReceipt, + SignedTx = ProviderTx, + >, + > + Send + + Sync { /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`. fn build_transaction_receipt( @@ -19,5 +37,41 @@ pub trait LoadReceipt: tx: ProviderTx, meta: TransactionMeta, receipt: ProviderReceipt, - ) -> impl Future, Self::Error>> + Send; + ) -> impl Future, Self::Error>> + Send { + async move { + let hash = meta.block_hash; + // get all receipts for the block + let all_receipts = self + .cache() + .get_receipts(hash) + .await + .map_err(Self::Error::from_eth_err)? 
+ .ok_or(EthApiError::HeaderNotFound(hash.into()))?; + + let mut gas_used = 0; + let mut next_log_index = 0; + + if meta.index > 0 { + for receipt in all_receipts.iter().take(meta.index as usize) { + gas_used = receipt.cumulative_gas_used(); + next_log_index += receipt.logs().len(); + } + } + + Ok(self + .tx_resp_builder() + .convert_receipts(vec![ConvertReceiptInput { + tx: tx + .try_into_recovered_unchecked() + .map_err(Self::Error::from_eth_err)? + .as_recovered_ref(), + gas_used: receipt.cumulative_gas_used() - gas_used, + receipt: Cow::Owned(receipt), + next_log_index, + meta, + }])? + .pop() + .unwrap()) + } + } } diff --git a/crates/rpc/rpc-eth-api/src/types.rs b/crates/rpc/rpc-eth-api/src/types.rs index 2b4148ebe81..4eb8b466ed3 100644 --- a/crates/rpc/rpc-eth-api/src/types.rs +++ b/crates/rpc/rpc-eth-api/src/types.rs @@ -59,7 +59,7 @@ where >, > + EthApiTypes< RpcConvert: RpcConvert< - Primitives = ::Primitives, + Primitives = Self::Primitives, Network = Self::NetworkTypes, Error = RpcError, >, diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs index 815160abf4e..eead8c5fc2a 100644 --- a/crates/rpc/rpc-eth-types/src/lib.rs +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -33,5 +33,4 @@ pub use gas_oracle::{ }; pub use id_provider::EthSubscriptionIdProvider; pub use pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; -pub use receipt::EthReceiptBuilder; pub use transaction::TransactionSource; diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index f68547ddac6..786f6e3f193 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -1,42 +1,30 @@ //! RPC receipt response builder, extends a layer one receipt with layer two data. -use alloy_consensus::{ - transaction::{Recovered, SignerRecoverable, TransactionMeta}, - ReceiptEnvelope, Transaction, TxReceipt, -}; +use alloy_consensus::{ReceiptEnvelope, Transaction, TxReceipt}; use alloy_eips::eip7840::BlobParams; use alloy_primitives::{Address, TxKind}; use alloy_rpc_types_eth::{Log, ReceiptWithBloom, TransactionReceipt}; -use reth_ethereum_primitives::{Receipt, TransactionSigned}; -use std::borrow::Cow; +use reth_chainspec::EthChainSpec; +use reth_ethereum_primitives::Receipt; +use reth_primitives_traits::NodePrimitives; +use reth_rpc_convert::transaction::{ConvertReceiptInput, ReceiptConverter}; +use std::{borrow::Cow, sync::Arc}; + +use crate::EthApiError; /// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure. 
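The default `build_transaction_receipt` above reconstructs the per-transaction `gas_used` and starting log index by walking the receipts that precede the target in the block; the block-level path in `EthBlocks::block_receipts` does the same incrementally. The arithmetic as a tiny standalone illustration, assuming log indices accumulate across the whole block:

```rust
fn main() {
    // Cumulative gas and per-tx log counts for three receipts in one block.
    let cumulative_gas = [21_000u64, 63_000, 84_000];
    let logs_per_tx = [0usize, 2, 1];

    let mut prev_cumulative = 0;
    let mut next_log_index = 0;
    for (i, (&cum, &n_logs)) in cumulative_gas.iter().zip(&logs_per_tx).enumerate() {
        // Per-transaction gas is the delta of cumulative gas, which is what
        // `ConvertReceiptInput::gas_used` carries.
        let gas_used = cum - prev_cumulative;
        println!("tx {i}: gas_used = {gas_used}, first log index = {next_log_index}");
        prev_cumulative = cum;
        next_log_index += n_logs;
    }
    // tx 0: 21000 gas, logs start at 0
    // tx 1: 42000 gas, logs start at 0
    // tx 2: 21000 gas, logs start at 2
}
```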
-pub fn build_receipt( - transaction: Recovered<&T>, - meta: TransactionMeta, - receipt: Cow<'_, R>, - all_receipts: &[R], +pub fn build_receipt( + input: &ConvertReceiptInput<'_, N>, blob_params: Option, build_envelope: impl FnOnce(ReceiptWithBloom>) -> E, ) -> TransactionReceipt where - R: TxReceipt, - T: Transaction + SignerRecoverable, + N: NodePrimitives, { - let from = transaction.signer(); - - // get the previous transaction cumulative gas used - let gas_used = if meta.index == 0 { - receipt.cumulative_gas_used() - } else { - let prev_tx_idx = (meta.index - 1) as usize; - all_receipts - .get(prev_tx_idx) - .map(|prev_receipt| receipt.cumulative_gas_used() - prev_receipt.cumulative_gas_used()) - .unwrap_or_default() - }; + let ConvertReceiptInput { tx, meta, receipt, gas_used, next_log_index } = input; + let from = tx.signer(); - let blob_gas_used = transaction.blob_gas_used(); + let blob_gas_used = tx.blob_gas_used(); // Blob gas price should only be present if the transaction is a blob transaction let blob_gas_price = blob_gas_used.and_then(|_| Some(blob_params?.calc_blob_fee(meta.excess_blob_gas?))); @@ -45,12 +33,6 @@ where let cumulative_gas_used = receipt.cumulative_gas_used(); let logs_bloom = receipt.bloom(); - // get number of logs in the block - let mut num_logs = 0; - for prev_receipt in all_receipts.iter().take(meta.index as usize) { - num_logs += prev_receipt.logs().len(); - } - macro_rules! build_rpc_logs { ($logs:expr) => { $logs @@ -62,7 +44,7 @@ where block_timestamp: Some(meta.timestamp), transaction_hash: Some(meta.tx_hash), transaction_index: Some(meta.index), - log_index: Some((num_logs + tx_log_idx) as u64), + log_index: Some((next_log_index + tx_log_idx) as u64), removed: false, }) .collect() @@ -76,8 +58,8 @@ where let rpc_receipt = alloy_rpc_types_eth::Receipt { status, cumulative_gas_used, logs }; - let (contract_address, to) = match transaction.kind() { - TxKind::Create => (Some(from.create(transaction.nonce())), None), + let (contract_address, to) = match tx.kind() { + TxKind::Create => (Some(from.create(tx.nonce())), None), TxKind::Call(addr) => (None, Some(Address(*addr))), }; @@ -89,50 +71,56 @@ where block_number: Some(meta.block_number), from, to, - gas_used, + gas_used: *gas_used, contract_address, - effective_gas_price: transaction.effective_gas_price(meta.base_fee), + effective_gas_price: tx.effective_gas_price(meta.base_fee), // EIP-4844 fields blob_gas_price, blob_gas_used, } } -/// Receipt response builder. +/// Converter for Ethereum receipts. #[derive(Debug)] -pub struct EthReceiptBuilder { - /// The base response body, contains L1 fields. - pub base: TransactionReceipt, +pub struct EthReceiptConverter { + chain_spec: Arc, +} + +impl Clone for EthReceiptConverter { + fn clone(&self) -> Self { + Self { chain_spec: self.chain_spec.clone() } + } } -impl EthReceiptBuilder { - /// Returns a new builder with the base response body (L1 fields) set. - /// - /// Note: This requires _all_ block receipts because we need to calculate the gas used by the - /// transaction. 
- pub fn new( - transaction: Recovered<&TransactionSigned>, - meta: TransactionMeta, - receipt: Cow<'_, Receipt>, - all_receipts: &[Receipt], - blob_params: Option, - ) -> Self { - let tx_type = receipt.tx_type; - - let base = build_receipt( - transaction, - meta, - receipt, - all_receipts, - blob_params, - |receipt_with_bloom| ReceiptEnvelope::from_typed(tx_type, receipt_with_bloom), - ); - - Self { base } +impl EthReceiptConverter { + /// Creates a new converter with the given chain spec. + pub const fn new(chain_spec: Arc) -> Self { + Self { chain_spec } } +} - /// Builds a receipt response from the base response body, and any set additional fields. - pub fn build(self) -> TransactionReceipt { - self.base +impl ReceiptConverter for EthReceiptConverter +where + N: NodePrimitives, + ChainSpec: EthChainSpec, +{ + type Error = EthApiError; + type RpcReceipt = TransactionReceipt; + + fn convert_receipts( + &self, + inputs: Vec>, + ) -> Result, Self::Error> { + let mut receipts = Vec::with_capacity(inputs.len()); + + for input in inputs { + let tx_type = input.receipt.tx_type; + let blob_params = self.chain_spec.blob_params_at_timestamp(input.meta.timestamp); + receipts.push(build_receipt(&input, blob_params, |receipt_with_bloom| { + ReceiptEnvelope::from_typed(tx_type, receipt_with_bloom) + })); + } + + Ok(receipts) } } diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 4e6ca6ae24b..d7cf9839b03 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -18,7 +18,6 @@ reth-primitives-traits.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true reth-engine-primitives.workspace = true -reth-ethereum-primitives.workspace = true reth-errors.workspace = true reth-metrics.workspace = true reth-storage-api.workspace = true @@ -92,6 +91,7 @@ thiserror.workspace = true derive_more.workspace = true [dev-dependencies] +reth-ethereum-primitives.workspace = true reth-evm-ethereum.workspace = true reth-testing-utils.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } diff --git a/crates/rpc/rpc/src/eth/builder.rs b/crates/rpc/rpc/src/eth/builder.rs index 1b4374c1770..813b79bb0be 100644 --- a/crates/rpc/rpc/src/eth/builder.rs +++ b/crates/rpc/rpc/src/eth/builder.rs @@ -1,20 +1,22 @@ //! `EthApiBuilder` implementation use crate::{eth::core::EthApiInner, EthApi}; +use alloy_network::Ethereum; use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; use reth_node_api::NodePrimitives; -use reth_rpc_convert::RpcTypes; +use reth_rpc_convert::{RpcConvert, RpcConverter}; use reth_rpc_eth_types::{ - fee_history::fee_history_cache_new_blocks_task, EthStateCache, EthStateCacheConfig, - FeeHistoryCache, FeeHistoryCacheConfig, GasCap, GasPriceOracle, GasPriceOracleConfig, + fee_history::fee_history_cache_new_blocks_task, receipt::EthReceiptConverter, EthStateCache, + EthStateCacheConfig, FeeHistoryCache, FeeHistoryCacheConfig, GasCap, GasPriceOracle, + GasPriceOracleConfig, }; use reth_rpc_server_types::constants::{ DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS, }; use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner, TokioTaskExecutor}; -use std::{marker::PhantomData, sync::Arc}; +use std::sync::Arc; /// A helper to build the `EthApi` handler instance. 
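Where the OP converter needs a full `BlockReader` to pull L1 fee info out of each block, `EthReceiptConverter` above only captures an `Arc`'d chain spec, used to resolve blob fee params from each receipt's block timestamp. A small runnable sketch of that shape with a stand-in spec; the activation timestamp is a made-up placeholder:

```rust
use std::sync::Arc;

// Stand-in chain spec: the converter's only requirement is timestamp-based
// blob-param lookup, since EIP-4844 params are hardfork-dependent.
struct Spec;

impl Spec {
    fn blob_params_at_timestamp(&self, timestamp: u64) -> Option<&'static str> {
        // Hypothetical activation timestamp, for illustration only.
        const BLOB_FORK_TIMESTAMP: u64 = 1_700_000_000;
        (timestamp >= BLOB_FORK_TIMESTAMP).then_some("post-fork blob params")
    }
}

// Mirrors `EthReceiptConverter`: cheap to clone, shares the spec.
#[derive(Clone)]
struct Converter {
    chain_spec: Arc<Spec>,
}

fn main() {
    let converter = Converter { chain_spec: Arc::new(Spec) };
    // Params are resolved per input from `meta.timestamp`, not fixed at
    // construction, so pre- and post-fork blocks both convert correctly.
    for ts in [0u64, 1_800_000_000] {
        println!("{ts}: {:?}", converter.chain_spec.blob_params_at_timestamp(ts));
    }
}
```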
/// @@ -29,7 +31,7 @@ where pool: Pool, network: Network, evm_config: EvmConfig, - rpc: PhantomData, + rpc_converter: Rpc, gas_cap: GasCap, max_simulate_blocks: u64, eth_proof_window: u64, @@ -43,22 +45,29 @@ where task_spawner: Box, } -impl EthApiBuilder +impl + EthApiBuilder< + Provider, + Pool, + Network, + EvmConfig, + RpcConverter>, + > where - Provider: BlockReaderIdExt, - Rpc: RpcTypes, + Provider: BlockReaderIdExt + ChainSpecProvider, { /// Creates a new `EthApiBuilder` instance. pub fn new(provider: Provider, pool: Pool, network: Network, evm_config: EvmConfig) -> Self where Provider: BlockReaderIdExt, { + let rpc_converter = RpcConverter::new(EthReceiptConverter::new(provider.chain_spec()), ()); Self { provider, pool, network, evm_config, - rpc: PhantomData, + rpc_converter, eth_cache: None, gas_oracle: None, gas_cap: GasCap::default(), @@ -72,13 +81,61 @@ where eth_state_cache_config: Default::default(), } } +} +impl EthApiBuilder +where + Provider: BlockReaderIdExt + ChainSpecProvider, +{ /// Configures the task spawner used to spawn additional tasks. pub fn task_spawner(mut self, spawner: impl TaskSpawner + 'static) -> Self { self.task_spawner = Box::new(spawner); self } + /// Changes the configured converter. + pub fn with_rpc_converter( + self, + rpc_converter: RpcNew, + ) -> EthApiBuilder { + let Self { + provider, + pool, + network, + evm_config, + rpc_converter: _, + gas_cap, + max_simulate_blocks, + eth_proof_window, + fee_history_cache_config, + proof_permits, + eth_state_cache_config, + eth_cache, + gas_oracle, + blocking_task_pool, + task_spawner, + gas_oracle_config, + } = self; + EthApiBuilder { + provider, + pool, + network, + evm_config, + rpc_converter, + gas_cap, + max_simulate_blocks, + eth_proof_window, + fee_history_cache_config, + proof_permits, + eth_state_cache_config, + eth_cache, + gas_oracle, + blocking_task_pool, + task_spawner, + gas_oracle_config, + } + } + /// Sets `eth_cache` config for the cache that will be used if no [`EthStateCache`] is /// configured. 
pub const fn eth_state_cache_config( @@ -172,13 +229,14 @@ where > + Clone + Unpin + 'static, + Rpc: RpcConvert, { let Self { provider, pool, network, - rpc: _, evm_config, + rpc_converter, eth_state_cache_config, gas_oracle_config, eth_cache, @@ -225,6 +283,7 @@ where evm_config, task_spawner, proof_permits, + rpc_converter, ) } @@ -250,7 +309,8 @@ where + Clone + Unpin + 'static, + Rpc: RpcConvert, { - EthApi { inner: Arc::new(self.build_inner()), tx_resp_builder: Default::default() } + EthApi { inner: Arc::new(self.build_inner()) } } } diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index a8699cb5af7..32dfbeadfb6 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -9,19 +9,21 @@ use alloy_eips::BlockNumberOrTag; use alloy_network::Ethereum; use alloy_primitives::{Bytes, U256}; use derive_more::Deref; +use reth_chainspec::{ChainSpec, ChainSpecProvider}; use reth_node_api::{FullNodeComponents, FullNodeTypes}; -use reth_rpc_convert::RpcTypes; +use reth_rpc_convert::{RpcConvert, RpcConverter}; use reth_rpc_eth_api::{ helpers::{spec::SignersForRpc, SpawnBlocking}, node::RpcNodeCoreExt, EthApiTypes, RpcNodeCore, }; use reth_rpc_eth_types::{ - EthApiError, EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, PendingBlock, + receipt::EthReceiptConverter, EthApiError, EthStateCache, FeeHistoryCache, GasCap, + GasPriceOracle, PendingBlock, }; use reth_storage_api::{ - BlockReader, BlockReaderIdExt, NodePrimitivesProvider, ProviderBlock, ProviderHeader, - ProviderReceipt, + noop::NoopProvider, BlockReader, BlockReaderIdExt, NodePrimitivesProvider, ProviderBlock, + ProviderHeader, ProviderReceipt, }; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, @@ -31,22 +33,29 @@ use tokio::sync::{broadcast, Mutex}; const DEFAULT_BROADCAST_CAPACITY: usize = 2000; +/// Helper type alias for [`RpcConverter`] with components from the given [`FullNodeComponents`]. +pub type EthRpcConverterFor = RpcConverter< + Ethereum, + ::Evm, + EthReceiptConverter<<::Provider as ChainSpecProvider>::ChainSpec>, +>; + /// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`]. -pub type EthApiFor = EthApi< +pub type EthApiFor = EthApi< ::Provider, ::Pool, ::Network, ::Evm, - Rpc, + EthRpcConverterFor, >; /// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`]. -pub type EthApiBuilderFor = EthApiBuilder< +pub type EthApiBuilderFor = EthApiBuilder< ::Provider, ::Pool, ::Network, ::Evm, - Rpc, + EthRpcConverterFor, >; /// `Eth` API implementation. @@ -64,30 +73,24 @@ pub type EthApiBuilderFor = EthApiBuilder< /// While this type requires various unrestricted generic components, trait bounds are enforced when /// additional traits are implemented for this type. #[derive(Deref)] -pub struct EthApi { +pub struct EthApi { /// All nested fields bundled together. #[deref] pub(super) inner: Arc>, - /// Transaction RPC response builder. - pub tx_resp_builder: EthRpcConverter, } impl Clone for EthApi where Provider: BlockReader, - Rpc: RpcTypes, + Rpc: RpcConvert, { fn clone(&self) -> Self { - Self { inner: self.inner.clone(), tx_resp_builder: self.tx_resp_builder.clone() } + Self { inner: self.inner.clone() } } } -impl EthApi -where - Provider: BlockReaderIdExt, - Rpc: RpcTypes, -{ +impl EthApi> { /// Convenience fn to obtain a new [`EthApiBuilder`] instance with mandatory components. 
/// /// Creating an [`EthApi`] requires a few mandatory components: @@ -106,7 +109,7 @@ where /// use reth_provider::noop::NoopProvider; /// use reth_rpc::EthApi; /// use reth_transaction_pool::noop::NoopTransactionPool; - /// let eth_api = EthApi::<_, _, _, _, Ethereum>::builder( + /// let eth_api = EthApi::builder( /// NoopProvider::default(), /// NoopTransactionPool::default(), /// NoopNetwork::default(), @@ -114,15 +117,31 @@ where /// ) /// .build(); /// ``` - pub fn builder( + #[expect(clippy::type_complexity)] + pub fn builder( provider: Provider, pool: Pool, network: Network, evm_config: EvmConfig, - ) -> EthApiBuilder { + ) -> EthApiBuilder< + Provider, + Pool, + Network, + EvmConfig, + RpcConverter>, + > + where + Provider: ChainSpecProvider + BlockReaderIdExt, + { EthApiBuilder::new(provider, pool, network, evm_config) } +} +impl EthApi +where + Provider: BlockReaderIdExt + ChainSpecProvider, + Rpc: RpcConvert, +{ /// Creates a new, shareable instance using the default tokio task spawner. #[expect(clippy::too_many_arguments)] pub fn new( @@ -138,6 +157,7 @@ where fee_history_cache: FeeHistoryCache>, evm_config: EvmConfig, proof_permits: usize, + rpc_converter: Rpc, ) -> Self { let inner = EthApiInner::new( provider, @@ -153,21 +173,23 @@ where evm_config, TokioTaskExecutor::default().boxed(), proof_permits, + rpc_converter, ); - Self { inner: Arc::new(inner), tx_resp_builder: Default::default() } + Self { inner: Arc::new(inner) } } } -impl EthApiTypes - for EthApi +impl EthApiTypes + for EthApi where Self: Send + Sync, Provider: BlockReader, + Rpc: RpcConvert, { type Error = EthApiError; - type NetworkTypes = Ethereum; - type RpcConvert = EthRpcConverter; + type NetworkTypes = Rpc::Network; + type RpcConvert = Rpc; fn tx_resp_builder(&self) -> &Self::RpcConvert { &self.tx_resp_builder @@ -181,7 +203,7 @@ where Pool: Send + Sync + Clone + Unpin, Network: Send + Sync + Clone, EvmConfig: Send + Sync + Clone + Unpin, - Rpc: RpcTypes, + Rpc: RpcConvert, { type Primitives = Provider::Primitives; type Provider = Provider; @@ -218,7 +240,7 @@ where Pool: Send + Sync + Clone + Unpin, Network: Send + Sync + Clone, EvmConfig: Send + Sync + Clone + Unpin, - Rpc: RpcTypes, + Rpc: RpcConvert, { #[inline] fn cache(&self) -> &EthStateCache, ProviderReceipt> { @@ -230,7 +252,7 @@ impl std::fmt::Debug for EthApi where Provider: BlockReader, - Rpc: RpcTypes, + Rpc: RpcConvert, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("EthApi").finish_non_exhaustive() @@ -240,9 +262,9 @@ where impl SpawnBlocking for EthApi where - Self: EthApiTypes + Clone + Send + Sync + 'static, + Self: EthApiTypes + Clone + Send + Sync + 'static, Provider: BlockReader, - Rpc: RpcTypes, + Rpc: RpcConvert, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -262,7 +284,7 @@ where /// Container type `EthApi` #[expect(missing_debug_implementations)] -pub struct EthApiInner { +pub struct EthApiInner { /// The transaction pool. pool: Pool, /// The provider that can interact with the chain. @@ -270,7 +292,7 @@ pub struct EthApiInner, + signers: SignersForRpc, /// The async cache frontend for eth related data eth_cache: EthStateCache, /// The async gas oracle frontend for gas price suggestions @@ -299,12 +321,15 @@ pub struct EthApiInner, + + /// Converter for RPC types. + tx_resp_builder: Rpc, } impl EthApiInner where Provider: BlockReaderIdExt, - Rpc: RpcTypes, + Rpc: RpcConvert, { /// Creates a new, shareable instance using the default tokio task spawner. 
#[expect(clippy::too_many_arguments)] @@ -322,6 +347,7 @@ where evm_config: EvmConfig, task_spawner: Box, proof_permits: usize, + tx_resp_builder: Rpc, ) -> Self { let signers = parking_lot::RwLock::new(Default::default()); // get the block number of the latest block @@ -354,6 +380,7 @@ where evm_config, blocking_task_guard: BlockingTaskGuard::new(proof_permits), raw_tx_sender, + tx_resp_builder, } } } @@ -361,7 +388,7 @@ where impl EthApiInner where Provider: BlockReader, - Rpc: RpcTypes, + Rpc: RpcConvert, { /// Returns a handle to data on disk. #[inline] @@ -369,6 +396,12 @@ where &self.provider } + /// Returns a handle to the transaction response builder. + #[inline] + pub const fn tx_resp_builder(&self) -> &Rpc { + &self.tx_resp_builder + } + /// Returns a handle to data in memory. #[inline] pub const fn cache(&self) -> &EthStateCache { @@ -433,7 +466,7 @@ where /// Returns a handle to the signers. #[inline] - pub const fn signers(&self) -> &SignersForRpc { + pub const fn signers(&self) -> &SignersForRpc { &self.signers } @@ -490,12 +523,20 @@ mod tests { use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; use reth_provider::test_utils::{MockEthProvider, NoopProvider}; + use reth_rpc_convert::RpcConverter; use reth_rpc_eth_api::EthApiServer; + use reth_rpc_eth_types::receipt::EthReceiptConverter; use reth_storage_api::{BlockReader, BlockReaderIdExt, StateProviderFactory}; use reth_testing_utils::generators; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; - type FakeEthApi = EthApi; + type FakeEthApi
<P>
= EthApi< + P, + TestPool, + NoopNetwork, + EthEvmConfig, + RpcConverter>, + >; fn build_test_eth_api< P: BlockReaderIdExt< @@ -511,7 +552,7 @@ mod tests { + 'static, >( provider: P, - ) -> EthApi { + ) -> FakeEthApi
<P>
{ EthApiBuilder::new( provider.clone(), testing_pool(), @@ -613,7 +654,7 @@ mod tests { /// Invalid block range #[tokio::test] async fn test_fee_history_empty() { - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( &build_test_eth_api(NoopProvider::default()), U64::from(1), BlockNumberOrTag::Latest, @@ -635,7 +676,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(newest_block + 1), newest_block.into(), @@ -658,7 +699,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(1), (newest_block + 1000).into(), @@ -681,7 +722,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(0), newest_block.into(), diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 78e28edb467..a6214eb7890 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -1087,11 +1087,13 @@ mod tests { use alloy_network::Ethereum; use alloy_primitives::FixedBytes; use rand::Rng; - use reth_chainspec::ChainSpecProvider; + use reth_chainspec::{ChainSpec, ChainSpecProvider}; use reth_ethereum_primitives::TxType; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; use reth_provider::test_utils::MockEthProvider; + use reth_rpc_convert::RpcConverter; + use reth_rpc_eth_types::receipt::EthReceiptConverter; use reth_tasks::TokioTaskExecutor; use reth_testing_utils::generators; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; @@ -1122,7 +1124,13 @@ mod tests { // Helper function to create a test EthApi instance fn build_test_eth_api( provider: MockEthProvider, - ) -> EthApi { + ) -> EthApi< + MockEthProvider, + TestPool, + NoopNetwork, + EthEvmConfig, + RpcConverter>, + > { EthApiBuilder::new( provider.clone(), testing_pool(), diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index fd4a9cc6ea0..90d7db80356 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -1,19 +1,14 @@ //! Contains RPC handler implementations specific to blocks. 
-use std::borrow::Cow; - -use alloy_consensus::{transaction::TransactionMeta, BlockHeader}; -use alloy_rpc_types_eth::{BlockId, TransactionReceipt}; -use reth_chainspec::{ChainSpecProvider, EthChainSpec}; +use reth_chainspec::ChainSpecProvider; use reth_evm::ConfigureEvm; use reth_primitives_traits::NodePrimitives; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, - types::RpcTypes, - RpcNodeCore, RpcNodeCoreExt, RpcReceipt, + helpers::{EthBlocks, LoadBlock, LoadPendingBlock, SpawnBlocking}, + RpcNodeCore, RpcNodeCoreExt, }; -use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; +use reth_rpc_eth_types::EthApiError; use reth_storage_api::{BlockReader, ProviderTx}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; @@ -24,60 +19,16 @@ impl EthBlocks where Self: LoadBlock< Error = EthApiError, - NetworkTypes = Rpc, - RpcConvert: RpcConvert, - Provider: BlockReader< - Transaction = reth_ethereum_primitives::TransactionSigned, - Receipt = reth_ethereum_primitives::Receipt, + NetworkTypes = Rpc::Network, + RpcConvert: RpcConvert< + Primitives = Self::Primitives, + Error = Self::Error, + Network = Rpc::Network, >, >, Provider: BlockReader + ChainSpecProvider, - Rpc: RpcTypes, + Rpc: RpcConvert, { - async fn block_receipts( - &self, - block_id: BlockId, - ) -> Result>>, Self::Error> - where - Self: LoadReceipt, - { - if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? { - let block_number = block.number(); - let base_fee = block.base_fee_per_gas(); - let block_hash = block.hash(); - let excess_blob_gas = block.excess_blob_gas(); - let timestamp = block.timestamp(); - let blob_params = self.provider().chain_spec().blob_params_at_timestamp(timestamp); - - return block - .transactions_recovered() - .zip(receipts.iter()) - .enumerate() - .map(|(idx, (tx, receipt))| { - let meta = TransactionMeta { - tx_hash: *tx.tx_hash(), - index: idx as u64, - block_hash, - block_number, - base_fee, - excess_blob_gas, - timestamp, - }; - Ok(EthReceiptBuilder::new( - tx, - meta, - Cow::Borrowed(receipt), - &receipts, - blob_params, - ) - .build()) - }) - .collect::, Self::Error>>() - .map(Some) - } - - Ok(None) - } } impl LoadBlock @@ -94,6 +45,6 @@ where >, Provider: BlockReader, EvmConfig: ConfigureEvm::Primitives>, - Rpc: RpcTypes, + Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index 0053ca15478..f910fcdbe73 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -1,25 +1,23 @@ //! Contains RPC handler implementations specific to endpoints that call/execute within evm. 
use crate::EthApi; -use alloy_evm::block::BlockExecutorFactory; use reth_errors::ProviderError; -use reth_evm::{ConfigureEvm, EvmFactory, TxEnvFor}; +use reth_evm::{ConfigureEvm, TxEnvFor}; use reth_node_api::NodePrimitives; -use reth_rpc_convert::{RpcConvert, RpcTypes}; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, FromEvmError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, }; use reth_storage_api::{BlockReader, ProviderHeader, ProviderTx}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use revm::context::TxEnv; impl EthCall for EthApi where - Self: EstimateCall - + LoadPendingBlock - + FullEthApiTypes + Self: EstimateCall + + LoadPendingBlock + + FullEthApiTypes + RpcNodeCoreExt< Pool: TransactionPool< Transaction: PoolTransaction>, @@ -29,7 +27,7 @@ where >, EvmConfig: ConfigureEvm::Primitives>, Provider: BlockReader, - Rpc: RpcTypes, + Rpc: RpcConvert, { } @@ -38,20 +36,19 @@ impl Call where Self: LoadState< Evm: ConfigureEvm< - BlockExecutorFactory: BlockExecutorFactory>, Primitives: NodePrimitives< BlockHeader = ProviderHeader, SignedTx = ProviderTx, >, >, - RpcConvert: RpcConvert, Network = Rpc>, - NetworkTypes = Rpc, + RpcConvert: RpcConvert, Network = Rpc::Network>, + NetworkTypes = Rpc::Network, Error: FromEvmError + From<::Error> + From, > + SpawnBlocking, Provider: BlockReader, - Rpc: RpcTypes, + Rpc: RpcConvert, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -67,8 +64,8 @@ where impl EstimateCall for EthApi where - Self: Call, + Self: Call, Provider: BlockReader, - Rpc: RpcTypes, + Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs index 45b0a2a70dc..65c98b9c989 100644 --- a/crates/rpc/rpc/src/eth/helpers/fees.rs +++ b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -1,7 +1,7 @@ //! Contains RPC handler implementations for fee history. use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; -use reth_rpc_convert::RpcTypes; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; use reth_rpc_eth_types::{FeeHistoryCache, GasPriceOracle}; use reth_storage_api::{BlockReader, BlockReaderIdExt, ProviderHeader, StateProviderFactory}; @@ -17,7 +17,7 @@ where >, >, Provider: BlockReader, - Rpc: RpcTypes, + Rpc: RpcConvert, { } @@ -28,7 +28,7 @@ where Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory, - Rpc: RpcTypes, + Rpc: RpcConvert, { #[inline] fn gas_oracle(&self) -> &GasPriceOracle { diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index acb4072bff6..9bc47803035 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -24,9 +24,9 @@ impl LoadPendingBlock for EthApi where Self: SpawnBlocking< - NetworkTypes = Rpc, + NetworkTypes = Rpc::Network, Error: FromEvmError, - RpcConvert: RpcConvert, + RpcConvert: RpcConvert, > + RpcNodeCore< Provider: BlockReaderIdExt + ChainSpecProvider @@ -46,7 +46,9 @@ where >, >, Provider: BlockReader, - Rpc: RpcTypes
<TransactionRequest: SignableTxRequest<ProviderTx<Self::Provider>
>>, + Rpc: RpcConvert< + Network: RpcTypes
>>, + >, { #[inline] fn pending_block( diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index fee7724df5e..e033a1fcf3b 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,55 +1,31 @@ //! Builds an RPC receipt response w.r.t. data layout of network. use crate::EthApi; -use alloy_consensus::{ - crypto::RecoveryError, - transaction::{SignerRecoverable, TransactionMeta}, -}; -use alloy_rpc_types_eth::TransactionReceipt; -use reth_chainspec::{ChainSpecProvider, EthChainSpec}; -use reth_ethereum_primitives::{Receipt, TransactionSigned}; -use reth_rpc_convert::RpcTypes; -use reth_rpc_eth_api::{ - helpers::LoadReceipt, EthApiTypes, FromEthApiError, RpcNodeCoreExt, RpcReceipt, -}; -use reth_rpc_eth_types::{EthApiError, EthReceiptBuilder}; -use reth_storage_api::{BlockReader, ReceiptProvider, TransactionsProvider}; -use std::borrow::Cow; +use alloy_consensus::crypto::RecoveryError; +use reth_chainspec::ChainSpecProvider; +use reth_node_api::NodePrimitives; +use reth_rpc_convert::RpcConvert; +use reth_rpc_eth_api::{helpers::LoadReceipt, EthApiTypes, RpcNodeCoreExt}; +use reth_storage_api::{BlockReader, ProviderReceipt, ProviderTx}; impl LoadReceipt for EthApi where Self: RpcNodeCoreExt< - Provider: TransactionsProvider - + ReceiptProvider, - > + EthApiTypes>, + Primitives: NodePrimitives< + SignedTx = ProviderTx, + Receipt = ProviderReceipt, + >, + > + EthApiTypes< + NetworkTypes = Rpc::Network, + RpcConvert: RpcConvert< + Network = Rpc::Network, + Primitives = Self::Primitives, + Error = Self::Error, + >, + Error: From, + >, Provider: BlockReader + ChainSpecProvider, - Rpc: RpcTypes, + Rpc: RpcConvert, { - async fn build_transaction_receipt( - &self, - tx: TransactionSigned, - meta: TransactionMeta, - receipt: Receipt, - ) -> Result, Self::Error> { - let hash = meta.block_hash; - // get all receipts for the block - let all_receipts = self - .cache() - .get_receipts(hash) - .await - .map_err(Self::Error::from_eth_err)? 
- .ok_or(EthApiError::HeaderNotFound(hash.into()))?; - let blob_params = self.provider().chain_spec().blob_params_at_timestamp(meta.timestamp); - - Ok(EthReceiptBuilder::new( - // Note: we assume this transaction is valid, because it's mined and therefore valid - tx.try_into_recovered_unchecked()?.as_recovered_ref(), - meta, - Cow::Owned(receipt), - &all_receipts, - blob_params, - ) - .build()) - } } diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index fcd8161adaa..9aba5198bb7 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -8,7 +8,7 @@ use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{eip191_hash_message, Address, Signature, B256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; -use reth_rpc_convert::{RpcTypes, SignableTxRequest}; +use reth_rpc_convert::{RpcConvert, RpcTypes, SignableTxRequest}; use reth_rpc_eth_api::helpers::{signer::Result, AddDevSigners, EthSigner}; use reth_rpc_eth_types::SignError; use reth_storage_api::{BlockReader, ProviderTx}; @@ -17,7 +17,7 @@ impl AddDevSigners for EthApi where Provider: BlockReader, - Rpc: RpcTypes>>, + Rpc: RpcConvert>>>, { fn with_dev_accounts(&self) { *self.inner.signers().write() = DevSigner::random_signers(20) diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index 3bec5a67a09..e372dec4cb1 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -1,7 +1,7 @@ -use alloy_network::Ethereum; use alloy_primitives::U256; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_network_api::NetworkInfo; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{spec::SignersForApi, EthApiSpec}, RpcNodeCore, @@ -10,8 +10,8 @@ use reth_storage_api::{BlockNumReader, BlockReader, ProviderTx, StageCheckpointR use crate::EthApi; -impl EthApiSpec - for EthApi +impl EthApiSpec + for EthApi where Self: RpcNodeCore< Provider: ChainSpecProvider @@ -20,9 +20,10 @@ where Network: NetworkInfo, >, Provider: BlockReader, + Rpc: RpcConvert, { type Transaction = ProviderTx; - type Rpc = Ethereum; + type Rpc = Rpc::Network; fn starting_block(&self) -> U256 { self.inner.starting_block() diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 62a94f1bd7e..36d754676c3 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -1,7 +1,7 @@ //! Contains RPC handler implementations specific to state. 
use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; -use reth_rpc_convert::RpcTypes; +use reth_rpc_convert::RpcConvert; use reth_storage_api::{BlockReader, StateProviderFactory}; use reth_transaction_pool::TransactionPool; @@ -17,7 +17,7 @@ impl EthState where Self: LoadState + SpawnBlocking, Provider: BlockReader, - Rpc: RpcTypes, + Rpc: RpcConvert, { fn max_proof_window(&self) -> u64 { self.inner.eth_proof_window() @@ -32,25 +32,28 @@ where + StateProviderFactory + ChainSpecProvider, Pool: TransactionPool, - > + EthApiTypes, + > + EthApiTypes, Provider: BlockReader, - Rpc: RpcTypes, + Rpc: RpcConvert, { } #[cfg(test)] mod tests { + use crate::eth::helpers::types::EthRpcConverter; + use super::*; use alloy_consensus::Header; use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M; - use alloy_network::Ethereum; use alloy_primitives::{Address, StorageKey, StorageValue, U256}; + use reth_chainspec::ChainSpec; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; use reth_rpc_eth_api::helpers::EthState; use reth_rpc_eth_types::{ - EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle, + receipt::EthReceiptConverter, EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, + GasPriceOracle, }; use reth_rpc_server_types::constants::{ DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS, @@ -59,13 +62,17 @@ mod tests { use reth_transaction_pool::test_utils::{testing_pool, TestPool}; use std::collections::HashMap; - fn noop_eth_api() -> EthApi { + fn noop_eth_api( + ) -> EthApi> { + let provider = NoopProvider::default(); let pool = testing_pool(); let evm_config = EthEvmConfig::mainnet(); let cache = EthStateCache::spawn(NoopProvider::default(), Default::default()); + let rpc_converter = + EthRpcConverter::new(EthReceiptConverter::new(provider.chain_spec()), ()); EthApi::new( - NoopProvider::default(), + provider, pool, NoopNetwork::default(), cache.clone(), @@ -77,12 +84,13 @@ mod tests { FeeHistoryCache::
::new(FeeHistoryCacheConfig::default()), evm_config, DEFAULT_PROOF_PERMITS, + rpc_converter, ) } fn mock_eth_api( accounts: HashMap, - ) -> EthApi { + ) -> EthApi> { let pool = testing_pool(); let mock_provider = MockEthProvider::default(); @@ -90,6 +98,8 @@ mod tests { mock_provider.extend_accounts(accounts); let cache = EthStateCache::spawn(mock_provider.clone(), Default::default()); + let rpc_converter = + EthRpcConverter::new(EthReceiptConverter::new(mock_provider.chain_spec()), ()); EthApi::new( mock_provider.clone(), pool, @@ -103,6 +113,7 @@ mod tests { FeeHistoryCache::
::new(FeeHistoryCacheConfig::default()), evm_config, DEFAULT_PROOF_PERMITS, + rpc_converter, ) } diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index a080264698d..78ea5c12b48 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -2,7 +2,7 @@ use reth_evm::ConfigureEvm; use reth_node_api::NodePrimitives; -use reth_rpc_convert::RpcTypes; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{LoadState, Trace}, FromEvmError, @@ -25,6 +25,6 @@ where Error: FromEvmError, >, Provider: BlockReader, - Rpc: RpcTypes, + Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 313b5778785..97f0bebd75e 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -2,7 +2,7 @@ use crate::EthApi; use alloy_primitives::{Bytes, B256}; -use reth_rpc_convert::RpcTypes; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction, SpawnBlocking}, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, @@ -14,9 +14,9 @@ use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool} impl EthTransactions for EthApi where - Self: LoadTransaction + EthApiTypes, + Self: LoadTransaction + EthApiTypes, Provider: BlockReader>, - Rpc: RpcTypes, + Rpc: RpcConvert, { #[inline] fn signers(&self) -> &SignersForRpc { @@ -51,14 +51,16 @@ where Self: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt - + EthApiTypes, + + EthApiTypes, Provider: BlockReader, - Rpc: RpcTypes, + Rpc: RpcConvert, { } #[cfg(test)] mod tests { + use crate::eth::helpers::types::EthRpcConverter; + use super::*; use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M; use alloy_primitives::{hex_literal::hex, Bytes}; @@ -68,7 +70,8 @@ mod tests { use reth_provider::test_utils::NoopProvider; use reth_rpc_eth_api::helpers::EthTransactions; use reth_rpc_eth_types::{ - EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, GasPriceOracle, + receipt::EthReceiptConverter, EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, + GasPriceOracle, }; use reth_rpc_server_types::constants::{ DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS, @@ -86,6 +89,8 @@ mod tests { let evm_config = EthEvmConfig::new(noop_provider.chain_spec()); let cache = EthStateCache::spawn(noop_provider.clone(), Default::default()); let fee_history_cache = FeeHistoryCache::new(FeeHistoryCacheConfig::default()); + let rpc_converter = + EthRpcConverter::new(EthReceiptConverter::new(noop_provider.chain_spec()), ()); let eth_api = EthApi::new( noop_provider.clone(), pool.clone(), @@ -99,6 +104,7 @@ mod tests { fee_history_cache, evm_config, DEFAULT_PROOF_PERMITS, + rpc_converter, ); // https://etherscan.io/tx/0xa694b71e6c128a2ed8e2e0f6770bddbe52e3bb8f10e8472f9a79ab81497a8b5d diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 2425c15fc0b..816820fea6e 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -3,10 +3,11 @@ use alloy_network::Ethereum; use reth_evm_ethereum::EthEvmConfig; use reth_rpc_convert::RpcConverter; -use reth_rpc_eth_types::EthApiError; +use reth_rpc_eth_types::receipt::EthReceiptConverter; /// An [`RpcConverter`] with its generics set to Ethereum specific. 
-pub type EthRpcConverter = RpcConverter; +pub type EthRpcConverter = + RpcConverter>; //tests for simulate #[cfg(test)] @@ -14,12 +15,13 @@ mod tests { use super::*; use alloy_consensus::{Transaction, TxType}; use alloy_rpc_types_eth::TransactionRequest; + use reth_chainspec::MAINNET; use reth_rpc_eth_types::simulate::resolve_transaction; use revm::database::CacheDB; #[test] fn test_resolve_transaction_empty_request() { - let builder = EthRpcConverter::default(); + let builder = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone()), ()); let mut db = CacheDB::>::default(); let tx = TransactionRequest::default(); let result = resolve_transaction(tx, 21000, 0, 1, &mut db, &builder).unwrap(); @@ -34,7 +36,7 @@ mod tests { #[test] fn test_resolve_transaction_legacy() { let mut db = CacheDB::>::default(); - let builder = EthRpcConverter::default(); + let builder = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone()), ()); let tx = TransactionRequest { gas_price: Some(100), ..Default::default() }; @@ -50,7 +52,7 @@ mod tests { #[test] fn test_resolve_transaction_partial_eip1559() { let mut db = CacheDB::>::default(); - let builder = EthRpcConverter::default(); + let rpc_converter = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone()), ()); let tx = TransactionRequest { max_fee_per_gas: Some(200), @@ -58,7 +60,7 @@ mod tests { ..Default::default() }; - let result = resolve_transaction(tx, 21000, 0, 1, &mut db, &builder).unwrap(); + let result = resolve_transaction(tx, 21000, 0, 1, &mut db, &rpc_converter).unwrap(); assert_eq!(result.tx_type(), TxType::Eip1559); let tx = result.into_inner(); diff --git a/examples/exex-hello-world/src/main.rs b/examples/exex-hello-world/src/main.rs index 0f9e904881a..9b9710ac6af 100644 --- a/examples/exex-hello-world/src/main.rs +++ b/examples/exex-hello-world/src/main.rs @@ -11,8 +11,8 @@ use futures::TryStreamExt; use reth_ethereum::{ exex::{ExExContext, ExExEvent, ExExNotification}, node::{api::FullNodeComponents, EthereumNode}, - rpc::eth::EthApiFor, }; +use reth_op::rpc::api::eth::helpers::FullEthApi; use reth_tracing::tracing::info; use tokio::sync::oneshot; @@ -46,9 +46,9 @@ async fn my_exex(mut ctx: ExExContext) -> eyre:: /// This is an example of how to access the `EthApi` inside an ExEx. It receives the `EthApi` once /// the node is launched fully. -async fn ethapi_exex( +async fn ethapi_exex( mut ctx: ExExContext, - ethapi_rx: oneshot::Receiver>, + ethapi_rx: oneshot::Receiver, ) -> eyre::Result<()> where Node: FullNodeComponents, From 237e97ab83cf4890b05a0d1280f6387ef6bff3bc Mon Sep 17 00:00:00 2001 From: strmfos <155266597+strmfos@users.noreply.github.com> Date: Thu, 17 Jul 2025 15:41:33 +0200 Subject: [PATCH 193/305] docs: fix typo from `optstack` to `opstack` (#17454) --- crates/primitives-traits/src/extended.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/primitives-traits/src/extended.rs b/crates/primitives-traits/src/extended.rs index f2f46cf36d2..b2731aa5a96 100644 --- a/crates/primitives-traits/src/extended.rs +++ b/crates/primitives-traits/src/extended.rs @@ -25,7 +25,7 @@ macro_rules! delegate { /// An enum that combines two different transaction types. 
/// -/// This is intended to be used to extend existing presets, for example the ethereum or optstack +/// This is intended to be used to extend existing presets, for example the ethereum or opstack /// transaction types and receipts /// /// Note: The [`Extended::Other`] variants must not overlap with the builtin one, transaction From 0b1d950f67f399168b918357c417ae61d7d484f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Thu, 17 Jul 2025 15:46:10 +0200 Subject: [PATCH 194/305] feat(tx-pool): add submit methods to `TransactionPool` (#17431) --- crates/transaction-pool/src/traits.rs | 38 ++++++++++++++++++++++++++- examples/txpool-tracing/src/submit.rs | 26 +++++------------- 2 files changed, 44 insertions(+), 20 deletions(-) diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 0621394d11e..10bac5afe9c 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -52,7 +52,7 @@ use crate::{ blobstore::BlobStoreError, - error::{InvalidPoolTransactionError, PoolResult}, + error::{InvalidPoolTransactionError, PoolError, PoolResult}, pool::{ state::SubPool, BestTransactionFilter, NewTransactionEvent, TransactionEvents, TransactionListenerKind, @@ -176,6 +176,42 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { transactions: Vec, ) -> impl Future>> + Send; + /// Submit a consensus transaction directly to the pool + fn add_consensus_transaction( + &self, + tx: Recovered<::Consensus>, + origin: TransactionOrigin, + ) -> impl Future> + Send { + async move { + let tx_hash = *tx.tx_hash(); + + let pool_transaction = match Self::Transaction::try_from_consensus(tx) { + Ok(tx) => tx, + Err(e) => return Err(PoolError::other(tx_hash, e.to_string())), + }; + + self.add_transaction(origin, pool_transaction).await + } + } + + /// Submit a consensus transaction and subscribe to event stream + fn add_consensus_transaction_and_subscribe( + &self, + tx: Recovered<::Consensus>, + origin: TransactionOrigin, + ) -> impl Future> + Send { + async move { + let tx_hash = *tx.tx_hash(); + + let pool_transaction = match Self::Transaction::try_from_consensus(tx) { + Ok(tx) => tx, + Err(e) => return Err(PoolError::other(tx_hash, e.to_string())), + }; + + self.add_transaction_and_subscribe(origin, pool_transaction).await + } + } + /// Returns a new transaction change event stream for the given transaction. /// /// Returns `None` if the transaction is not in the pool. 
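The new helpers fold the `try_from_consensus` conversion into the pool itself,
so callers can hand over a recovered consensus transaction directly. A minimal
caller sketch, assuming a handle `pool` implementing the updated
`TransactionPool` trait (the function name and exact import paths below are
illustrative, not part of this patch):

    use alloy_primitives::TxHash;
    use reth_primitives_traits::Recovered;
    use reth_transaction_pool::{
        error::PoolError, PoolTransaction, TransactionOrigin, TransactionPool,
    };

    /// Submits an already-recovered consensus transaction as a local transaction.
    async fn submit_recovered<P: TransactionPool>(
        pool: &P,
        tx: Recovered<<P::Transaction as PoolTransaction>::Consensus>,
    ) -> Result<TxHash, PoolError> {
        // Conversion to the pool's transaction type happens inside the helper;
        // a failed conversion surfaces as a `PoolError` keyed by the tx hash.
        pool.add_consensus_transaction(tx, TransactionOrigin::Local).await
    }

The `_and_subscribe` variant works the same way but additionally returns an
event stream (`TransactionEvents`) for the submitted transaction, as the
updated txpool-tracing example below shows.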
diff --git a/examples/txpool-tracing/src/submit.rs b/examples/txpool-tracing/src/submit.rs index 04744f37244..eb2c7957e04 100644 --- a/examples/txpool-tracing/src/submit.rs +++ b/examples/txpool-tracing/src/submit.rs @@ -51,16 +51,11 @@ where // Recover the transaction let transaction = transaction.try_into_recovered()?; - // Convert to pool transaction type - let pool_transaction = - ::Transaction::try_from_consensus(transaction) - .map_err(|e| eyre::eyre!("Failed to convert to pool transaction: {e}"))?; - - // Submit the transaction to the pool and get event stream let mut tx_events = node .pool() - .add_transaction_and_subscribe(TransactionOrigin::Local, pool_transaction) - .await?; + .add_consensus_transaction_and_subscribe(transaction, TransactionOrigin::Local) + .await + .map_err(|e| eyre::eyre!("Pool error: {e}"))?; // Wait for the transaction to be added to the pool while let Some(event) = tx_events.next().await { @@ -118,16 +113,9 @@ where // Recover the transaction let transaction = transaction.try_into_recovered()?; - // Get the transaction hash - let tx_hash = *transaction.hash(); - - // Convert to pool transaction type - let pool_transaction = - ::Transaction::try_from_consensus(transaction) - .map_err(|e| eyre::eyre!("Failed to convert to pool transaction: {e}"))?; - // Submit the transaction to the pool - node.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; - - Ok(tx_hash) + node.pool() + .add_consensus_transaction(transaction, TransactionOrigin::Local) + .await + .map_err(|e| eyre::eyre!("Pool error: {e}")) } From 61a19c1bcb4d27cec1be1ab2444ade9a3f0f26f1 Mon Sep 17 00:00:00 2001 From: cakevm Date: Thu, 17 Jul 2025 15:56:21 +0200 Subject: [PATCH 195/305] feat(alloy-provider): implement `sealed_header` method (#17455) --- crates/alloy-provider/src/lib.rs | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/crates/alloy-provider/src/lib.rs b/crates/alloy-provider/src/lib.rs index 0a5c1475d3c..6b0eb95d981 100644 --- a/crates/alloy-provider/src/lib.rs +++ b/crates/alloy-provider/src/lib.rs @@ -301,20 +301,12 @@ where } fn header_by_number(&self, num: u64) -> ProviderResult> { - let block_response = self.block_on_async(async { - self.provider.get_block_by_number(num.into()).await.map_err(ProviderError::other) - })?; - - let Some(block_response) = block_response else { + let Some(sealed_header) = self.sealed_header(num)? 
else { // If the block was not found, return None return Ok(None); }; - // Convert the network block response to primitive block - let block = as TryFromBlockResponse>::from_block_response(block_response) - .map_err(ProviderError::other)?; - - Ok(Some(block.into_header())) + Ok(Some(sealed_header.into_header())) } fn header_td(&self, _hash: &BlockHash) -> ProviderResult> { @@ -334,9 +326,23 @@ where fn sealed_header( &self, - _number: BlockNumber, + number: BlockNumber, ) -> ProviderResult>> { - Err(ProviderError::UnsupportedProvider) + let block_response = self.block_on_async(async { + self.provider.get_block_by_number(number.into()).await.map_err(ProviderError::other) + })?; + + let Some(block_response) = block_response else { + // If the block was not found, return None + return Ok(None); + }; + let block_hash = block_response.header().hash(); + + // Convert the network block response to primitive block + let block = as TryFromBlockResponse>::from_block_response(block_response) + .map_err(ProviderError::other)?; + + Ok(Some(SealedHeader::new(block.into_header(), block_hash))) } fn sealed_headers_while( From 05fed6f991107359fdf04850ebf11b5cc2077225 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 17 Jul 2025 16:00:13 +0200 Subject: [PATCH 196/305] feat: add helper for building pending block env (#17464) --- Cargo.lock | 1 + crates/ethereum/node/src/node.rs | 8 +-- crates/optimism/evm/Cargo.toml | 3 + crates/optimism/evm/src/config.rs | 16 +++++ crates/optimism/node/Cargo.toml | 2 +- crates/optimism/rpc/src/eth/mod.rs | 12 ++-- crates/optimism/rpc/src/eth/pending_block.rs | 35 +++------ crates/rpc/rpc-builder/src/lib.rs | 6 +- .../rpc-eth-api/src/helpers/pending_block.rs | 71 ++++++++++++++++--- crates/rpc/rpc/src/eth/builder.rs | 64 ++++++++++++++++- crates/rpc/rpc/src/eth/core.rs | 45 ++++++++++-- crates/rpc/rpc/src/eth/helpers/block.rs | 5 +- crates/rpc/rpc/src/eth/helpers/call.rs | 2 + crates/rpc/rpc/src/eth/helpers/fees.rs | 3 + .../rpc/rpc/src/eth/helpers/pending_block.rs | 29 ++------ crates/rpc/rpc/src/eth/helpers/receipt.rs | 2 + crates/rpc/rpc/src/eth/helpers/signer.rs | 2 + crates/rpc/rpc/src/eth/helpers/spec.rs | 2 + crates/rpc/rpc/src/eth/helpers/state.rs | 3 + crates/rpc/rpc/src/eth/helpers/trace.rs | 1 + crates/rpc/rpc/src/eth/helpers/transaction.rs | 3 + 21 files changed, 234 insertions(+), 81 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0e882206bf0..f8af9dc74ba 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9244,6 +9244,7 @@ dependencies = [ "reth-optimism-primitives", "reth-primitives-traits", "reth-revm", + "reth-rpc-eth-api", "revm", "thiserror 2.0.12", ] diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 8938f6e8690..804253f45f8 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -20,8 +20,8 @@ use reth_evm::{ }; use reth_network::{primitives::BasicNetworkPrimitives, NetworkHandle, PeersInfo}; use reth_node_api::{ - AddOnsContext, FullNodeComponents, NodeAddOns, NodePrimitives, PayloadAttributesBuilder, - PrimitivesTy, TxTy, + AddOnsContext, FullNodeComponents, HeaderTy, NodeAddOns, NodePrimitives, + PayloadAttributesBuilder, PrimitivesTy, TxTy, }; use reth_node_builder::{ components::{ @@ -43,7 +43,7 @@ use reth_rpc::{ }; use reth_rpc_api::servers::BlockSubmissionValidationApiServer; use reth_rpc_builder::{config::RethRpcServerConfig, middleware::RethRpcMiddleware}; -use reth_rpc_eth_api::RpcConvert; +use reth_rpc_eth_api::{helpers::pending_block::BuildPendingEnv, RpcConvert}; 
use reth_rpc_eth_types::{error::FromEvmError, EthApiError}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; @@ -144,7 +144,7 @@ impl EthApiBuilder for EthereumEthApiBuilder where N: FullNodeComponents< Types: NodeTypes, - Evm: ConfigureEvm>, + Evm: ConfigureEvm>>, >, EthRpcConverterFor: RpcConvert< Primitives = PrimitivesTy, diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 9acef67dabe..98288c5383e 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -18,6 +18,8 @@ reth-primitives-traits.workspace = true reth-execution-errors.workspace = true reth-execution-types.workspace = true +reth-rpc-eth-api = { workspace = true, optional = true } + # ethereum alloy-eips.workspace = true alloy-evm.workspace = true @@ -71,3 +73,4 @@ std = [ "reth-evm/std", ] portable = ["reth-revm/portable"] +rpc = ["reth-rpc-eth-api"] diff --git a/crates/optimism/evm/src/config.rs b/crates/optimism/evm/src/config.rs index d3786e6e92e..2d4039020f1 100644 --- a/crates/optimism/evm/src/config.rs +++ b/crates/optimism/evm/src/config.rs @@ -56,6 +56,22 @@ pub fn revm_spec_by_timestamp_after_bedrock( } } +#[cfg(feature = "rpc")] +impl reth_rpc_eth_api::helpers::pending_block::BuildPendingEnv + for OpNextBlockEnvAttributes +{ + fn build_pending_env(parent: &crate::SealedHeader) -> Self { + Self { + timestamp: parent.timestamp().saturating_add(12), + suggested_fee_recipient: parent.beneficiary(), + prev_randao: alloy_primitives::B256::random(), + gas_limit: parent.gas_limit(), + parent_beacon_block_root: parent.parent_beacon_block_root(), + extra_data: parent.extra_data().clone(), + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index ee5927de3c6..ec4b9a127b2 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -34,7 +34,7 @@ reth-rpc-api.workspace = true # op-reth reth-optimism-payload-builder.workspace = true -reth-optimism-evm.workspace = true +reth-optimism-evm = { workspace = true, features = ["rpc"] } reth-optimism-rpc.workspace = true reth-optimism-storage.workspace = true reth-optimism-txpool.workspace = true diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index ec7c865ec6e..dfbffaa7c41 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -19,13 +19,13 @@ pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_node_api::{FullNodeComponents, FullNodeTypes, NodePrimitives}; +use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodePrimitives}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ helpers::{ - spec::SignersForApi, AddDevSigners, EthApiSpec, EthFees, EthState, LoadBlock, LoadFee, - LoadState, SpawnBlocking, Trace, + pending_block::BuildPendingEnv, spec::SignersForApi, AddDevSigners, EthApiSpec, EthFees, + EthState, LoadBlock, LoadFee, LoadState, SpawnBlocking, Trace, }, EthApiTypes, FromEvmError, FullEthApiServer, RpcConvert, RpcConverter, RpcNodeCore, RpcNodeCoreExt, RpcTypes, SignableTxRequest, @@ -52,8 +52,8 @@ pub type EthApiNodeBackend = EthApiInner< >; /// A helper trait with requirements for [`RpcNodeCore`] to be used in [`OpEthApi`]. 
-pub trait OpNodeCore: RpcNodeCore {} -impl OpNodeCore for T where T: RpcNodeCore {} +pub trait OpNodeCore: RpcNodeCore {} +impl OpNodeCore for T where T: RpcNodeCore {} /// OP-Reth `Eth` API implementation. /// @@ -410,7 +410,7 @@ impl OpEthApiBuilder { impl EthApiBuilder for OpEthApiBuilder where - N: FullNodeComponents, + N: FullNodeComponents>>>, NetworkT: RpcTypes, OpRpcConvert: RpcConvert, OpEthApi>: diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index fb1c85dabb7..555f5d59ee5 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -3,17 +3,13 @@ use std::sync::Arc; use crate::OpEthApi; -use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; -use alloy_primitives::B256; -use reth_chainspec::{ChainSpecProvider, EthChainSpec}; +use reth_chainspec::ChainSpecProvider; use reth_evm::ConfigureEvm; use reth_node_api::NodePrimitives; -use reth_optimism_evm::OpNextBlockEnvAttributes; -use reth_optimism_forks::OpHardforks; -use reth_primitives_traits::{RecoveredBlock, SealedHeader}; +use reth_primitives_traits::RecoveredBlock; use reth_rpc_eth_api::{ - helpers::{LoadPendingBlock, SpawnBlocking}, + helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock, SpawnBlocking}, types::RpcTypes, EthApiTypes, FromEthApiError, FromEvmError, RpcConvert, RpcNodeCore, }; @@ -35,14 +31,9 @@ where RpcConvert: RpcConvert, >, N: RpcNodeCore< - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory, + Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory, Pool: TransactionPool>>, - Evm: ConfigureEvm< - Primitives = ::Primitives, - NextBlockEnvCtx: From, - >, + Evm: ConfigureEvm, Primitives: NodePrimitives< BlockHeader = ProviderHeader, SignedTx = ProviderTx, @@ -61,19 +52,9 @@ where self.inner.eth_api.pending_block() } - fn next_env_attributes( - &self, - parent: &SealedHeader>, - ) -> Result<::NextBlockEnvCtx, Self::Error> { - Ok(OpNextBlockEnvAttributes { - timestamp: parent.timestamp().saturating_add(12), - suggested_fee_recipient: parent.beneficiary(), - prev_randao: B256::random(), - gas_limit: parent.gas_limit(), - parent_beacon_block_root: parent.parent_beacon_block_root(), - extra_data: parent.extra_data().clone(), - } - .into()) + #[inline] + fn pending_env_builder(&self) -> &dyn PendingEnvBuilder { + self.inner.eth_api.pending_env_builder() } /// Returns the locally built pending block diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 4f0f11babee..3d5dc17ba8b 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -41,7 +41,10 @@ use reth_rpc::{ }; use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ - helpers::{Call, EthApiSpec, EthTransactions, LoadPendingBlock, TraceExt}, + helpers::{ + pending_block::{BasicPendingEnvBuilder, PendingEnvBuilder}, + Call, EthApiSpec, EthTransactions, LoadPendingBlock, TraceExt, + }, EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcConvert, RpcConverter, RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, }; @@ -301,6 +304,7 @@ impl EvmConfig: ConfigureEvm, Network: Clone, RpcConverter>: RpcConvert, + BasicPendingEnvBuilder: PendingEnvBuilder, { self.eth_api_builder().build() } diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 691a5f42bff..99062612db7 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ 
b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -5,18 +5,18 @@ use super::SpawnBlocking; use crate::{types::RpcTypes, EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; use alloy_consensus::{BlockHeader, Transaction}; use alloy_eips::eip7840::BlobParams; -use alloy_primitives::U256; +use alloy_primitives::{B256, U256}; use alloy_rpc_types_eth::BlockNumberOrTag; use futures::Future; -use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; +use reth_chainspec::{ChainSpecProvider, EthChainSpec}; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError, RethError}; use reth_evm::{ execute::{BlockBuilder, BlockBuilderOutcome}, - ConfigureEvm, Evm, SpecFor, + ConfigureEvm, Evm, NextBlockEnvAttributes, SpecFor, }; use reth_node_api::NodePrimitives; use reth_primitives_traits::{ - transaction::error::InvalidTransactionError, Receipt, RecoveredBlock, SealedHeader, + transaction::error::InvalidTransactionError, HeaderTy, Receipt, RecoveredBlock, SealedHeader, }; use reth_revm::{database::StateProviderDatabase, db::State}; use reth_rpc_convert::RpcConvert; @@ -48,10 +48,8 @@ pub trait LoadPendingBlock: Error: FromEvmError, RpcConvert: RpcConvert, > + RpcNodeCore< - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory, - Evm: ConfigureEvm::Primitives>, + Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory, + Evm: ConfigureEvm::Primitives> + 'static, Primitives: NodePrimitives< BlockHeader = ProviderHeader, SignedTx = ProviderTx, @@ -68,6 +66,9 @@ pub trait LoadPendingBlock: &self, ) -> &Mutex, ProviderReceipt>>>; + /// Returns a [`PendingEnvBuilder`] for the pending block. + fn pending_env_builder(&self) -> &dyn PendingEnvBuilder; + /// Configures the [`PendingBlockEnv`] for the pending block /// /// If no pending block is available, this will derive it from the `latest` block @@ -121,7 +122,9 @@ pub trait LoadPendingBlock: fn next_env_attributes( &self, parent: &SealedHeader>, - ) -> Result<::NextBlockEnvCtx, Self::Error>; + ) -> Result<::NextBlockEnvCtx, Self::Error> { + Ok(self.pending_env_builder().pending_env_attributes(parent)?) + } /// Returns the locally built pending block #[expect(clippy::type_complexity)] @@ -341,3 +344,53 @@ pub trait LoadPendingBlock: Ok((block, execution_result.receipts)) } } + +/// A type that knows how to build a [`ConfigureEvm::NextBlockEnvCtx`] for a pending block. +pub trait PendingEnvBuilder: Send + Sync + Unpin + 'static { + /// Builds a [`ConfigureEvm::NextBlockEnvCtx`] for pending block. + fn pending_env_attributes( + &self, + parent: &SealedHeader>, + ) -> Result; +} + +/// Trait that should be implemented on [`ConfigureEvm::NextBlockEnvCtx`] to provide a way for it to +/// build an environment for pending block. +/// +/// This assumes that next environment building doesn't require any additional context, for more +/// complex implementations one should implement [`PendingEnvBuilder`] on their custom type. +pub trait BuildPendingEnv
<Header> {
+    /// Builds a [`ConfigureEvm::NextBlockEnvCtx`] for a pending block.
+    fn build_pending_env(parent: &SealedHeader<Header>
) -> Self; +} + +/// Basic implementation of [`PendingEnvBuilder`] that assumes that the +/// [`ConfigureEvm::NextBlockEnvCtx`] type implements [`BuildPendingEnv`] trait. +#[derive(Debug, Default, Clone, Copy)] +#[non_exhaustive] +pub struct BasicPendingEnvBuilder; + +impl PendingEnvBuilder for BasicPendingEnvBuilder +where + Evm: ConfigureEvm>>, +{ + fn pending_env_attributes( + &self, + parent: &SealedHeader>, + ) -> Result { + Ok(Evm::NextBlockEnvCtx::build_pending_env(parent)) + } +} + +impl BuildPendingEnv for NextBlockEnvAttributes { + fn build_pending_env(parent: &SealedHeader) -> Self { + Self { + timestamp: parent.timestamp().saturating_add(12), + suggested_fee_recipient: parent.beneficiary(), + prev_randao: B256::random(), + gas_limit: parent.gas_limit(), + parent_beacon_block_root: parent.parent_beacon_block_root().map(|_| B256::ZERO), + withdrawals: parent.withdrawals_root().map(|_| Default::default()), + } + } +} diff --git a/crates/rpc/rpc/src/eth/builder.rs b/crates/rpc/rpc/src/eth/builder.rs index 813b79bb0be..a0e6708ce1b 100644 --- a/crates/rpc/rpc/src/eth/builder.rs +++ b/crates/rpc/rpc/src/eth/builder.rs @@ -4,8 +4,10 @@ use crate::{eth::core::EthApiInner, EthApi}; use alloy_network::Ethereum; use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; +use reth_evm::ConfigureEvm; use reth_node_api::NodePrimitives; use reth_rpc_convert::{RpcConvert, RpcConverter}; +use reth_rpc_eth_api::helpers::pending_block::{BasicPendingEnvBuilder, PendingEnvBuilder}; use reth_rpc_eth_types::{ fee_history::fee_history_cache_new_blocks_task, receipt::EthReceiptConverter, EthStateCache, EthStateCacheConfig, FeeHistoryCache, FeeHistoryCacheConfig, GasCap, GasPriceOracle, @@ -23,7 +25,7 @@ use std::sync::Arc; /// This builder type contains all settings to create an [`EthApiInner`] or an [`EthApi`] instance /// directly. #[derive(Debug)] -pub struct EthApiBuilder +pub struct EthApiBuilder where Provider: BlockReaderIdExt, { @@ -43,6 +45,7 @@ where gas_oracle: Option>, blocking_task_pool: Option, task_spawner: Box, + next_env: NextEnv, } impl @@ -79,11 +82,13 @@ where task_spawner: TokioTaskExecutor::default().boxed(), gas_oracle_config: Default::default(), eth_state_cache_config: Default::default(), + next_env: BasicPendingEnvBuilder::default(), } } } -impl EthApiBuilder +impl + EthApiBuilder where Provider: BlockReaderIdExt + ChainSpecProvider, { @@ -97,7 +102,7 @@ where pub fn with_rpc_converter( self, rpc_converter: RpcNew, - ) -> EthApiBuilder { + ) -> EthApiBuilder { let Self { provider, pool, @@ -115,6 +120,7 @@ where blocking_task_pool, task_spawner, gas_oracle_config, + next_env, } = self; EthApiBuilder { provider, @@ -133,6 +139,52 @@ where blocking_task_pool, task_spawner, gas_oracle_config, + next_env, + } + } + + /// Changes the configured pending environment builder. 
+ pub fn with_pending_env_builder( + self, + next_env: NextEnvNew, + ) -> EthApiBuilder { + let Self { + provider, + pool, + network, + evm_config, + rpc_converter, + gas_cap, + max_simulate_blocks, + eth_proof_window, + fee_history_cache_config, + proof_permits, + eth_state_cache_config, + eth_cache, + gas_oracle, + blocking_task_pool, + task_spawner, + gas_oracle_config, + next_env: _, + } = self; + EthApiBuilder { + provider, + pool, + network, + evm_config, + rpc_converter, + gas_cap, + max_simulate_blocks, + eth_proof_window, + fee_history_cache_config, + proof_permits, + eth_state_cache_config, + eth_cache, + gas_oracle, + blocking_task_pool, + task_spawner, + gas_oracle_config, + next_env, } } @@ -229,7 +281,9 @@ where > + Clone + Unpin + 'static, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, + NextEnv: PendingEnvBuilder, { let Self { provider, @@ -248,6 +302,7 @@ where fee_history_cache_config, proof_permits, task_spawner, + next_env, } = self; let eth_cache = eth_cache @@ -284,6 +339,7 @@ where task_spawner, proof_permits, rpc_converter, + next_env, ) } @@ -310,6 +366,8 @@ where + Unpin + 'static, Rpc: RpcConvert, + EvmConfig: ConfigureEvm, + NextEnv: PendingEnvBuilder, { EthApi { inner: Arc::new(self.build_inner()) } } diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 32dfbeadfb6..88828ecf6c4 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -10,10 +10,16 @@ use alloy_network::Ethereum; use alloy_primitives::{Bytes, U256}; use derive_more::Deref; use reth_chainspec::{ChainSpec, ChainSpecProvider}; +use reth_evm::ConfigureEvm; +use reth_evm_ethereum::EthEvmConfig; use reth_node_api::{FullNodeComponents, FullNodeTypes}; use reth_rpc_convert::{RpcConvert, RpcConverter}; use reth_rpc_eth_api::{ - helpers::{spec::SignersForRpc, SpawnBlocking}, + helpers::{ + pending_block::{BasicPendingEnvBuilder, PendingEnvBuilder}, + spec::SignersForRpc, + SpawnBlocking, + }, node::RpcNodeCoreExt, EthApiTypes, RpcNodeCore, }; @@ -73,7 +79,7 @@ pub type EthApiBuilderFor = EthApiBuilder< /// While this type requires various unrestricted generic components, trait bounds are enforced when /// additional traits are implemented for this type. #[derive(Deref)] -pub struct EthApi { +pub struct EthApi { /// All nested fields bundled together. #[deref] pub(super) inner: Arc>, @@ -83,6 +89,7 @@ impl Clone for EthApi where Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { fn clone(&self) -> Self { @@ -90,7 +97,7 @@ where } } -impl EthApi> { +impl EthApi> { /// Convenience fn to obtain a new [`EthApiBuilder`] instance with mandatory components. /// /// Creating an [`EthApi`] requires a few mandatory components: @@ -140,7 +147,9 @@ impl EthApi> { impl EthApi where Provider: BlockReaderIdExt + ChainSpecProvider, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, + BasicPendingEnvBuilder: PendingEnvBuilder, { /// Creates a new, shareable instance using the default tokio task spawner. 
#[expect(clippy::too_many_arguments)] @@ -174,6 +183,7 @@ where TokioTaskExecutor::default().boxed(), proof_permits, rpc_converter, + BasicPendingEnvBuilder::default(), ); Self { inner: Arc::new(inner) } @@ -185,6 +195,7 @@ impl EthApiTypes where Self: Send + Sync, Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { type Error = EthApiError; @@ -202,7 +213,7 @@ where Provider: BlockReader + NodePrimitivesProvider + Clone + Unpin, Pool: Send + Sync + Clone + Unpin, Network: Send + Sync + Clone, - EvmConfig: Send + Sync + Clone + Unpin, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { type Primitives = Provider::Primitives; @@ -239,7 +250,7 @@ where Provider: BlockReader + NodePrimitivesProvider + Clone + Unpin, Pool: Send + Sync + Clone + Unpin, Network: Send + Sync + Clone, - EvmConfig: Send + Sync + Clone + Unpin, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { #[inline] @@ -252,6 +263,7 @@ impl std::fmt::Debug for EthApi where Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -264,6 +276,7 @@ impl SpawnBlocking where Self: EthApiTypes + Clone + Send + Sync + 'static, Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { #[inline] @@ -284,7 +297,13 @@ where /// Container type `EthApi` #[expect(missing_debug_implementations)] -pub struct EthApiInner { +pub struct EthApiInner< + Provider: BlockReader, + Pool, + Network, + EvmConfig: ConfigureEvm, + Rpc: RpcConvert, +> { /// The transaction pool. pool: Pool, /// The provider that can interact with the chain. @@ -324,11 +343,15 @@ pub struct EthApiInner>, } impl EthApiInner where Provider: BlockReaderIdExt, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { /// Creates a new, shareable instance using the default tokio task spawner. @@ -348,6 +371,7 @@ where task_spawner: Box, proof_permits: usize, tx_resp_builder: Rpc, + next_env: impl PendingEnvBuilder, ) -> Self { let signers = parking_lot::RwLock::new(Default::default()); // get the block number of the latest block @@ -381,6 +405,7 @@ where blocking_task_guard: BlockingTaskGuard::new(proof_permits), raw_tx_sender, tx_resp_builder, + next_env_builder: Box::new(next_env), } } } @@ -388,6 +413,7 @@ where impl EthApiInner where Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { /// Returns a handle to data on disk. @@ -416,6 +442,13 @@ where &self.pending_block } + /// Returns a type that knows how to build a [`ConfigureEvm::NextBlockEnvCtx`] for a pending + /// block. + #[inline] + pub const fn pending_env_builder(&self) -> &dyn PendingEnvBuilder { + &*self.next_env_builder + } + /// Returns a handle to the task spawner. 
#[inline] pub const fn task_spawner(&self) -> &dyn TaskSpawner { diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index 90d7db80356..115183d0f17 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -6,7 +6,7 @@ use reth_primitives_traits::NodePrimitives; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{EthBlocks, LoadBlock, LoadPendingBlock, SpawnBlocking}, - RpcNodeCore, RpcNodeCoreExt, + RpcNodeCoreExt, }; use reth_rpc_eth_types::EthApiError; use reth_storage_api::{BlockReader, ProviderTx}; @@ -27,6 +27,7 @@ where >, >, Provider: BlockReader + ChainSpecProvider, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { } @@ -44,7 +45,7 @@ where Evm = EvmConfig, >, Provider: BlockReader, - EvmConfig: ConfigureEvm::Primitives>, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index f910fcdbe73..60be882bd2d 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -48,6 +48,7 @@ where + From, > + SpawnBlocking, Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { #[inline] @@ -66,6 +67,7 @@ impl EstimateCall where Self: Call, Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs index 65c98b9c989..e9e6e4c6bd0 100644 --- a/crates/rpc/rpc/src/eth/helpers/fees.rs +++ b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -1,6 +1,7 @@ //! Contains RPC handler implementations for fee history. use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; +use reth_evm::ConfigureEvm; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; use reth_rpc_eth_types::{FeeHistoryCache, GasPriceOracle}; @@ -17,6 +18,7 @@ where >, >, Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { } @@ -28,6 +30,7 @@ where Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { #[inline] diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index 9bc47803035..d792baeb13c 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -1,14 +1,12 @@ //! Support for building a pending block with transactions from local view of mempool. 
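//!
//! A minimal sketch (an assumption for illustration, not part of this patch) of
//! plugging a custom env builder into the new `EthApiBuilder` hook introduced
//! above, where `my_env_builder` is some value implementing
//! `PendingEnvBuilder<EthEvmConfig>`:
//!
//! ```ignore
//! let eth_api = EthApi::builder(provider, pool, network, EthEvmConfig::mainnet())
//!     .with_pending_env_builder(my_env_builder)
//!     .build();
//! ```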
use crate::EthApi; -use alloy_consensus::BlockHeader; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; -use reth_evm::{ConfigureEvm, NextBlockEnvAttributes}; +use reth_evm::ConfigureEvm; use reth_node_api::NodePrimitives; -use reth_primitives_traits::SealedHeader; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{LoadPendingBlock, SpawnBlocking}, + helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock, SpawnBlocking}, types::RpcTypes, FromEvmError, RpcNodeCore, }; @@ -18,7 +16,6 @@ use reth_storage_api::{ StateProviderFactory, }; use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use revm_primitives::B256; impl LoadPendingBlock for EthApi @@ -34,10 +31,7 @@ where Pool: TransactionPool< Transaction: PoolTransaction>, >, - Evm: ConfigureEvm< - Primitives = ::Primitives, - NextBlockEnvCtx: From, - >, + Evm = EvmConfig, Primitives: NodePrimitives< BlockHeader = ProviderHeader, SignedTx = ProviderTx, @@ -46,6 +40,7 @@ where >, >, Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert< Network: RpcTypes
>>, >, @@ -59,18 +54,8 @@ where self.inner.pending_block() } - fn next_env_attributes( - &self, - parent: &SealedHeader>, - ) -> Result<::NextBlockEnvCtx, Self::Error> { - Ok(NextBlockEnvAttributes { - timestamp: parent.timestamp().saturating_add(12), - suggested_fee_recipient: parent.beneficiary(), - prev_randao: B256::random(), - gas_limit: parent.gas_limit(), - parent_beacon_block_root: parent.parent_beacon_block_root().map(|_| B256::ZERO), - withdrawals: parent.withdrawals_root().map(|_| Default::default()), - } - .into()) + #[inline] + fn pending_env_builder(&self) -> &dyn PendingEnvBuilder { + self.inner.pending_env_builder() } } diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index e033a1fcf3b..489e3abe079 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -3,6 +3,7 @@ use crate::EthApi; use alloy_consensus::crypto::RecoveryError; use reth_chainspec::ChainSpecProvider; +use reth_evm::ConfigureEvm; use reth_node_api::NodePrimitives; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{helpers::LoadReceipt, EthApiTypes, RpcNodeCoreExt}; @@ -26,6 +27,7 @@ where Error: From, >, Provider: BlockReader + ChainSpecProvider, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index 9aba5198bb7..f55d6259267 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -8,6 +8,7 @@ use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{eip191_hash_message, Address, Signature, B256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; +use reth_evm::ConfigureEvm; use reth_rpc_convert::{RpcConvert, RpcTypes, SignableTxRequest}; use reth_rpc_eth_api::helpers::{signer::Result, AddDevSigners, EthSigner}; use reth_rpc_eth_types::SignError; @@ -17,6 +18,7 @@ impl AddDevSigners for EthApi where Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert>>>, { fn with_dev_accounts(&self) { diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index e372dec4cb1..a26d671b8e5 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -1,5 +1,6 @@ use alloy_primitives::U256; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ @@ -20,6 +21,7 @@ where Network: NetworkInfo, >, Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { type Transaction = ProviderTx; diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 36d754676c3..c26dccdd4e1 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -1,6 +1,7 @@ //! Contains RPC handler implementations specific to state. 
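//!
//! A minimal sketch (an assumption for illustration, not part of this patch) of
//! the surface these impls back once an `eth_api` handle exists; note that
//! proofs are only served within `max_proof_window()` blocks of the tip:
//!
//! ```ignore
//! let proof = eth_api.get_proof(address, vec![], None)?.await?;
//! ```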
use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_evm::ConfigureEvm; use reth_rpc_convert::RpcConvert; use reth_storage_api::{BlockReader, StateProviderFactory}; use reth_transaction_pool::TransactionPool; @@ -17,6 +18,7 @@ impl EthState where Self: LoadState + SpawnBlocking, Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { fn max_proof_window(&self) -> u64 { @@ -34,6 +36,7 @@ where Pool: TransactionPool, > + EthApiTypes, Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index 78ea5c12b48..34db918a135 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -25,6 +25,7 @@ where Error: FromEvmError, >, Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 97f0bebd75e..e6123895060 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -2,6 +2,7 @@ use crate::EthApi; use alloy_primitives::{Bytes, B256}; +use reth_evm::ConfigureEvm; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction, SpawnBlocking}, @@ -16,6 +17,7 @@ impl EthTransactions where Self: LoadTransaction + EthApiTypes, Provider: BlockReader>, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { #[inline] @@ -53,6 +55,7 @@ where + RpcNodeCoreExt + EthApiTypes, Provider: BlockReader, + EvmConfig: ConfigureEvm, Rpc: RpcConvert, { } From 425541d5a682d9d3fdb9379f220a091be0523a7e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Jul 2025 16:03:15 +0200 Subject: [PATCH 197/305] fix: use primitives headers for pruner (#17458) --- crates/prune/prune/src/builder.rs | 7 ++-- crates/prune/prune/src/segments/set.rs | 5 +-- .../prune/src/segments/static_file/headers.rs | 36 ++++++++++++++----- crates/stages/stages/src/stages/prune.rs | 8 +++-- 4 files changed, 41 insertions(+), 15 deletions(-) diff --git a/crates/prune/prune/src/builder.rs b/crates/prune/prune/src/builder.rs index f5bb95df3f5..509ef6a5be8 100644 --- a/crates/prune/prune/src/builder.rs +++ b/crates/prune/prune/src/builder.rs @@ -82,7 +82,7 @@ impl PrunerBuilder { ProviderRW: PruneCheckpointWriter + BlockReader + StaticFileProviderFactory< - Primitives: NodePrimitives, + Primitives: NodePrimitives, >, > + StaticFileProviderFactory< Primitives = ::Primitives, @@ -107,8 +107,9 @@ impl PrunerBuilder { static_file_provider: StaticFileProvider, ) -> Pruner where - Provider: StaticFileProviderFactory> - + DBProvider + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + DBProvider + BlockReader + PruneCheckpointWriter, { diff --git a/crates/prune/prune/src/segments/set.rs b/crates/prune/prune/src/segments/set.rs index 52e6ee75442..7d5db03714b 100644 --- a/crates/prune/prune/src/segments/set.rs +++ b/crates/prune/prune/src/segments/set.rs @@ -47,8 +47,9 @@ impl SegmentSet { impl SegmentSet where - Provider: StaticFileProviderFactory> - + DBProvider + Provider: StaticFileProviderFactory< + Primitives: NodePrimitives, + > + DBProvider + PruneCheckpointWriter + BlockReader, { diff --git a/crates/prune/prune/src/segments/static_file/headers.rs b/crates/prune/prune/src/segments/static_file/headers.rs index be4e50fe48b..d8b7e6a5398 100644 --- a/crates/prune/prune/src/segments/static_file/headers.rs +++ 
b/crates/prune/prune/src/segments/static_file/headers.rs @@ -7,9 +7,11 @@ use alloy_primitives::BlockNumber; use itertools::Itertools; use reth_db_api::{ cursor::{DbCursorRO, RangeWalker}, + table::Value, tables, transaction::DbTxMut, }; +use reth_primitives_traits::NodePrimitives; use reth_provider::{providers::StaticFileProvider, DBProvider, StaticFileProviderFactory}; use reth_prune_types::{ PruneMode, PrunePurpose, PruneSegment, SegmentOutput, SegmentOutputCheckpoint, @@ -32,8 +34,10 @@ impl Headers { } } -impl> Segment - for Headers +impl Segment for Headers +where + Provider: StaticFileProviderFactory> + + DBProvider, { fn segment(&self) -> PruneSegment { PruneSegment::Headers @@ -63,7 +67,12 @@ impl> Segment()?; + // let mut headers_cursor = provider.tx_ref().cursor_write::()?; + let mut headers_cursor = provider + .tx_ref() + .cursor_write::::BlockHeader>>( + )?; + let mut header_tds_cursor = provider.tx_ref().cursor_write::()?; let mut canonical_headers_cursor = @@ -108,11 +117,16 @@ type Walker<'a, Provider, T> = #[allow(missing_debug_implementations)] struct HeaderTablesIter<'a, Provider> where - Provider: DBProvider, + Provider: StaticFileProviderFactory> + + DBProvider, { provider: &'a Provider, limiter: &'a mut PruneLimiter, - headers_walker: Walker<'a, Provider, tables::Headers>, + headers_walker: Walker< + 'a, + Provider, + tables::Headers<::BlockHeader>, + >, header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>, canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>, } @@ -124,12 +138,17 @@ struct HeaderTablesIterItem { impl<'a, Provider> HeaderTablesIter<'a, Provider> where - Provider: DBProvider, + Provider: StaticFileProviderFactory> + + DBProvider, { const fn new( provider: &'a Provider, limiter: &'a mut PruneLimiter, - headers_walker: Walker<'a, Provider, tables::Headers>, + headers_walker: Walker< + 'a, + Provider, + tables::Headers<::BlockHeader>, + >, header_tds_walker: Walker<'a, Provider, tables::HeaderTerminalDifficulties>, canonical_headers_walker: Walker<'a, Provider, tables::CanonicalHeaders>, ) -> Self { @@ -139,7 +158,8 @@ where impl Iterator for HeaderTablesIter<'_, Provider> where - Provider: DBProvider, + Provider: StaticFileProviderFactory> + + DBProvider, { type Item = Result; fn next(&mut self) -> Option { diff --git a/crates/stages/stages/src/stages/prune.rs b/crates/stages/stages/src/stages/prune.rs index 6671c4a4139..f62259dcfdd 100644 --- a/crates/stages/stages/src/stages/prune.rs +++ b/crates/stages/stages/src/stages/prune.rs @@ -42,7 +42,9 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory>, + + StaticFileProviderFactory< + Primitives: NodePrimitives, + >, { fn id(&self) -> StageId { StageId::Prune @@ -131,7 +133,9 @@ where + PruneCheckpointReader + PruneCheckpointWriter + BlockReader - + StaticFileProviderFactory>, + + StaticFileProviderFactory< + Primitives: NodePrimitives, + >, { fn id(&self) -> StageId { StageId::PruneSenderRecovery From 1912ac7547cea4451c4cb66c279f5f38246677dc Mon Sep 17 00:00:00 2001 From: cakevm Date: Thu, 17 Jul 2025 17:39:47 +0200 Subject: [PATCH 198/305] feat(alloy-provider): implement `bytecode_by_hash` method (#17471) --- Cargo.lock | 2 ++ crates/alloy-provider/Cargo.toml | 2 +- crates/alloy-provider/src/lib.rs | 21 +++++++++++++++++---- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f8af9dc74ba..21862eefe6a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -446,8 +446,10 @@ dependencies 
= [ "alloy-primitives", "alloy-pubsub", "alloy-rpc-client", + "alloy-rpc-types-debug", "alloy-rpc-types-engine", "alloy-rpc-types-eth", + "alloy-rpc-types-trace", "alloy-signer", "alloy-sol-types", "alloy-transport", diff --git a/crates/alloy-provider/Cargo.toml b/crates/alloy-provider/Cargo.toml index 22a8e724890..14e9031666d 100644 --- a/crates/alloy-provider/Cargo.toml +++ b/crates/alloy-provider/Cargo.toml @@ -27,7 +27,7 @@ reth-db-api.workspace = true reth-rpc-convert.workspace = true # alloy -alloy-provider.workspace = true +alloy-provider = { workspace = true, features = ["debug-api"] } alloy-network.workspace = true alloy-primitives.workspace = true alloy-consensus.workspace = true diff --git a/crates/alloy-provider/src/lib.rs b/crates/alloy-provider/src/lib.rs index 6b0eb95d981..b0af63f85fe 100644 --- a/crates/alloy-provider/src/lib.rs +++ b/crates/alloy-provider/src/lib.rs @@ -24,7 +24,7 @@ use alloy_consensus::BlockHeader; use alloy_eips::BlockHashOrNumber; use alloy_network::{primitives::HeaderResponse, BlockResponse}; use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, TxHash, TxNumber, B256, U256}; -use alloy_provider::{network::Network, Provider}; +use alloy_provider::{ext::DebugApi, network::Network, Provider}; use alloy_rpc_types::BlockId; use alloy_rpc_types_engine::ForkchoiceState; use reth_chainspec::{ChainInfo, ChainSpecProvider}; @@ -940,9 +940,22 @@ where N: Network, Node: NodeTypes, { - fn bytecode_by_hash(&self, _code_hash: &B256) -> Result, ProviderError> { - // Cannot fetch bytecode by hash via RPC - Err(ProviderError::UnsupportedProvider) + fn bytecode_by_hash(&self, code_hash: &B256) -> Result, ProviderError> { + self.block_on_async(async { + // The method `debug_codeByHash` is currently only available on a Reth node + let code = self + .provider + .debug_code_by_hash(*code_hash, None) + .await + .map_err(ProviderError::other)?; + + let Some(code) = code else { + // If the code was not found, return None + return Ok(None); + }; + + Ok(Some(Bytecode::new_raw(code))) + }) } } From d4d3e22f798b1b9790cb9b783f565aa401c0cf92 Mon Sep 17 00:00:00 2001 From: bigbear <155267841+aso20455@users.noreply.github.com> Date: Thu, 17 Jul 2025 17:47:55 +0200 Subject: [PATCH 199/305] fix: correct documentation for block_mut method in RecoveredBlock (#17472) --- crates/primitives-traits/src/block/recovered.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/primitives-traits/src/block/recovered.rs b/crates/primitives-traits/src/block/recovered.rs index 3340342abbf..5c3c9eb08c6 100644 --- a/crates/primitives-traits/src/block/recovered.rs +++ b/crates/primitives-traits/src/block/recovered.rs @@ -559,7 +559,7 @@ impl RecoveredBlock { self.block.header_mut() } - /// Returns a mutable reference to the header. + /// Returns a mutable reference to the body. 
pub const fn block_mut(&mut self) -> &mut B::Body { self.block.body_mut() } From 0fff798cb6f3ab29e992fc1684cc25a71809fa09 Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Thu, 17 Jul 2025 23:11:22 +0530 Subject: [PATCH 200/305] fix(`docs`): change sdk overview path to /sdk (#17467) --- docs/vocs/docs/pages/index.mdx | 4 ++-- docs/vocs/docs/pages/introduction/why-reth.mdx | 2 +- docs/vocs/docs/pages/{sdk/overview.mdx => sdk.mdx} | 0 docs/vocs/redirects.config.ts | 2 ++ docs/vocs/sidebar.ts | 2 +- docs/vocs/vocs.config.ts | 2 +- 6 files changed, 7 insertions(+), 5 deletions(-) rename docs/vocs/docs/pages/{sdk/overview.mdx => sdk.mdx} (100%) diff --git a/docs/vocs/docs/pages/index.mdx b/docs/vocs/docs/pages/index.mdx index a3ba66c3932..8778914f4c8 100644 --- a/docs/vocs/docs/pages/index.mdx +++ b/docs/vocs/docs/pages/index.mdx @@ -25,7 +25,7 @@ import { TrustedBy } from "../components/TrustedBy";
 [hunk body garbled in extraction; recoverable change: the "Build a Node" button
 (between the "Run a Node" and "Why Reth?" buttons) has its href updated from
 "/sdk/overview" to "/sdk"]
@@ -117,7 +117,7 @@ import { TrustedBy } from "../components/TrustedBy";
 [hunk body garbled in extraction; recoverable change: the link wrapping the
 "Customizable" card ("Build custom nodes with tailored transaction handling")
 has its href updated from "/sdk/overview" to "/sdk"]
diff --git a/docs/vocs/docs/pages/introduction/why-reth.mdx b/docs/vocs/docs/pages/introduction/why-reth.mdx index f140c0e3128..1b03870a877 100644 --- a/docs/vocs/docs/pages/introduction/why-reth.mdx +++ b/docs/vocs/docs/pages/introduction/why-reth.mdx @@ -46,5 +46,5 @@ Reth isn't just a tool—it's a movement toward better blockchain infrastructure **Ready to build the future?** - [Get Started](/run/ethereum) with running your first Reth node -- [Explore the SDK](/sdk/overview) to build custom blockchain infrastructure +- [Explore the SDK](/sdk) to build custom blockchain infrastructure - [Join the Community](https://github.com/paradigmxyz/reth) and contribute to the future of Ethereum diff --git a/docs/vocs/docs/pages/sdk/overview.mdx b/docs/vocs/docs/pages/sdk.mdx similarity index 100% rename from docs/vocs/docs/pages/sdk/overview.mdx rename to docs/vocs/docs/pages/sdk.mdx diff --git a/docs/vocs/redirects.config.ts b/docs/vocs/redirects.config.ts index 6d30c882a14..82a911b6bfc 100644 --- a/docs/vocs/redirects.config.ts +++ b/docs/vocs/redirects.config.ts @@ -17,6 +17,8 @@ export const redirects: Record = { '/run/pruning': '/run/faq/pruning', '/run/ports': '/run/faq/ports', '/run/troubleshooting': '/run/faq/troubleshooting', + // SDK + '/sdk/overview': '/sdk', // Exex '/developers/exex': '/exex/overview', '/developers/exex/how-it-works': '/exex/how-it-works', diff --git a/docs/vocs/sidebar.ts b/docs/vocs/sidebar.ts index 140b056e0a2..e51af1c260c 100644 --- a/docs/vocs/sidebar.ts +++ b/docs/vocs/sidebar.ts @@ -136,7 +136,7 @@ export const sidebar: SidebarItem[] = [ items: [ { text: "Overview", - link: "/sdk/overview" + link: "/sdk" }, { text: "Typesystem", diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts index a13320ae40d..56f304a8233 100644 --- a/docs/vocs/vocs.config.ts +++ b/docs/vocs/vocs.config.ts @@ -12,7 +12,7 @@ export default defineConfig({ basePath, topNav: [ { text: 'Run', link: '/run/ethereum' }, - { text: 'SDK', link: '/sdk/overview' }, + { text: 'SDK', link: '/sdk' }, { element: React.createElement('a', { href: '/docs', target: '_self' }, 'Rustdocs') }, From 65a63e129e90b792c51591f8b1ae5126dce93d22 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 17 Jul 2025 20:48:50 +0200 Subject: [PATCH 201/305] feat: add envelope conversion for op (#17469) --- crates/optimism/primitives/src/receipt.rs | 24 +++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/crates/optimism/primitives/src/receipt.rs b/crates/optimism/primitives/src/receipt.rs index d7549670d2a..74f21eab115 100644 --- a/crates/optimism/primitives/src/receipt.rs +++ b/crates/optimism/primitives/src/receipt.rs @@ -388,6 +388,30 @@ impl InMemorySize for OpReceipt { } } +impl From for OpReceipt { + fn from(envelope: op_alloy_consensus::OpReceiptEnvelope) -> Self { + match envelope { + op_alloy_consensus::OpReceiptEnvelope::Legacy(receipt) => Self::Legacy(receipt.receipt), + op_alloy_consensus::OpReceiptEnvelope::Eip2930(receipt) => { + Self::Eip2930(receipt.receipt) + } + op_alloy_consensus::OpReceiptEnvelope::Eip1559(receipt) => { + Self::Eip1559(receipt.receipt) + } + op_alloy_consensus::OpReceiptEnvelope::Eip7702(receipt) => { + Self::Eip7702(receipt.receipt) + } + op_alloy_consensus::OpReceiptEnvelope::Deposit(receipt) => { + Self::Deposit(OpDepositReceipt { + deposit_nonce: receipt.receipt.deposit_nonce, + deposit_receipt_version: receipt.receipt.deposit_receipt_version, + inner: receipt.receipt.inner, + }) + } + } + } +} + /// Trait for deposit receipt. 
pub trait DepositReceipt: reth_primitives_traits::Receipt { /// Converts a `Receipt` into a mutable Optimism deposit receipt. From 6927afac16a986f3ca0135cc135058777a76b55e Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Fri, 18 Jul 2025 13:19:38 +0530 Subject: [PATCH 202/305] fix(`docs`): rustdocs module and nested links (#17478) --- docs/vocs/scripts/inject-cargo-docs.ts | 49 ++++++++++++++++++++++++-- 1 file changed, 47 insertions(+), 2 deletions(-) diff --git a/docs/vocs/scripts/inject-cargo-docs.ts b/docs/vocs/scripts/inject-cargo-docs.ts index f2d9869aecf..74857cb03e9 100644 --- a/docs/vocs/scripts/inject-cargo-docs.ts +++ b/docs/vocs/scripts/inject-cargo-docs.ts @@ -40,6 +40,22 @@ async function injectCargoDocs() { for (const file of htmlFiles) { let content = await fs.readFile(file, 'utf-8'); + // Extract the current crate name and module path from the file path + // Remove the base path to get the relative path within the docs + const relativePath = file.startsWith('./') ? file.slice(2) : file; + const docsRelativePath = relativePath.replace(/^docs\/dist\/docs\//, ''); + const pathParts = docsRelativePath.split('/'); + const fileName = pathParts[pathParts.length - 1]; + + // Determine if this is the root index + const isRootIndex = pathParts.length === 1 && fileName === 'index.html'; + + // Extract crate name - it's the first directory in the docs-relative path + const crateName = isRootIndex ? null : pathParts[0]; + + // Build the current module path (everything between crate and filename) + const modulePath = pathParts.slice(1, -1).join('/'); + // Fix static file references content = content // CSS and JS in static.files @@ -55,8 +71,37 @@ async function injectCargoDocs() { // Fix crate navigation links .replace(/href="\.\/([^/]+)\/index\.html"/g, `href="${BASE_PATH}/$1/index.html"`) .replace(/href="\.\.\/([^/]+)\/index\.html"/g, `href="${BASE_PATH}/$1/index.html"`) - // Fix simple crate links (without ./ or ../) - .replace(/href="([^/:"]+)\/index\.html"/g, `href="${BASE_PATH}/$1/index.html"`) + // Fix module links within the same crate (relative paths without ./ or ../) + // These need to include the current crate name in the path + .replace(/href="([^/:"\.](?:[^/:"]*)?)\/index\.html"/g, (match, moduleName) => { + // Skip if it's already an absolute path or contains a protocol + if (moduleName.startsWith('/') || moduleName.includes('://')) { + return match; + } + // For the root index page, these are crate links, not module links + if (isRootIndex) { + return `href="${BASE_PATH}/${moduleName}/index.html"`; + } + // For module links within a crate, we need to build the full path + // If we're in a nested module, we need to go up to the crate root then down to the target + const fullPath = modulePath ? `${crateName}/${modulePath}/${moduleName}` : `${crateName}/${moduleName}`; + return `href="${BASE_PATH}/${fullPath}/index.html"`; + }) + + // Also fix other relative links (structs, enums, traits) that don't have index.html + .replace(/href="([^/:"\.#][^/:"#]*\.html)"/g, (match, pageName) => { + // Skip if it's already an absolute path or contains a protocol + if (pageName.startsWith('/') || pageName.includes('://')) { + return match; + } + // Skip for root index page as it shouldn't have such links + if (isRootIndex) { + return match; + } + // For other doc pages in nested modules, build the full path + const fullPath = modulePath ? 
`${crateName}/${modulePath}/${pageName}` : `${crateName}/${pageName}`; + return `href="${BASE_PATH}/${fullPath}"`; + }) // Fix root index.html links .replace(/href="\.\/index\.html"/g, `href="${BASE_PATH}/index.html"`) From 87000e33594e93aeaad3c4f96d38ca38c469cb2e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Jul 2025 11:14:36 +0200 Subject: [PATCH 203/305] chore: expose chainspec getter (#17461) --- crates/rpc/rpc-engine-api/src/engine_api.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index ad708b75da3..8738e94abe9 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -61,6 +61,15 @@ pub struct EngineApi>, } +impl + EngineApi +{ + /// Returns the configured chainspec. + pub fn chain_spec(&self) -> &Arc { + &self.inner.chain_spec + } +} + impl EngineApi where From 3add4b1e3d7c4f2b47816b9cc81231c6abaf67ab Mon Sep 17 00:00:00 2001 From: cakevm Date: Fri, 18 Jul 2025 11:14:12 +0200 Subject: [PATCH 204/305] feat(alloy-provider): implement `transaction_by_hash` method (#17479) --- Cargo.lock | 2 + crates/alloy-provider/src/lib.rs | 24 ++++- crates/rpc/rpc-convert/Cargo.toml | 3 + crates/rpc/rpc-convert/src/lib.rs | 4 +- crates/rpc/rpc-convert/src/transaction.rs | 106 ++++++++++++++++++++++ 5 files changed, 134 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 21862eefe6a..0691b3aeba3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9980,8 +9980,10 @@ dependencies = [ "alloy-signer", "jsonrpsee-types", "op-alloy-consensus", + "op-alloy-network", "op-alloy-rpc-types", "op-revm", + "reth-ethereum-primitives", "reth-evm", "reth-optimism-primitives", "reth-primitives-traits", diff --git a/crates/alloy-provider/src/lib.rs b/crates/alloy-provider/src/lib.rs index b0af63f85fe..477327aa23c 100644 --- a/crates/alloy-provider/src/lib.rs +++ b/crates/alloy-provider/src/lib.rs @@ -44,7 +44,7 @@ use reth_provider::{ TransactionVariant, TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; -use reth_rpc_convert::TryFromBlockResponse; +use reth_rpc_convert::{TryFromBlockResponse, TryFromTransactionResponse}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockReaderIdExt, BlockSource, DBProvider, NodePrimitivesProvider, @@ -378,6 +378,7 @@ where N: Network, Node: NodeTypes, BlockTy: TryFromBlockResponse, + TxTy: TryFromTransactionResponse, { type Block = BlockTy; @@ -457,6 +458,7 @@ where N: Network, Node: NodeTypes, BlockTy: TryFromBlockResponse, + TxTy: TryFromTransactionResponse, { fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { @@ -528,6 +530,7 @@ where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, + TxTy: TryFromTransactionResponse, { type Transaction = TxTy; @@ -546,8 +549,23 @@ where Err(ProviderError::UnsupportedProvider) } - fn transaction_by_hash(&self, _hash: TxHash) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn transaction_by_hash(&self, hash: TxHash) -> ProviderResult> { + let transaction_response = self.block_on_async(async { + self.provider.get_transaction_by_hash(hash).await.map_err(ProviderError::other) + })?; + + let Some(transaction_response) = transaction_response else { + // If the transaction was not found, return None + return Ok(None); + }; + + // Convert the network transaction response to primitive transaction + let transaction = as 
TryFromTransactionResponse>::from_transaction_response( + transaction_response, + ) + .map_err(ProviderError::other)?; + + Ok(Some(transaction)) } fn transaction_by_hash_with_meta( diff --git a/crates/rpc/rpc-convert/Cargo.toml b/crates/rpc/rpc-convert/Cargo.toml index 4923c5ab27c..abaf8d8d04b 100644 --- a/crates/rpc/rpc-convert/Cargo.toml +++ b/crates/rpc/rpc-convert/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-primitives-traits.workspace = true reth-storage-api = { workspace = true, optional = true } reth-evm.workspace = true +reth-ethereum-primitives.workspace = true # ethereum alloy-primitives.workspace = true @@ -28,6 +29,7 @@ alloy-json-rpc.workspace = true # optimism op-alloy-consensus = { workspace = true, optional = true } op-alloy-rpc-types = { workspace = true, optional = true } +op-alloy-network = { workspace = true, optional = true } reth-optimism-primitives = { workspace = true, optional = true } op-revm = { workspace = true, optional = true } @@ -45,6 +47,7 @@ default = [] op = [ "dep:op-alloy-consensus", "dep:op-alloy-rpc-types", + "dep:op-alloy-network", "dep:reth-optimism-primitives", "dep:reth-storage-api", "dep:op-revm", diff --git a/crates/rpc/rpc-convert/src/lib.rs b/crates/rpc/rpc-convert/src/lib.rs index bdd8035780c..04821ff4d77 100644 --- a/crates/rpc/rpc-convert/src/lib.rs +++ b/crates/rpc/rpc-convert/src/lib.rs @@ -19,8 +19,8 @@ pub use block::TryFromBlockResponse; pub use fees::{CallFees, CallFeesError}; pub use rpc::*; pub use transaction::{ - EthTxEnvError, IntoRpcTx, RpcConvert, RpcConverter, TransactionConversionError, TryIntoSimTx, - TxInfoMapper, + EthTxEnvError, IntoRpcTx, RpcConvert, RpcConverter, TransactionConversionError, + TryFromTransactionResponse, TryIntoSimTx, TxInfoMapper, }; #[cfg(feature = "op")] diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index 4bc088788fd..eb4abe918b6 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -5,6 +5,7 @@ use crate::{ RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, }; use alloy_consensus::{error::ValueError, transaction::Recovered, EthereumTxEnvelope, TxEip4844}; +use alloy_network::Network; use alloy_primitives::{Address, TxKind, U256}; use alloy_rpc_types_eth::{ request::{TransactionInputError, TransactionRequest}, @@ -543,3 +544,108 @@ pub mod op { } } } + +/// Trait for converting network transaction responses to primitive transaction types. +pub trait TryFromTransactionResponse { + /// The error type returned if the conversion fails. + type Error: core::error::Error + Send + Sync + Unpin; + + /// Converts a network transaction response to a primitive transaction type. + /// + /// # Returns + /// + /// Returns `Ok(Self)` on successful conversion, or `Err(Self::Error)` if the conversion fails. 
+ fn from_transaction_response( + transaction_response: N::TransactionResponse, + ) -> Result + where + Self: Sized; +} + +impl TryFromTransactionResponse + for reth_ethereum_primitives::TransactionSigned +{ + type Error = Infallible; + + fn from_transaction_response(transaction_response: Transaction) -> Result { + Ok(transaction_response.into_inner().into()) + } +} + +#[cfg(feature = "op")] +impl TryFromTransactionResponse + for reth_optimism_primitives::OpTransactionSigned +{ + type Error = Infallible; + + fn from_transaction_response( + transaction_response: op_alloy_rpc_types::Transaction, + ) -> Result { + Ok(transaction_response.inner.into_inner()) + } +} + +#[cfg(test)] +mod transaction_response_tests { + use super::*; + use alloy_consensus::{transaction::Recovered, EthereumTxEnvelope, Signed, TxLegacy}; + use alloy_network::Ethereum; + use alloy_primitives::{Address, Signature, B256, U256}; + use alloy_rpc_types_eth::Transaction; + + #[test] + fn test_ethereum_transaction_conversion() { + let signed_tx = Signed::new_unchecked( + TxLegacy::default(), + Signature::new(U256::ONE, U256::ONE, false), + B256::ZERO, + ); + let envelope = EthereumTxEnvelope::Legacy(signed_tx); + + let tx_response = Transaction { + inner: Recovered::new_unchecked(envelope, Address::ZERO), + block_hash: None, + block_number: None, + transaction_index: None, + effective_gas_price: None, + }; + + let result = >::from_transaction_response(tx_response); + assert!(result.is_ok()); + } + + #[cfg(feature = "op")] + #[test] + fn test_optimism_transaction_conversion() { + use op_alloy_consensus::OpTxEnvelope; + use op_alloy_network::Optimism; + use reth_optimism_primitives::OpTransactionSigned; + + let signed_tx = Signed::new_unchecked( + TxLegacy::default(), + Signature::new(U256::ONE, U256::ONE, false), + B256::ZERO, + ); + let envelope = OpTxEnvelope::Legacy(signed_tx); + + let inner_tx = Transaction { + inner: Recovered::new_unchecked(envelope, Address::ZERO), + block_hash: None, + block_number: None, + transaction_index: None, + effective_gas_price: None, + }; + + let tx_response = op_alloy_rpc_types::Transaction { + inner: inner_tx, + deposit_nonce: None, + deposit_receipt_version: None, + }; + + let result = >::from_transaction_response(tx_response); + + assert!(result.is_ok()); + } +} From e089d902ca53730ec1fd60b4ce46b40c71222e6b Mon Sep 17 00:00:00 2001 From: o-az Date: Fri, 18 Jul 2025 02:30:52 -0700 Subject: [PATCH 205/305] fix: edit link and config (#17453) Co-authored-by: Matthias Seitz --- .gitattributes | 2 + .gitignore | 4 + docs/vocs/bun.lock | 1542 +++++++++++++++++++++ docs/vocs/bun.lockb | Bin 312478 -> 0 bytes docs/vocs/bunfig.toml | 4 + docs/vocs/docs/components/SdkShowcase.tsx | 14 +- docs/vocs/docs/components/TrustedBy.tsx | 8 +- docs/vocs/links-report.json | 17 - docs/vocs/package.json | 12 +- docs/vocs/vocs.config.ts | 4 +- 10 files changed, 1569 insertions(+), 38 deletions(-) create mode 100644 docs/vocs/bun.lock delete mode 100755 docs/vocs/bun.lockb create mode 100644 docs/vocs/bunfig.toml delete mode 100644 docs/vocs/links-report.json diff --git a/.gitattributes b/.gitattributes index 52ee28d3ba9..17286acb516 100644 --- a/.gitattributes +++ b/.gitattributes @@ -2,3 +2,5 @@ book/cli/**/*.md linguist-vendored book/cli/cli.md -linguist-vendored crates/storage/libmdbx-rs/mdbx-sys/libmdbx/** linguist-vendored + +bun.lock linguist-language=JSON-with-Comments diff --git a/.gitignore b/.gitignore index 7335978db14..58813003cfb 100644 --- a/.gitignore +++ b/.gitignore @@ -59,3 +59,7 @@ 
docs/vocs/node_modules # Cargo chef recipe file recipe.json + +_ +# broken links report +links-report.json diff --git a/docs/vocs/bun.lock b/docs/vocs/bun.lock new file mode 100644 index 00000000000..4203e94aa62 --- /dev/null +++ b/docs/vocs/bun.lock @@ -0,0 +1,1542 @@ +{ + "lockfileVersion": 1, + "workspaces": { + "": { + "name": "vocs", + "dependencies": { + "react": "^19.1.0", + "react-dom": "^19.1.0", + "vocs": "^1.0.13", + }, + "devDependencies": { + "@types/node": "^24.0.14", + "@types/react": "^19.1.8", + "glob": "^11.0.3", + "typescript": "^5.8.3", + }, + }, + }, + "packages": { + "@ampproject/remapping": ["@ampproject/remapping@2.3.0", "", { "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw=="], + + "@antfu/install-pkg": ["@antfu/install-pkg@1.1.0", "", { "dependencies": { "package-manager-detector": "^1.3.0", "tinyexec": "^1.0.1" } }, "sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ=="], + + "@antfu/utils": ["@antfu/utils@8.1.1", "", {}, "sha512-Mex9nXf9vR6AhcXmMrlz/HVgYYZpVGJ6YlPgwl7UnaFpnshXs6EK/oa5Gpf3CzENMjkvEx2tQtntGnb7UtSTOQ=="], + + "@babel/code-frame": ["@babel/code-frame@7.27.1", "", { "dependencies": { "@babel/helper-validator-identifier": "^7.27.1", "js-tokens": "^4.0.0", "picocolors": "^1.1.1" } }, "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg=="], + + "@babel/compat-data": ["@babel/compat-data@7.28.0", "", {}, "sha512-60X7qkglvrap8mn1lh2ebxXdZYtUcpd7gsmy9kLaBJ4i/WdY8PqTSdxyA8qraikqKQK5C1KRBKXqznrVapyNaw=="], + + "@babel/core": ["@babel/core@7.28.0", "", { "dependencies": { "@ampproject/remapping": "^2.2.0", "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.0", "@babel/helper-compilation-targets": "^7.27.2", "@babel/helper-module-transforms": "^7.27.3", "@babel/helpers": "^7.27.6", "@babel/parser": "^7.28.0", "@babel/template": "^7.27.2", "@babel/traverse": "^7.28.0", "@babel/types": "^7.28.0", "convert-source-map": "^2.0.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.2", "json5": "^2.2.3", "semver": "^6.3.1" } }, "sha512-UlLAnTPrFdNGoFtbSXwcGFQBtQZJCNjaN6hQNP3UPvuNXT1i82N26KL3dZeIpNalWywr9IuQuncaAfUaS1g6sQ=="], + + "@babel/generator": ["@babel/generator@7.28.0", "", { "dependencies": { "@babel/parser": "^7.28.0", "@babel/types": "^7.28.0", "@jridgewell/gen-mapping": "^0.3.12", "@jridgewell/trace-mapping": "^0.3.28", "jsesc": "^3.0.2" } }, "sha512-lJjzvrbEeWrhB4P3QBsH7tey117PjLZnDbLiQEKjQ/fNJTjuq4HSqgFA+UNSwZT8D7dxxbnuSBMsa1lrWzKlQg=="], + + "@babel/helper-compilation-targets": ["@babel/helper-compilation-targets@7.27.2", "", { "dependencies": { "@babel/compat-data": "^7.27.2", "@babel/helper-validator-option": "^7.27.1", "browserslist": "^4.24.0", "lru-cache": "^5.1.1", "semver": "^6.3.1" } }, "sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ=="], + + "@babel/helper-globals": ["@babel/helper-globals@7.28.0", "", {}, "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw=="], + + "@babel/helper-module-imports": ["@babel/helper-module-imports@7.27.1", "", { "dependencies": { "@babel/traverse": "^7.27.1", "@babel/types": "^7.27.1" } }, "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w=="], + + "@babel/helper-module-transforms": ["@babel/helper-module-transforms@7.27.3", "", { "dependencies": { 
"@babel/helper-module-imports": "^7.27.1", "@babel/helper-validator-identifier": "^7.27.1", "@babel/traverse": "^7.27.3" }, "peerDependencies": { "@babel/core": "^7.0.0" } }, "sha512-dSOvYwvyLsWBeIRyOeHXp5vPj5l1I011r52FM1+r1jCERv+aFXYk4whgQccYEGYxK2H3ZAIA8nuPkQ0HaUo3qg=="], + + "@babel/helper-plugin-utils": ["@babel/helper-plugin-utils@7.27.1", "", {}, "sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw=="], + + "@babel/helper-string-parser": ["@babel/helper-string-parser@7.27.1", "", {}, "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA=="], + + "@babel/helper-validator-identifier": ["@babel/helper-validator-identifier@7.27.1", "", {}, "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow=="], + + "@babel/helper-validator-option": ["@babel/helper-validator-option@7.27.1", "", {}, "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg=="], + + "@babel/helpers": ["@babel/helpers@7.27.6", "", { "dependencies": { "@babel/template": "^7.27.2", "@babel/types": "^7.27.6" } }, "sha512-muE8Tt8M22638HU31A3CgfSUciwz1fhATfoVai05aPXGor//CdWDCbnlY1yvBPo07njuVOCNGCSp/GTt12lIug=="], + + "@babel/parser": ["@babel/parser@7.28.0", "", { "dependencies": { "@babel/types": "^7.28.0" }, "bin": "./bin/babel-parser.js" }, "sha512-jVZGvOxOuNSsuQuLRTh13nU0AogFlw32w/MT+LV6D3sP5WdbW61E77RnkbaO2dUvmPAYrBDJXGn5gGS6tH4j8g=="], + + "@babel/plugin-syntax-typescript": ["@babel/plugin-syntax-typescript@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-xfYCBMxveHrRMnAWl1ZlPXOZjzkN82THFvLhQhFXFt81Z5HnN+EtUkZhv/zcKpmT3fzmWZB0ywiBrbC3vogbwQ=="], + + "@babel/plugin-transform-react-jsx-self": ["@babel/plugin-transform-react-jsx-self@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw=="], + + "@babel/plugin-transform-react-jsx-source": ["@babel/plugin-transform-react-jsx-source@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw=="], + + "@babel/runtime": ["@babel/runtime@7.27.6", "", {}, "sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q=="], + + "@babel/template": ["@babel/template@7.27.2", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/parser": "^7.27.2", "@babel/types": "^7.27.1" } }, "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw=="], + + "@babel/traverse": ["@babel/traverse@7.28.0", "", { "dependencies": { "@babel/code-frame": "^7.27.1", "@babel/generator": "^7.28.0", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.28.0", "@babel/template": "^7.27.2", "@babel/types": "^7.28.0", "debug": "^4.3.1" } }, "sha512-mGe7UK5wWyh0bKRfupsUchrQGqvDbZDbKJw+kcRGSmdHVYrv+ltd0pnpDTVpiTqnaBru9iEvA8pz8W46v0Amwg=="], + + "@babel/types": ["@babel/types@7.28.1", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.27.1" } }, "sha512-x0LvFTekgSX+83TI28Y9wYPUfzrnl2aT5+5QLnO6v7mSJYtEEevuDRN0F0uSHRk1G1IWZC43o00Y0xDDrpBGPQ=="], + + "@braintree/sanitize-url": ["@braintree/sanitize-url@7.1.1", 
"", {}, "sha512-i1L7noDNxtFyL5DmZafWy1wRVhGehQmzZaz1HiN5e7iylJMSZR7ekOV7NsIqa5qBldlLrsKv4HbgFUVlQrz8Mw=="], + + "@chevrotain/cst-dts-gen": ["@chevrotain/cst-dts-gen@11.0.3", "", { "dependencies": { "@chevrotain/gast": "11.0.3", "@chevrotain/types": "11.0.3", "lodash-es": "4.17.21" } }, "sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ=="], + + "@chevrotain/gast": ["@chevrotain/gast@11.0.3", "", { "dependencies": { "@chevrotain/types": "11.0.3", "lodash-es": "4.17.21" } }, "sha512-+qNfcoNk70PyS/uxmj3li5NiECO+2YKZZQMbmjTqRI3Qchu8Hig/Q9vgkHpI3alNjr7M+a2St5pw5w5F6NL5/Q=="], + + "@chevrotain/regexp-to-ast": ["@chevrotain/regexp-to-ast@11.0.3", "", {}, "sha512-1fMHaBZxLFvWI067AVbGJav1eRY7N8DDvYCTwGBiE/ytKBgP8azTdgyrKyWZ9Mfh09eHWb5PgTSO8wi7U824RA=="], + + "@chevrotain/types": ["@chevrotain/types@11.0.3", "", {}, "sha512-gsiM3G8b58kZC2HaWR50gu6Y1440cHiJ+i3JUvcp/35JchYejb2+5MVeJK0iKThYpAa/P2PYFV4hoi44HD+aHQ=="], + + "@chevrotain/utils": ["@chevrotain/utils@11.0.3", "", {}, "sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ=="], + + "@clack/core": ["@clack/core@0.3.5", "", { "dependencies": { "picocolors": "^1.0.0", "sisteransi": "^1.0.5" } }, "sha512-5cfhQNH+1VQ2xLQlmzXMqUoiaH0lRBq9/CLW9lTyMbuKLC3+xEK01tHVvyut++mLOn5urSHmkm6I0Lg9MaJSTQ=="], + + "@clack/prompts": ["@clack/prompts@0.7.0", "", { "dependencies": { "@clack/core": "^0.3.3", "is-unicode-supported": "*", "picocolors": "^1.0.0", "sisteransi": "^1.0.5" } }, "sha512-0MhX9/B4iL6Re04jPrttDm+BsP8y6mS7byuv0BvXgdXhbV5PdlsHt55dvNsuBCPZ7xq1oTAOOuotR9NFbQyMSA=="], + + "@emotion/hash": ["@emotion/hash@0.9.2", "", {}, "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g=="], + + "@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.25.6", "", { "os": "aix", "cpu": "ppc64" }, "sha512-ShbM/3XxwuxjFiuVBHA+d3j5dyac0aEVVq1oluIDf71hUw0aRF59dV/efUsIwFnR6m8JNM2FjZOzmaZ8yG61kw=="], + + "@esbuild/android-arm": ["@esbuild/android-arm@0.25.6", "", { "os": "android", "cpu": "arm" }, "sha512-S8ToEOVfg++AU/bHwdksHNnyLyVM+eMVAOf6yRKFitnwnbwwPNqKr3srzFRe7nzV69RQKb5DgchIX5pt3L53xg=="], + + "@esbuild/android-arm64": ["@esbuild/android-arm64@0.25.6", "", { "os": "android", "cpu": "arm64" }, "sha512-hd5zdUarsK6strW+3Wxi5qWws+rJhCCbMiC9QZyzoxfk5uHRIE8T287giQxzVpEvCwuJ9Qjg6bEjcRJcgfLqoA=="], + + "@esbuild/android-x64": ["@esbuild/android-x64@0.25.6", "", { "os": "android", "cpu": "x64" }, "sha512-0Z7KpHSr3VBIO9A/1wcT3NTy7EB4oNC4upJ5ye3R7taCc2GUdeynSLArnon5G8scPwaU866d3H4BCrE5xLW25A=="], + + "@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.25.6", "", { "os": "darwin", "cpu": "arm64" }, "sha512-FFCssz3XBavjxcFxKsGy2DYK5VSvJqa6y5HXljKzhRZ87LvEi13brPrf/wdyl/BbpbMKJNOr1Sd0jtW4Ge1pAA=="], + + "@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.25.6", "", { "os": "darwin", "cpu": "x64" }, "sha512-GfXs5kry/TkGM2vKqK2oyiLFygJRqKVhawu3+DOCk7OxLy/6jYkWXhlHwOoTb0WqGnWGAS7sooxbZowy+pK9Yg=="], + + "@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.25.6", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-aoLF2c3OvDn2XDTRvn8hN6DRzVVpDlj2B/F66clWd/FHLiHaG3aVZjxQX2DYphA5y/evbdGvC6Us13tvyt4pWg=="], + + "@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.25.6", "", { "os": "freebsd", "cpu": "x64" }, "sha512-2SkqTjTSo2dYi/jzFbU9Plt1vk0+nNg8YC8rOXXea+iA3hfNJWebKYPs3xnOUf9+ZWhKAaxnQNUf2X9LOpeiMQ=="], + + "@esbuild/linux-arm": ["@esbuild/linux-arm@0.25.6", "", { "os": "linux", "cpu": "arm" }, 
"sha512-SZHQlzvqv4Du5PrKE2faN0qlbsaW/3QQfUUc6yO2EjFcA83xnwm91UbEEVx4ApZ9Z5oG8Bxz4qPE+HFwtVcfyw=="], + + "@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.25.6", "", { "os": "linux", "cpu": "arm64" }, "sha512-b967hU0gqKd9Drsh/UuAm21Khpoh6mPBSgz8mKRq4P5mVK8bpA+hQzmm/ZwGVULSNBzKdZPQBRT3+WuVavcWsQ=="], + + "@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.25.6", "", { "os": "linux", "cpu": "ia32" }, "sha512-aHWdQ2AAltRkLPOsKdi3xv0mZ8fUGPdlKEjIEhxCPm5yKEThcUjHpWB1idN74lfXGnZ5SULQSgtr5Qos5B0bPw=="], + + "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.25.6", "", { "os": "linux", "cpu": "none" }, "sha512-VgKCsHdXRSQ7E1+QXGdRPlQ/e08bN6WMQb27/TMfV+vPjjTImuT9PmLXupRlC90S1JeNNW5lzkAEO/McKeJ2yg=="], + + "@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.25.6", "", { "os": "linux", "cpu": "none" }, "sha512-WViNlpivRKT9/py3kCmkHnn44GkGXVdXfdc4drNmRl15zVQ2+D2uFwdlGh6IuK5AAnGTo2qPB1Djppj+t78rzw=="], + + "@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.25.6", "", { "os": "linux", "cpu": "ppc64" }, "sha512-wyYKZ9NTdmAMb5730I38lBqVu6cKl4ZfYXIs31Baf8aoOtB4xSGi3THmDYt4BTFHk7/EcVixkOV2uZfwU3Q2Jw=="], + + "@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.25.6", "", { "os": "linux", "cpu": "none" }, "sha512-KZh7bAGGcrinEj4qzilJ4hqTY3Dg2U82c8bv+e1xqNqZCrCyc+TL9AUEn5WGKDzm3CfC5RODE/qc96OcbIe33w=="], + + "@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.25.6", "", { "os": "linux", "cpu": "s390x" }, "sha512-9N1LsTwAuE9oj6lHMyyAM+ucxGiVnEqUdp4v7IaMmrwb06ZTEVCIs3oPPplVsnjPfyjmxwHxHMF8b6vzUVAUGw=="], + + "@esbuild/linux-x64": ["@esbuild/linux-x64@0.25.6", "", { "os": "linux", "cpu": "x64" }, "sha512-A6bJB41b4lKFWRKNrWoP2LHsjVzNiaurf7wyj/XtFNTsnPuxwEBWHLty+ZE0dWBKuSK1fvKgrKaNjBS7qbFKig=="], + + "@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.25.6", "", { "os": "none", "cpu": "arm64" }, "sha512-IjA+DcwoVpjEvyxZddDqBY+uJ2Snc6duLpjmkXm/v4xuS3H+3FkLZlDm9ZsAbF9rsfP3zeA0/ArNDORZgrxR/Q=="], + + "@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.25.6", "", { "os": "none", "cpu": "x64" }, "sha512-dUXuZr5WenIDlMHdMkvDc1FAu4xdWixTCRgP7RQLBOkkGgwuuzaGSYcOpW4jFxzpzL1ejb8yF620UxAqnBrR9g=="], + + "@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.25.6", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-l8ZCvXP0tbTJ3iaqdNf3pjaOSd5ex/e6/omLIQCVBLmHTlfXW3zAxQ4fnDmPLOB1x9xrcSi/xtCWFwCZRIaEwg=="], + + "@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.25.6", "", { "os": "openbsd", "cpu": "x64" }, "sha512-hKrmDa0aOFOr71KQ/19JC7az1P0GWtCN1t2ahYAf4O007DHZt/dW8ym5+CUdJhQ/qkZmI1HAF8KkJbEFtCL7gw=="], + + "@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.25.6", "", { "os": "none", "cpu": "arm64" }, "sha512-+SqBcAWoB1fYKmpWoQP4pGtx+pUUC//RNYhFdbcSA16617cchuryuhOCRpPsjCblKukAckWsV+aQ3UKT/RMPcA=="], + + "@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.25.6", "", { "os": "sunos", "cpu": "x64" }, "sha512-dyCGxv1/Br7MiSC42qinGL8KkG4kX0pEsdb0+TKhmJZgCUDBGmyo1/ArCjNGiOLiIAgdbWgmWgib4HoCi5t7kA=="], + + "@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.25.6", "", { "os": "win32", "cpu": "arm64" }, "sha512-42QOgcZeZOvXfsCBJF5Afw73t4veOId//XD3i+/9gSkhSV6Gk3VPlWncctI+JcOyERv85FUo7RxuxGy+z8A43Q=="], + + "@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.25.6", "", { "os": "win32", "cpu": "ia32" }, "sha512-4AWhgXmDuYN7rJI6ORB+uU9DHLq/erBbuMoAuB4VWJTu5KtCgcKYPynF0YI1VkBNuEfjNlLrFr9KZPJzrtLkrQ=="], + + "@esbuild/win32-x64": ["@esbuild/win32-x64@0.25.6", "", { "os": "win32", "cpu": "x64" }, "sha512-NgJPHHbEpLQgDH2MjQu90pzW/5vvXIZ7KOnPyNBm92A6WgZ/7b6fJyUBjoumLqeOQQGqY2QjQxRo97ah4Sj0cA=="], + + "@floating-ui/core": ["@floating-ui/core@1.7.2", "", { 
"dependencies": { "@floating-ui/utils": "^0.2.10" } }, "sha512-wNB5ooIKHQc+Kui96jE/n69rHFWAVoxn5CAzL1Xdd8FG03cgY3MLO+GF9U3W737fYDSgPWA6MReKhBQBop6Pcw=="], + + "@floating-ui/dom": ["@floating-ui/dom@1.7.2", "", { "dependencies": { "@floating-ui/core": "^1.7.2", "@floating-ui/utils": "^0.2.10" } }, "sha512-7cfaOQuCS27HD7DX+6ib2OrnW+b4ZBwDNnCcT0uTyidcmyWb03FnQqJybDBoCnpdxwBSfA94UAYlRCt7mV+TbA=="], + + "@floating-ui/react": ["@floating-ui/react@0.27.13", "", { "dependencies": { "@floating-ui/react-dom": "^2.1.4", "@floating-ui/utils": "^0.2.10", "tabbable": "^6.0.0" }, "peerDependencies": { "react": ">=17.0.0", "react-dom": ">=17.0.0" } }, "sha512-Qmj6t9TjgWAvbygNEu1hj4dbHI9CY0ziCMIJrmYoDIn9TUAH5lRmiIeZmRd4c6QEZkzdoH7jNnoNyoY1AIESiA=="], + + "@floating-ui/react-dom": ["@floating-ui/react-dom@2.1.4", "", { "dependencies": { "@floating-ui/dom": "^1.7.2" }, "peerDependencies": { "react": ">=16.8.0", "react-dom": ">=16.8.0" } }, "sha512-JbbpPhp38UmXDDAu60RJmbeme37Jbgsm7NrHGgzYYFKmblzRUh6Pa641dII6LsjwF4XlScDrde2UAzDo/b9KPw=="], + + "@floating-ui/utils": ["@floating-ui/utils@0.2.10", "", {}, "sha512-aGTxbpbg8/b5JfU1HXSrbH3wXZuLPJcNEcZQFMxLs3oSzgtVu6nFPkbbGGUvBcUjKV2YyB9Wxxabo+HEH9tcRQ=="], + + "@fortawesome/fontawesome-free": ["@fortawesome/fontawesome-free@6.7.2", "", {}, "sha512-JUOtgFW6k9u4Y+xeIaEiLr3+cjoUPiAuLXoyKOJSia6Duzb7pq+A76P9ZdPDoAoxHdHzq6gE9/jKBGXlZT8FbA=="], + + "@hono/node-server": ["@hono/node-server@1.16.0", "", { "peerDependencies": { "hono": "^4" } }, "sha512-9LwRb5XOrTFapOABiQjGC50wRVlzUvWZsDHINCnkBniP+Q+LQf4waN0nzk9t+2kqcTsnGnieSmqpHsr6kH2bdw=="], + + "@iconify/types": ["@iconify/types@2.0.0", "", {}, "sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg=="], + + "@iconify/utils": ["@iconify/utils@2.3.0", "", { "dependencies": { "@antfu/install-pkg": "^1.0.0", "@antfu/utils": "^8.1.0", "@iconify/types": "^2.0.0", "debug": "^4.4.0", "globals": "^15.14.0", "kolorist": "^1.8.0", "local-pkg": "^1.0.0", "mlly": "^1.7.4" } }, "sha512-GmQ78prtwYW6EtzXRU1rY+KwOKfz32PD7iJh6Iyqw68GiKuoZ2A6pRtzWONz5VQJbp50mEjXh/7NkumtrAgRKA=="], + + "@isaacs/balanced-match": ["@isaacs/balanced-match@4.0.1", "", {}, "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ=="], + + "@isaacs/brace-expansion": ["@isaacs/brace-expansion@5.0.0", "", { "dependencies": { "@isaacs/balanced-match": "^4.0.1" } }, "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA=="], + + "@isaacs/cliui": ["@isaacs/cliui@8.0.2", "", { "dependencies": { "string-width": "^5.1.2", "string-width-cjs": "npm:string-width@^4.2.0", "strip-ansi": "^7.0.1", "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", "wrap-ansi": "^8.1.0", "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" } }, "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA=="], + + "@jridgewell/gen-mapping": ["@jridgewell/gen-mapping@0.3.12", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-OuLGC46TjB5BbN1dH8JULVVZY4WTdkF7tV9Ys6wLL1rubZnCMstOhNHueU5bLCrnRuDhKPDM4g6sw4Bel5Gzqg=="], + + "@jridgewell/resolve-uri": ["@jridgewell/resolve-uri@3.1.2", "", {}, "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw=="], + + "@jridgewell/sourcemap-codec": ["@jridgewell/sourcemap-codec@1.5.4", "", {}, "sha512-VT2+G1VQs/9oz078bLrYbecdZKs912zQlkelYpuf+SXF+QvZDYJlbx/LSx+meSAwdDFnF8FVXW92AVjjkVmgFw=="], + + "@jridgewell/trace-mapping": 
["@jridgewell/trace-mapping@0.3.29", "", { "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" } }, "sha512-uw6guiW/gcAGPDhLmd77/6lW8QLeiV5RUTsAX46Db6oLhGaVj4lhnPwb184s1bkc8kdVg/+h988dro8GRDpmYQ=="], + + "@mdx-js/mdx": ["@mdx-js/mdx@3.1.0", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdx": "^2.0.0", "collapse-white-space": "^2.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "estree-util-scope": "^1.0.0", "estree-walker": "^3.0.0", "hast-util-to-jsx-runtime": "^2.0.0", "markdown-extensions": "^2.0.0", "recma-build-jsx": "^1.0.0", "recma-jsx": "^1.0.0", "recma-stringify": "^1.0.0", "rehype-recma": "^1.0.0", "remark-mdx": "^3.0.0", "remark-parse": "^11.0.0", "remark-rehype": "^11.0.0", "source-map": "^0.7.0", "unified": "^11.0.0", "unist-util-position-from-estree": "^2.0.0", "unist-util-stringify-position": "^4.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0" } }, "sha512-/QxEhPAvGwbQmy1Px8F899L5Uc2KZ6JtXwlCgJmjSTBedwOZkByYcBG4GceIGPXRDsmfxhHazuS+hlOShRLeDw=="], + + "@mdx-js/react": ["@mdx-js/react@3.1.0", "", { "dependencies": { "@types/mdx": "^2.0.0" }, "peerDependencies": { "@types/react": ">=16", "react": ">=16" } }, "sha512-QjHtSaoameoalGnKDT3FoIl4+9RwyTmo9ZJGBdLOks/YOiWHoRDI3PUwEzOE7kEmGcV3AFcp9K6dYu9rEuKLAQ=="], + + "@mdx-js/rollup": ["@mdx-js/rollup@3.1.0", "", { "dependencies": { "@mdx-js/mdx": "^3.0.0", "@rollup/pluginutils": "^5.0.0", "source-map": "^0.7.0", "vfile": "^6.0.0" }, "peerDependencies": { "rollup": ">=2" } }, "sha512-q4xOtUXpCzeouE8GaJ8StT4rDxm/U5j6lkMHL2srb2Q3Y7cobE0aXyPzXVVlbeIMBi+5R5MpbiaVE5/vJUdnHg=="], + + "@mermaid-js/parser": ["@mermaid-js/parser@0.6.2", "", { "dependencies": { "langium": "3.3.1" } }, "sha512-+PO02uGF6L6Cs0Bw8RpGhikVvMWEysfAyl27qTlroUB8jSWr1lL0Sf6zi78ZxlSnmgSY2AMMKVgghnN9jTtwkQ=="], + + "@noble/hashes": ["@noble/hashes@1.8.0", "", {}, "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A=="], + + "@nodelib/fs.scandir": ["@nodelib/fs.scandir@2.1.5", "", { "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" } }, "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g=="], + + "@nodelib/fs.stat": ["@nodelib/fs.stat@2.0.5", "", {}, "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A=="], + + "@nodelib/fs.walk": ["@nodelib/fs.walk@1.2.8", "", { "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" } }, "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg=="], + + "@radix-ui/colors": ["@radix-ui/colors@3.0.0", "", {}, "sha512-FUOsGBkHrYJwCSEtWRCIfQbZG7q1e6DgxCIOe1SUQzDe/7rXXeA47s8yCn6fuTNQAj1Zq4oTFi9Yjp3wzElcxg=="], + + "@radix-ui/number": ["@radix-ui/number@1.1.1", "", {}, "sha512-MkKCwxlXTgz6CFoJx3pCwn07GKp36+aZyu/u2Ln2VrA5DcdyCZkASEDBTd8x5whTQQL5CiYf4prXKLcgQdv29g=="], + + "@radix-ui/primitive": ["@radix-ui/primitive@1.1.2", "", {}, "sha512-XnbHrrprsNqZKQhStrSwgRUQzoCI1glLzdw79xiZPoofhGICeZRSQ3dIxAKH1gb3OHfNf4d6f+vAv3kil2eggA=="], + + "@radix-ui/react-accessible-icon": ["@radix-ui/react-accessible-icon@1.1.7", "", { "dependencies": { "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", 
"@types/react-dom"] }, "sha512-XM+E4WXl0OqUJFovy6GjmxxFyx9opfCAIUku4dlKRd5YEPqt4kALOkQOp0Of6reHuUkJuiPBEc5k0o4z4lTC8A=="], + + "@radix-ui/react-accordion": ["@radix-ui/react-accordion@1.2.11", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collapsible": "1.1.11", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-l3W5D54emV2ues7jjeG1xcyN7S3jnK3zE2zHqgn0CmMsy9lNJwmgcrmaxS+7ipw15FAivzKNzH3d5EcGoFKw0A=="], + + "@radix-ui/react-alert-dialog": ["@radix-ui/react-alert-dialog@1.1.14", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dialog": "1.1.14", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-IOZfZ3nPvN6lXpJTBCunFQPRSvK8MDgSc1FB85xnIpUKOw9en0dJj8JmCAxV7BiZdtYlUpmrQjoTFkVYtdoWzQ=="], + + "@radix-ui/react-arrow": ["@radix-ui/react-arrow@1.1.7", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-F+M1tLhO+mlQaOWspE8Wstg+z6PwxwRd8oQ8IXceWz92kfAmalTRf0EjrouQeo7QssEPfCn05B4Ihs1K9WQ/7w=="], + + "@radix-ui/react-aspect-ratio": ["@radix-ui/react-aspect-ratio@1.1.7", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-Yq6lvO9HQyPwev1onK1daHCHqXVLzPhSVjmsNjCa2Zcxy2f7uJD2itDtxknv6FzAKCwD1qQkeVDmX/cev13n/g=="], + + "@radix-ui/react-avatar": ["@radix-ui/react-avatar@1.1.10", "", { "dependencies": { "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-is-hydrated": "0.1.0", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-V8piFfWapM5OmNCXTzVQY+E1rDa53zY+MQ4Y7356v4fFz6vqCyUtIz2rUD44ZEdwg78/jKmMJHj07+C/Z/rcog=="], + + "@radix-ui/react-checkbox": ["@radix-ui/react-checkbox@1.3.2", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-previous": 
"1.1.1", "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-yd+dI56KZqawxKZrJ31eENUwqc1QSqg4OZ15rybGjF2ZNwMO+wCyHzAVLRp9qoYJf7kYy0YpZ2b0JCzJ42HZpA=="], + + "@radix-ui/react-collapsible": ["@radix-ui/react-collapsible@1.1.11", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-2qrRsVGSCYasSz1RFOorXwl0H7g7J1frQtgpQgYrt+MOidtPAINHn9CPovQXb83r8ahapdx3Tu0fa/pdFFSdPg=="], + + "@radix-ui/react-collection": ["@radix-ui/react-collection@1.1.7", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-Fh9rGN0MoI4ZFUNyfFVNU4y9LUz93u9/0K+yLgA2bwRojxM8JU1DyvvMBabnZPBgMWREAJvU2jjVzq+LrFUglw=="], + + "@radix-ui/react-compose-refs": ["@radix-ui/react-compose-refs@1.1.2", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg=="], + + "@radix-ui/react-context": ["@radix-ui/react-context@1.1.2", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-jCi/QKUM2r1Ju5a3J64TH2A5SpKAgh0LpknyqdQ4m6DCV0xJ2HG1xARRwNGPQfi1SLdLWZ1OJz6F4OMBBNiGJA=="], + + "@radix-ui/react-context-menu": ["@radix-ui/react-context-menu@2.2.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-menu": "2.1.15", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-UsQUMjcYTsBjTSXw0P3GO0werEQvUY2plgRQuKoCTtkNr45q1DiL51j4m7gxhABzZ0BadoXNsIbg7F3KwiUBbw=="], + + "@radix-ui/react-dialog": ["@radix-ui/react-dialog@1.1.14", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-focus-guards": "1.1.2", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", 
"@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-+CpweKjqpzTmwRwcYECQcNYbI8V9VSQt0SNFKeEBLgfucbsLssU6Ppq7wUdNXEGb573bMjFhVjKVll8rmV6zMw=="], + + "@radix-ui/react-direction": ["@radix-ui/react-direction@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-1UEWRX6jnOA2y4H5WczZ44gOOjTEmlqv1uNW4GAJEO5+bauCBhv8snY65Iw5/VOS/ghKN9gr2KjnLKxrsvoMVw=="], + + "@radix-ui/react-dismissable-layer": ["@radix-ui/react-dismissable-layer@1.1.10", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-escape-keydown": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-IM1zzRV4W3HtVgftdQiiOmA0AdJlCtMLe00FXaHwgt3rAnNsIyDqshvkIW3hj/iu5hu8ERP7KIYki6NkqDxAwQ=="], + + "@radix-ui/react-dropdown-menu": ["@radix-ui/react-dropdown-menu@2.1.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-menu": "2.1.15", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-mIBnOjgwo9AH3FyKaSWoSu/dYj6VdhJ7frEPiGTeXCdUFHjl9h3mFh2wwhEtINOmYXWhdpf1rY2minFsmaNgVQ=="], + + "@radix-ui/react-focus-guards": ["@radix-ui/react-focus-guards@1.1.2", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-fyjAACV62oPV925xFCrH8DR5xWhg9KYtJT4s3u54jxp+L/hbpTY2kIeEFFbFe+a/HCE94zGQMZLIpVTPVZDhaA=="], + + "@radix-ui/react-focus-scope": ["@radix-ui/react-focus-scope@1.1.7", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-t2ODlkXBQyn7jkl6TNaw/MtVEVvIGelJDCG41Okq/KwUsJBwQ4XVZsHAVUkK4mBv3ewiAS3PGuUWuY2BoK4ZUw=="], + + "@radix-ui/react-form": ["@radix-ui/react-form@0.1.7", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-label": "2.1.7", "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 
|| ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-IXLKFnaYvFg/KkeV5QfOX7tRnwHXp127koOFUjLWMTrRv5Rny3DQcAtIFFeA/Cli4HHM8DuJCXAUsgnFVJndlw=="], + + "@radix-ui/react-hover-card": ["@radix-ui/react-hover-card@1.1.14", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-popper": "1.2.7", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-CPYZ24Mhirm+g6D8jArmLzjYu4Eyg3TTUHswR26QgzXBHBe64BO/RHOJKzmF/Dxb4y4f9PKyJdwm/O/AhNkb+Q=="], + + "@radix-ui/react-icons": ["@radix-ui/react-icons@1.3.2", "", { "peerDependencies": { "react": "^16.x || ^17.x || ^18.x || ^19.0.0 || ^19.0.0-rc" } }, "sha512-fyQIhGDhzfc9pK2kH6Pl9c4BDJGfMkPqkyIgYDthyNYoNg3wVhoJMMh19WS4Up/1KMPFVpNsT2q3WmXn2N1m6g=="], + + "@radix-ui/react-id": ["@radix-ui/react-id@1.1.1", "", { "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-kGkGegYIdQsOb4XjsfM97rXsiHaBwco+hFI66oO4s9LU+PLAC5oJ7khdOVFxkhsmlbpUqDAvXw11CluXP+jkHg=="], + + "@radix-ui/react-label": ["@radix-ui/react-label@2.1.7", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-YT1GqPSL8kJn20djelMX7/cTRp/Y9w5IZHvfxQTVHrOqa2yMl7i/UfMqKRU5V7mEyKTrUVgJXhNQPVCG8PBLoQ=="], + + "@radix-ui/react-menu": ["@radix-ui/react-menu@2.1.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-focus-guards": "1.1.2", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.7", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-callback-ref": "1.1.1", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-tVlmA3Vb9n8SZSd+YSbuFR66l87Wiy4du+YE+0hzKQEANA+7cWKH1WgqcEX4pXqxUFQKrWQGHdvEfw00TjFiew=="], + + "@radix-ui/react-menubar": ["@radix-ui/react-menubar@1.1.15", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-menu": "2.1.15", "@radix-ui/react-primitive": "2.1.3", 
"@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-Z71C7LGD+YDYo3TV81paUs8f3Zbmkvg6VLRQpKYfzioOE6n7fOhA3ApK/V/2Odolxjoc4ENk8AYCjohCNayd5A=="], + + "@radix-ui/react-navigation-menu": ["@radix-ui/react-navigation-menu@1.2.13", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-WG8wWfDiJlSF5hELjwfjSGOXcBR/ZMhBFCGYe8vERpC39CQYZeq1PQ2kaYHdye3V95d06H89KGMsVCIE4LWo3g=="], + + "@radix-ui/react-one-time-password-field": ["@radix-ui/react-one-time-password-field@0.1.7", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-effect-event": "0.0.2", "@radix-ui/react-use-is-hydrated": "0.1.0", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-w1vm7AGI8tNXVovOK7TYQHrAGpRF7qQL+ENpT1a743De5Zmay2RbWGKAiYDKIyIuqptns+znCKwNztE2xl1n0Q=="], + + "@radix-ui/react-password-toggle-field": ["@radix-ui/react-password-toggle-field@0.1.2", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-effect-event": "0.0.2", "@radix-ui/react-use-is-hydrated": "0.1.0" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-F90uYnlBsLPU1UbSLciLsWQmk8+hdWa6SFw4GXaIdNWxFxI5ITKVdAG64f+Twaa9ic6xE7pqxPyUmodrGjT4pQ=="], + + "@radix-ui/react-popover": ["@radix-ui/react-popover@1.1.14", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-focus-guards": "1.1.2", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": 
"1.1.1", "@radix-ui/react-popper": "1.2.7", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-ODz16+1iIbGUfFEfKx2HTPKizg2MN39uIOV8MXeHnmdd3i/N9Wt7vU46wbHsqA0xoaQyXVcs0KIlBdOA2Y95bw=="], + + "@radix-ui/react-popper": ["@radix-ui/react-popper@1.2.7", "", { "dependencies": { "@floating-ui/react-dom": "^2.0.0", "@radix-ui/react-arrow": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-rect": "1.1.1", "@radix-ui/react-use-size": "1.1.1", "@radix-ui/rect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-IUFAccz1JyKcf/RjB552PlWwxjeCJB8/4KxT7EhBHOJM+mN7LdW+B3kacJXILm32xawcMMjb2i0cIZpo+f9kiQ=="], + + "@radix-ui/react-portal": ["@radix-ui/react-portal@1.1.9", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-bpIxvq03if6UNwXZ+HTK71JLh4APvnXntDc6XOX8UVq4XQOVl7lwok0AvIl+b8zgCw3fSaVTZMpAPPagXbKmHQ=="], + + "@radix-ui/react-presence": ["@radix-ui/react-presence@1.1.4", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-ueDqRbdc4/bkaQT3GIpLQssRlFgWaL/U2z/S31qRwwLWoxHLgry3SIfCwhxeQNbirEUXFa+lq3RL3oBYXtcmIA=="], + + "@radix-ui/react-primitive": ["@radix-ui/react-primitive@2.1.3", "", { "dependencies": { "@radix-ui/react-slot": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-m9gTwRkhy2lvCPe6QJp4d3G1TYEUHn/FzJUtq9MjH46an1wJU+GdoGC5VLof8RX8Ft/DlpshApkhswDLZzHIcQ=="], + + "@radix-ui/react-progress": ["@radix-ui/react-progress@1.1.7", "", { "dependencies": { "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-vPdg/tF6YC/ynuBIJlk1mm7Le0VgW6ub6J2UWnTQ7/D23KXcPI1qy+0vBkgKgd38RCMJavBXpB83HPNFMTb0Fg=="], + + "@radix-ui/react-radio-group": 
["@radix-ui/react-radio-group@1.3.7", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-9w5XhD0KPOrm92OTTE0SysH3sYzHsSTHNvZgUBo/VZ80VdYyB5RneDbc0dKpURS24IxkoFRu/hI0i4XyfFwY6g=="], + + "@radix-ui/react-roving-focus": ["@radix-ui/react-roving-focus@1.1.10", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-dT9aOXUen9JSsxnMPv/0VqySQf5eDQ6LCk5Sw28kamz8wSOW2bJdlX2Bg5VUIIcV+6XlHpWTIuTPCf/UNIyq8Q=="], + + "@radix-ui/react-scroll-area": ["@radix-ui/react-scroll-area@1.2.9", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-YSjEfBXnhUELsO2VzjdtYYD4CfQjvao+lhhrX5XsHD7/cyUNzljF1FHEbgTPN7LH2MClfwRMIsYlqTYpKTTe2A=="], + + "@radix-ui/react-select": ["@radix-ui/react-select@2.2.5", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-focus-guards": "1.1.2", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.7", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-visually-hidden": "1.2.3", "aria-hidden": "^1.2.4", "react-remove-scroll": "^2.6.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, 
"sha512-HnMTdXEVuuyzx63ME0ut4+sEMYW6oouHWNGUZc7ddvUWIcfCva/AMoqEW/3wnEllriMWBa0RHspCYnfCWJQYmA=="], + + "@radix-ui/react-separator": ["@radix-ui/react-separator@1.1.7", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-0HEb8R9E8A+jZjvmFCy/J4xhbXy3TV+9XSnGJ3KvTtjlIUy/YQ/p6UYZvi7YbeoeXdyU9+Y3scizK6hkY37baA=="], + + "@radix-ui/react-slider": ["@radix-ui/react-slider@1.3.5", "", { "dependencies": { "@radix-ui/number": "1.1.1", "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-rkfe2pU2NBAYfGaxa3Mqosi7VZEWX5CxKaanRv0vZd4Zhl9fvQrg0VM93dv3xGLGfrHuoTRF3JXH8nb9g+B3fw=="], + + "@radix-ui/react-slot": ["@radix-ui/react-slot@1.2.3", "", { "dependencies": { "@radix-ui/react-compose-refs": "1.1.2" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A=="], + + "@radix-ui/react-switch": ["@radix-ui/react-switch@1.2.5", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-previous": "1.1.1", "@radix-ui/react-use-size": "1.1.1" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-5ijLkak6ZMylXsaImpZ8u4Rlf5grRmoc0p0QeX9VJtlrM4f5m3nCTX8tWga/zOA8PZYIR/t0p2Mnvd7InrJ6yQ=="], + + "@radix-ui/react-tabs": ["@radix-ui/react-tabs@1.1.12", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-GTVAlRVrQrSw3cEARM0nAx73ixrWDPNZAruETn3oHCNP6SbZ/hNxdxp+u7VkIEv3/sFoLq1PfcHrl7Pnp0CDpw=="], + + "@radix-ui/react-toast": ["@radix-ui/react-toast@1.2.14", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.10", 
"@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-nAP5FBxBJGQ/YfUB+r+O6USFVkWq3gAInkxyEnmvEV5jtSbfDhfa4hwX8CraCnbjMLsE7XSf/K75l9xXY7joWg=="], + + "@radix-ui/react-toggle": ["@radix-ui/react-toggle@1.1.9", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-ZoFkBBz9zv9GWer7wIjvdRxmh2wyc2oKWw6C6CseWd6/yq1DK/l5lJ+wnsmFwJZbBYqr02mrf8A2q/CVCuM3ZA=="], + + "@radix-ui/react-toggle-group": ["@radix-ui/react-toggle-group@1.1.10", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-toggle": "1.1.9", "@radix-ui/react-use-controllable-state": "1.2.2" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-kiU694Km3WFLTC75DdqgM/3Jauf3rD9wxeS9XtyWFKsBUeZA337lC+6uUazT7I1DhanZ5gyD5Stf8uf2dbQxOQ=="], + + "@radix-ui/react-toolbar": ["@radix-ui/react-toolbar@1.1.10", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-separator": "1.1.7", "@radix-ui/react-toggle-group": "1.1.10" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-jiwQsduEL++M4YBIurjSa+voD86OIytCod0/dbIxFZDLD8NfO1//keXYMfsW8BPcfqwoNjt+y06XcJqAb4KR7A=="], + + "@radix-ui/react-tooltip": ["@radix-ui/react-tooltip@1.2.7", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-id": "1.1.1", "@radix-ui/react-popper": "1.2.7", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-Ap+fNYwKTYJ9pzqW+Xe2HtMRbQ/EeWkj2qykZ6SuEV4iS/o1bZI5ssJbk4D2r8XuDuOBVz/tIx2JObtuqU+5Zw=="], + + 
"@radix-ui/react-use-callback-ref": ["@radix-ui/react-use-callback-ref@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-FkBMwD+qbGQeMu1cOHnuGB6x4yzPjho8ap5WtbEJ26umhgqVXbhekKUQO+hZEL1vU92a3wHwdp0HAcqAUF5iDg=="], + + "@radix-ui/react-use-controllable-state": ["@radix-ui/react-use-controllable-state@1.2.2", "", { "dependencies": { "@radix-ui/react-use-effect-event": "0.0.2", "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-BjasUjixPFdS+NKkypcyyN5Pmg83Olst0+c6vGov0diwTEo6mgdqVR6hxcEgFuh4QrAs7Rc+9KuGJ9TVCj0Zzg=="], + + "@radix-ui/react-use-effect-event": ["@radix-ui/react-use-effect-event@0.0.2", "", { "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Qp8WbZOBe+blgpuUT+lw2xheLP8q0oatc9UpmiemEICxGvFLYmHm9QowVZGHtJlGbS6A6yJ3iViad/2cVjnOiA=="], + + "@radix-ui/react-use-escape-keydown": ["@radix-ui/react-use-escape-keydown@1.1.1", "", { "dependencies": { "@radix-ui/react-use-callback-ref": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Il0+boE7w/XebUHyBjroE+DbByORGR9KKmITzbR7MyQ4akpORYP/ZmbhAr0DG7RmmBqoOnZdy2QlvajJ2QA59g=="], + + "@radix-ui/react-use-is-hydrated": ["@radix-ui/react-use-is-hydrated@0.1.0", "", { "dependencies": { "use-sync-external-store": "^1.5.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-U+UORVEq+cTnRIaostJv9AGdV3G6Y+zbVd+12e18jQ5A3c0xL03IhnHuiU4UV69wolOQp5GfR58NW/EgdQhwOA=="], + + "@radix-ui/react-use-layout-effect": ["@radix-ui/react-use-layout-effect@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-RbJRS4UWQFkzHTTwVymMTUv8EqYhOp8dOOviLj2ugtTiXRaRQS7GLGxZTLL1jWhMeoSCf5zmcZkqTl9IiYfXcQ=="], + + "@radix-ui/react-use-previous": ["@radix-ui/react-use-previous@1.1.1", "", { "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-2dHfToCj/pzca2Ck724OZ5L0EVrr3eHRNsG/b3xQJLA2hZpVCS99bLAX+hm1IHXDEnzU6by5z/5MIY794/a8NQ=="], + + "@radix-ui/react-use-rect": ["@radix-ui/react-use-rect@1.1.1", "", { "dependencies": { "@radix-ui/rect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-QTYuDesS0VtuHNNvMh+CjlKJ4LJickCMUAqjlE3+j8w+RlRpwyX3apEQKGFzbZGdo7XNG1tXa+bQqIE7HIXT2w=="], + + "@radix-ui/react-use-size": ["@radix-ui/react-use-size@1.1.1", "", { "dependencies": { "@radix-ui/react-use-layout-effect": "1.1.1" }, "peerDependencies": { "@types/react": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-ewrXRDTAqAXlkl6t/fkXWNAhFX9I+CkKlw6zjEwk86RSPKwZr3xpBRso655aqYafwtnbpHLj6toFzmd6xdVptQ=="], + + "@radix-ui/react-visually-hidden": ["@radix-ui/react-visually-hidden@1.2.3", "", { "dependencies": { "@radix-ui/react-primitive": "2.1.3" }, "peerDependencies": { "@types/react": "*", 
"@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-pzJq12tEaaIhqjbzpCuv/OypJY/BPavOofm+dbab+MHLajy277+1lLm6JFcGgF5eskJ6mquGirhXY2GD/8u8Ug=="], + + "@radix-ui/rect": ["@radix-ui/rect@1.1.1", "", {}, "sha512-HPwpGIzkl28mWyZqG52jiqDJ12waP11Pa1lGoiyUkIEuMLBP0oeK/C89esbXrxsky5we7dfd8U58nm0SgAWpVw=="], + + "@rolldown/pluginutils": ["@rolldown/pluginutils@1.0.0-beta.19", "", {}, "sha512-3FL3mnMbPu0muGOCaKAhhFEYmqv9eTfPSJRJmANrCwtgK8VuxpsZDGK+m0LYAGoyO8+0j5uRe4PeyPDK1yA/hA=="], + + "@rollup/pluginutils": ["@rollup/pluginutils@5.2.0", "", { "dependencies": { "@types/estree": "^1.0.0", "estree-walker": "^2.0.2", "picomatch": "^4.0.2" }, "peerDependencies": { "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" }, "optionalPeers": ["rollup"] }, "sha512-qWJ2ZTbmumwiLFomfzTyt5Kng4hwPi9rwCYN4SHb6eaRU1KNO4ccxINHr/VhH4GgPlt1XfSTLX2LBTme8ne4Zw=="], + + "@rollup/rollup-android-arm-eabi": ["@rollup/rollup-android-arm-eabi@4.45.1", "", { "os": "android", "cpu": "arm" }, "sha512-NEySIFvMY0ZQO+utJkgoMiCAjMrGvnbDLHvcmlA33UXJpYBCvlBEbMMtV837uCkS+plG2umfhn0T5mMAxGrlRA=="], + + "@rollup/rollup-android-arm64": ["@rollup/rollup-android-arm64@4.45.1", "", { "os": "android", "cpu": "arm64" }, "sha512-ujQ+sMXJkg4LRJaYreaVx7Z/VMgBBd89wGS4qMrdtfUFZ+TSY5Rs9asgjitLwzeIbhwdEhyj29zhst3L1lKsRQ=="], + + "@rollup/rollup-darwin-arm64": ["@rollup/rollup-darwin-arm64@4.45.1", "", { "os": "darwin", "cpu": "arm64" }, "sha512-FSncqHvqTm3lC6Y13xncsdOYfxGSLnP+73k815EfNmpewPs+EyM49haPS105Rh4aF5mJKywk9X0ogzLXZzN9lA=="], + + "@rollup/rollup-darwin-x64": ["@rollup/rollup-darwin-x64@4.45.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-2/vVn/husP5XI7Fsf/RlhDaQJ7x9zjvC81anIVbr4b/f0xtSmXQTFcGIQ/B1cXIYM6h2nAhJkdMHTnD7OtQ9Og=="], + + "@rollup/rollup-freebsd-arm64": ["@rollup/rollup-freebsd-arm64@4.45.1", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-4g1kaDxQItZsrkVTdYQ0bxu4ZIQ32cotoQbmsAnW1jAE4XCMbcBPDirX5fyUzdhVCKgPcrwWuucI8yrVRBw2+g=="], + + "@rollup/rollup-freebsd-x64": ["@rollup/rollup-freebsd-x64@4.45.1", "", { "os": "freebsd", "cpu": "x64" }, "sha512-L/6JsfiL74i3uK1Ti2ZFSNsp5NMiM4/kbbGEcOCps99aZx3g8SJMO1/9Y0n/qKlWZfn6sScf98lEOUe2mBvW9A=="], + + "@rollup/rollup-linux-arm-gnueabihf": ["@rollup/rollup-linux-arm-gnueabihf@4.45.1", "", { "os": "linux", "cpu": "arm" }, "sha512-RkdOTu2jK7brlu+ZwjMIZfdV2sSYHK2qR08FUWcIoqJC2eywHbXr0L8T/pONFwkGukQqERDheaGTeedG+rra6Q=="], + + "@rollup/rollup-linux-arm-musleabihf": ["@rollup/rollup-linux-arm-musleabihf@4.45.1", "", { "os": "linux", "cpu": "arm" }, "sha512-3kJ8pgfBt6CIIr1o+HQA7OZ9mp/zDk3ctekGl9qn/pRBgrRgfwiffaUmqioUGN9hv0OHv2gxmvdKOkARCtRb8Q=="], + + "@rollup/rollup-linux-arm64-gnu": ["@rollup/rollup-linux-arm64-gnu@4.45.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-k3dOKCfIVixWjG7OXTCOmDfJj3vbdhN0QYEqB+OuGArOChek22hn7Uy5A/gTDNAcCy5v2YcXRJ/Qcnm4/ma1xw=="], + + "@rollup/rollup-linux-arm64-musl": ["@rollup/rollup-linux-arm64-musl@4.45.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-PmI1vxQetnM58ZmDFl9/Uk2lpBBby6B6rF4muJc65uZbxCs0EA7hhKCk2PKlmZKuyVSHAyIw3+/SiuMLxKxWog=="], + + "@rollup/rollup-linux-loongarch64-gnu": ["@rollup/rollup-linux-loongarch64-gnu@4.45.1", "", { "os": "linux", "cpu": "none" }, "sha512-9UmI0VzGmNJ28ibHW2GpE2nF0PBQqsyiS4kcJ5vK+wuwGnV5RlqdczVocDSUfGX/Na7/XINRVoUgJyFIgipoRg=="], + + "@rollup/rollup-linux-powerpc64le-gnu": ["@rollup/rollup-linux-powerpc64le-gnu@4.45.1", "", { "os": "linux", "cpu": "ppc64" }, 
"sha512-7nR2KY8oEOUTD3pBAxIBBbZr0U7U+R9HDTPNy+5nVVHDXI4ikYniH1oxQz9VoB5PbBU1CZuDGHkLJkd3zLMWsg=="], + + "@rollup/rollup-linux-riscv64-gnu": ["@rollup/rollup-linux-riscv64-gnu@4.45.1", "", { "os": "linux", "cpu": "none" }, "sha512-nlcl3jgUultKROfZijKjRQLUu9Ma0PeNv/VFHkZiKbXTBQXhpytS8CIj5/NfBeECZtY2FJQubm6ltIxm/ftxpw=="], + + "@rollup/rollup-linux-riscv64-musl": ["@rollup/rollup-linux-riscv64-musl@4.45.1", "", { "os": "linux", "cpu": "none" }, "sha512-HJV65KLS51rW0VY6rvZkiieiBnurSzpzore1bMKAhunQiECPuxsROvyeaot/tcK3A3aGnI+qTHqisrpSgQrpgA=="], + + "@rollup/rollup-linux-s390x-gnu": ["@rollup/rollup-linux-s390x-gnu@4.45.1", "", { "os": "linux", "cpu": "s390x" }, "sha512-NITBOCv3Qqc6hhwFt7jLV78VEO/il4YcBzoMGGNxznLgRQf43VQDae0aAzKiBeEPIxnDrACiMgbqjuihx08OOw=="], + + "@rollup/rollup-linux-x64-gnu": ["@rollup/rollup-linux-x64-gnu@4.45.1", "", { "os": "linux", "cpu": "x64" }, "sha512-+E/lYl6qu1zqgPEnTrs4WysQtvc/Sh4fC2nByfFExqgYrqkKWp1tWIbe+ELhixnenSpBbLXNi6vbEEJ8M7fiHw=="], + + "@rollup/rollup-linux-x64-musl": ["@rollup/rollup-linux-x64-musl@4.45.1", "", { "os": "linux", "cpu": "x64" }, "sha512-a6WIAp89p3kpNoYStITT9RbTbTnqarU7D8N8F2CV+4Cl9fwCOZraLVuVFvlpsW0SbIiYtEnhCZBPLoNdRkjQFw=="], + + "@rollup/rollup-win32-arm64-msvc": ["@rollup/rollup-win32-arm64-msvc@4.45.1", "", { "os": "win32", "cpu": "arm64" }, "sha512-T5Bi/NS3fQiJeYdGvRpTAP5P02kqSOpqiopwhj0uaXB6nzs5JVi2XMJb18JUSKhCOX8+UE1UKQufyD6Or48dJg=="], + + "@rollup/rollup-win32-ia32-msvc": ["@rollup/rollup-win32-ia32-msvc@4.45.1", "", { "os": "win32", "cpu": "ia32" }, "sha512-lxV2Pako3ujjuUe9jiU3/s7KSrDfH6IgTSQOnDWr9aJ92YsFd7EurmClK0ly/t8dzMkDtd04g60WX6yl0sGfdw=="], + + "@rollup/rollup-win32-x64-msvc": ["@rollup/rollup-win32-x64-msvc@4.45.1", "", { "os": "win32", "cpu": "x64" }, "sha512-M/fKi4sasCdM8i0aWJjCSFm2qEnYRR8AMLG2kxp6wD13+tMGA4Z1tVAuHkNRjud5SW2EM3naLuK35w9twvf6aA=="], + + "@shikijs/core": ["@shikijs/core@1.29.2", "", { "dependencies": { "@shikijs/engine-javascript": "1.29.2", "@shikijs/engine-oniguruma": "1.29.2", "@shikijs/types": "1.29.2", "@shikijs/vscode-textmate": "^10.0.1", "@types/hast": "^3.0.4", "hast-util-to-html": "^9.0.4" } }, "sha512-vju0lY9r27jJfOY4Z7+Rt/nIOjzJpZ3y+nYpqtUZInVoXQ/TJZcfGnNOGnKjFdVZb8qexiCuSlZRKcGfhhTTZQ=="], + + "@shikijs/engine-javascript": ["@shikijs/engine-javascript@1.29.2", "", { "dependencies": { "@shikijs/types": "1.29.2", "@shikijs/vscode-textmate": "^10.0.1", "oniguruma-to-es": "^2.2.0" } }, "sha512-iNEZv4IrLYPv64Q6k7EPpOCE/nuvGiKl7zxdq0WFuRPF5PAE9PRo2JGq/d8crLusM59BRemJ4eOqrFrC4wiQ+A=="], + + "@shikijs/engine-oniguruma": ["@shikijs/engine-oniguruma@1.29.2", "", { "dependencies": { "@shikijs/types": "1.29.2", "@shikijs/vscode-textmate": "^10.0.1" } }, "sha512-7iiOx3SG8+g1MnlzZVDYiaeHe7Ez2Kf2HrJzdmGwkRisT7r4rak0e655AcM/tF9JG/kg5fMNYlLLKglbN7gBqA=="], + + "@shikijs/langs": ["@shikijs/langs@1.29.2", "", { "dependencies": { "@shikijs/types": "1.29.2" } }, "sha512-FIBA7N3LZ+223U7cJDUYd5shmciFQlYkFXlkKVaHsCPgfVLiO+e12FmQE6Tf9vuyEsFe3dIl8qGWKXgEHL9wmQ=="], + + "@shikijs/rehype": ["@shikijs/rehype@1.29.2", "", { "dependencies": { "@shikijs/types": "1.29.2", "@types/hast": "^3.0.4", "hast-util-to-string": "^3.0.1", "shiki": "1.29.2", "unified": "^11.0.5", "unist-util-visit": "^5.0.0" } }, "sha512-sxi53HZe5XDz0s2UqF+BVN/kgHPMS9l6dcacM4Ra3ZDzCJa5rDGJ+Ukpk4LxdD1+MITBM6hoLbPfGv9StV8a5Q=="], + + "@shikijs/themes": ["@shikijs/themes@1.29.2", "", { "dependencies": { "@shikijs/types": "1.29.2" } }, "sha512-i9TNZlsq4uoyqSbluIcZkmPL9Bfi3djVxRnofUHwvx/h6SRW3cwgBC5SML7vsDcWyukY0eCzVN980rqP6qNl9g=="], + + 
"@shikijs/transformers": ["@shikijs/transformers@1.29.2", "", { "dependencies": { "@shikijs/core": "1.29.2", "@shikijs/types": "1.29.2" } }, "sha512-NHQuA+gM7zGuxGWP9/Ub4vpbwrYCrho9nQCLcCPfOe3Yc7LOYwmSuhElI688oiqIXk9dlZwDiyAG9vPBTuPJMA=="], + + "@shikijs/twoslash": ["@shikijs/twoslash@1.29.2", "", { "dependencies": { "@shikijs/core": "1.29.2", "@shikijs/types": "1.29.2", "twoslash": "^0.2.12" } }, "sha512-2S04ppAEa477tiaLfGEn1QJWbZUmbk8UoPbAEw4PifsrxkBXtAtOflIZJNtuCwz8ptc/TPxy7CO7gW4Uoi6o/g=="], + + "@shikijs/types": ["@shikijs/types@1.29.2", "", { "dependencies": { "@shikijs/vscode-textmate": "^10.0.1", "@types/hast": "^3.0.4" } }, "sha512-VJjK0eIijTZf0QSTODEXCqinjBn0joAHQ+aPSBzrv4O2d/QSbsMw+ZeSRx03kV34Hy7NzUvV/7NqfYGRLrASmw=="], + + "@shikijs/vscode-textmate": ["@shikijs/vscode-textmate@10.0.2", "", {}, "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg=="], + + "@sindresorhus/merge-streams": ["@sindresorhus/merge-streams@2.3.0", "", {}, "sha512-LtoMMhxAlorcGhmFYI+LhPgbPZCkgP6ra1YL604EeF6U98pLlQ3iWIGMdWSC+vWmPBWBNgmDBAhnAobLROJmwg=="], + + "@tailwindcss/node": ["@tailwindcss/node@4.0.7", "", { "dependencies": { "enhanced-resolve": "^5.18.1", "jiti": "^2.4.2", "tailwindcss": "4.0.7" } }, "sha512-dkFXufkbRB2mu3FPsW5xLAUWJyexpJA+/VtQj18k3SUiJVLdpgzBd1v1gRRcIpEJj7K5KpxBKfOXlZxT3ZZRuA=="], + + "@tailwindcss/oxide": ["@tailwindcss/oxide@4.0.7", "", { "optionalDependencies": { "@tailwindcss/oxide-android-arm64": "4.0.7", "@tailwindcss/oxide-darwin-arm64": "4.0.7", "@tailwindcss/oxide-darwin-x64": "4.0.7", "@tailwindcss/oxide-freebsd-x64": "4.0.7", "@tailwindcss/oxide-linux-arm-gnueabihf": "4.0.7", "@tailwindcss/oxide-linux-arm64-gnu": "4.0.7", "@tailwindcss/oxide-linux-arm64-musl": "4.0.7", "@tailwindcss/oxide-linux-x64-gnu": "4.0.7", "@tailwindcss/oxide-linux-x64-musl": "4.0.7", "@tailwindcss/oxide-win32-arm64-msvc": "4.0.7", "@tailwindcss/oxide-win32-x64-msvc": "4.0.7" } }, "sha512-yr6w5YMgjy+B+zkJiJtIYGXW+HNYOPfRPtSs+aqLnKwdEzNrGv4ZuJh9hYJ3mcA+HMq/K1rtFV+KsEr65S558g=="], + + "@tailwindcss/oxide-android-arm64": ["@tailwindcss/oxide-android-arm64@4.0.7", "", { "os": "android", "cpu": "arm64" }, "sha512-5iQXXcAeOHBZy8ASfHFm1k0O/9wR2E3tKh6+P+ilZZbQiMgu+qrnfpBWYPc3FPuQdWiWb73069WT5D+CAfx/tg=="], + + "@tailwindcss/oxide-darwin-arm64": ["@tailwindcss/oxide-darwin-arm64@4.0.7", "", { "os": "darwin", "cpu": "arm64" }, "sha512-7yGZtEc5IgVYylqK/2B0yVqoofk4UAbkn1ygNpIJZyrOhbymsfr8uUFCueTu2fUxmAYIfMZ8waWo2dLg/NgLgg=="], + + "@tailwindcss/oxide-darwin-x64": ["@tailwindcss/oxide-darwin-x64@4.0.7", "", { "os": "darwin", "cpu": "x64" }, "sha512-tPQDV20fBjb26yWbPqT1ZSoDChomMCiXTKn4jupMSoMCFyU7+OJvIY1ryjqBuY622dEBJ8LnCDDWsnj1lX9nNQ=="], + + "@tailwindcss/oxide-freebsd-x64": ["@tailwindcss/oxide-freebsd-x64@4.0.7", "", { "os": "freebsd", "cpu": "x64" }, "sha512-sZqJpTyTZiknU9LLHuByg5GKTW+u3FqM7q7myequAXxKOpAFiOfXpY710FuMY+gjzSapyRbDXJlsTQtCyiTo5w=="], + + "@tailwindcss/oxide-linux-arm-gnueabihf": ["@tailwindcss/oxide-linux-arm-gnueabihf@4.0.7", "", { "os": "linux", "cpu": "arm" }, "sha512-PBgvULgeSswjd8cbZ91gdIcIDMdc3TUHV5XemEpxlqt9M8KoydJzkuB/Dt910jYdofOIaTWRL6adG9nJICvU4A=="], + + "@tailwindcss/oxide-linux-arm64-gnu": ["@tailwindcss/oxide-linux-arm64-gnu@4.0.7", "", { "os": "linux", "cpu": "arm64" }, "sha512-By/a2yeh+e9b+C67F88ndSwVJl2A3tcUDb29FbedDi+DZ4Mr07Oqw9Y1DrDrtHIDhIZ3bmmiL1dkH2YxrtV+zw=="], + + "@tailwindcss/oxide-linux-arm64-musl": ["@tailwindcss/oxide-linux-arm64-musl@4.0.7", "", { "os": "linux", "cpu": "arm64" }, 
"sha512-WHYs3cpPEJb/ccyT20NOzopYQkl7JKncNBUbb77YFlwlXMVJLLV3nrXQKhr7DmZxz2ZXqjyUwsj2rdzd9stYdw=="], + + "@tailwindcss/oxide-linux-x64-gnu": ["@tailwindcss/oxide-linux-x64-gnu@4.0.7", "", { "os": "linux", "cpu": "x64" }, "sha512-7bP1UyuX9kFxbOwkeIJhBZNevKYPXB6xZI37v09fqi6rqRJR8elybwjMUHm54GVP+UTtJ14ueB1K54Dy1tIO6w=="], + + "@tailwindcss/oxide-linux-x64-musl": ["@tailwindcss/oxide-linux-x64-musl@4.0.7", "", { "os": "linux", "cpu": "x64" }, "sha512-gBQIV8nL/LuhARNGeroqzXymMzzW5wQzqlteVqOVoqwEfpHOP3GMird5pGFbnpY+NP0fOlsZGrxxOPQ4W/84bQ=="], + + "@tailwindcss/oxide-win32-arm64-msvc": ["@tailwindcss/oxide-win32-arm64-msvc@4.0.7", "", { "os": "win32", "cpu": "arm64" }, "sha512-aH530NFfx0kpQpvYMfWoeG03zGnRCMVlQG8do/5XeahYydz+6SIBxA1tl/cyITSJyWZHyVt6GVNkXeAD30v0Xg=="], + + "@tailwindcss/oxide-win32-x64-msvc": ["@tailwindcss/oxide-win32-x64-msvc@4.0.7", "", { "os": "win32", "cpu": "x64" }, "sha512-8Cva6bbJN7ZJx320k7vxGGdU0ewmpfS5A4PudyzUuofdi8MgeINuiiWiPQ0VZCda/GX88K6qp+6UpDZNVr8HMQ=="], + + "@tailwindcss/vite": ["@tailwindcss/vite@4.0.7", "", { "dependencies": { "@tailwindcss/node": "4.0.7", "@tailwindcss/oxide": "4.0.7", "lightningcss": "^1.29.1", "tailwindcss": "4.0.7" }, "peerDependencies": { "vite": "^5.2.0 || ^6" } }, "sha512-GYx5sxArfIMtdZCsxfya3S/efMmf4RvfqdiLUozkhmSFBNUFnYVodatpoO/en4/BsOIGvq/RB6HwcTLn9prFnQ=="], + + "@types/babel__core": ["@types/babel__core@7.20.5", "", { "dependencies": { "@babel/parser": "^7.20.7", "@babel/types": "^7.20.7", "@types/babel__generator": "*", "@types/babel__template": "*", "@types/babel__traverse": "*" } }, "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA=="], + + "@types/babel__generator": ["@types/babel__generator@7.27.0", "", { "dependencies": { "@babel/types": "^7.0.0" } }, "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg=="], + + "@types/babel__template": ["@types/babel__template@7.4.4", "", { "dependencies": { "@babel/parser": "^7.1.0", "@babel/types": "^7.0.0" } }, "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A=="], + + "@types/babel__traverse": ["@types/babel__traverse@7.20.7", "", { "dependencies": { "@babel/types": "^7.20.7" } }, "sha512-dkO5fhS7+/oos4ciWxyEyjWe48zmG6wbCheo/G2ZnHx4fs3EU6YC6UM8rk56gAjNJ9P3MTH2jo5jb92/K6wbng=="], + + "@types/d3": ["@types/d3@7.4.3", "", { "dependencies": { "@types/d3-array": "*", "@types/d3-axis": "*", "@types/d3-brush": "*", "@types/d3-chord": "*", "@types/d3-color": "*", "@types/d3-contour": "*", "@types/d3-delaunay": "*", "@types/d3-dispatch": "*", "@types/d3-drag": "*", "@types/d3-dsv": "*", "@types/d3-ease": "*", "@types/d3-fetch": "*", "@types/d3-force": "*", "@types/d3-format": "*", "@types/d3-geo": "*", "@types/d3-hierarchy": "*", "@types/d3-interpolate": "*", "@types/d3-path": "*", "@types/d3-polygon": "*", "@types/d3-quadtree": "*", "@types/d3-random": "*", "@types/d3-scale": "*", "@types/d3-scale-chromatic": "*", "@types/d3-selection": "*", "@types/d3-shape": "*", "@types/d3-time": "*", "@types/d3-time-format": "*", "@types/d3-timer": "*", "@types/d3-transition": "*", "@types/d3-zoom": "*" } }, "sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww=="], + + "@types/d3-array": ["@types/d3-array@3.2.1", "", {}, "sha512-Y2Jn2idRrLzUfAKV2LyRImR+y4oa2AntrgID95SHJxuMUrkNXmanDSed71sRNZysveJVt1hLLemQZIady0FpEg=="], + + "@types/d3-axis": ["@types/d3-axis@3.0.6", "", { "dependencies": { "@types/d3-selection": "*" } }, 
"sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw=="], + + "@types/d3-brush": ["@types/d3-brush@3.0.6", "", { "dependencies": { "@types/d3-selection": "*" } }, "sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A=="], + + "@types/d3-chord": ["@types/d3-chord@3.0.6", "", {}, "sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg=="], + + "@types/d3-color": ["@types/d3-color@3.1.3", "", {}, "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A=="], + + "@types/d3-contour": ["@types/d3-contour@3.0.6", "", { "dependencies": { "@types/d3-array": "*", "@types/geojson": "*" } }, "sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg=="], + + "@types/d3-delaunay": ["@types/d3-delaunay@6.0.4", "", {}, "sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw=="], + + "@types/d3-dispatch": ["@types/d3-dispatch@3.0.6", "", {}, "sha512-4fvZhzMeeuBJYZXRXrRIQnvUYfyXwYmLsdiN7XXmVNQKKw1cM8a5WdID0g1hVFZDqT9ZqZEY5pD44p24VS7iZQ=="], + + "@types/d3-drag": ["@types/d3-drag@3.0.7", "", { "dependencies": { "@types/d3-selection": "*" } }, "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ=="], + + "@types/d3-dsv": ["@types/d3-dsv@3.0.7", "", {}, "sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g=="], + + "@types/d3-ease": ["@types/d3-ease@3.0.2", "", {}, "sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA=="], + + "@types/d3-fetch": ["@types/d3-fetch@3.0.7", "", { "dependencies": { "@types/d3-dsv": "*" } }, "sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA=="], + + "@types/d3-force": ["@types/d3-force@3.0.10", "", {}, "sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw=="], + + "@types/d3-format": ["@types/d3-format@3.0.4", "", {}, "sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g=="], + + "@types/d3-geo": ["@types/d3-geo@3.1.0", "", { "dependencies": { "@types/geojson": "*" } }, "sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ=="], + + "@types/d3-hierarchy": ["@types/d3-hierarchy@3.1.7", "", {}, "sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg=="], + + "@types/d3-interpolate": ["@types/d3-interpolate@3.0.4", "", { "dependencies": { "@types/d3-color": "*" } }, "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA=="], + + "@types/d3-path": ["@types/d3-path@3.1.1", "", {}, "sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg=="], + + "@types/d3-polygon": ["@types/d3-polygon@3.0.2", "", {}, "sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA=="], + + "@types/d3-quadtree": ["@types/d3-quadtree@3.0.6", "", {}, "sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg=="], + + "@types/d3-random": ["@types/d3-random@3.0.3", "", {}, "sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ=="], + + "@types/d3-scale": ["@types/d3-scale@4.0.9", "", { "dependencies": { "@types/d3-time": "*" } }, 
"sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw=="], + + "@types/d3-scale-chromatic": ["@types/d3-scale-chromatic@3.1.0", "", {}, "sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ=="], + + "@types/d3-selection": ["@types/d3-selection@3.0.11", "", {}, "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w=="], + + "@types/d3-shape": ["@types/d3-shape@3.1.7", "", { "dependencies": { "@types/d3-path": "*" } }, "sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg=="], + + "@types/d3-time": ["@types/d3-time@3.0.4", "", {}, "sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g=="], + + "@types/d3-time-format": ["@types/d3-time-format@4.0.3", "", {}, "sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg=="], + + "@types/d3-timer": ["@types/d3-timer@3.0.2", "", {}, "sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw=="], + + "@types/d3-transition": ["@types/d3-transition@3.0.9", "", { "dependencies": { "@types/d3-selection": "*" } }, "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg=="], + + "@types/d3-zoom": ["@types/d3-zoom@3.0.8", "", { "dependencies": { "@types/d3-interpolate": "*", "@types/d3-selection": "*" } }, "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw=="], + + "@types/debug": ["@types/debug@4.1.12", "", { "dependencies": { "@types/ms": "*" } }, "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ=="], + + "@types/estree": ["@types/estree@1.0.8", "", {}, "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="], + + "@types/estree-jsx": ["@types/estree-jsx@1.0.5", "", { "dependencies": { "@types/estree": "*" } }, "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg=="], + + "@types/geojson": ["@types/geojson@7946.0.16", "", {}, "sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg=="], + + "@types/hast": ["@types/hast@3.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ=="], + + "@types/mdast": ["@types/mdast@4.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA=="], + + "@types/mdx": ["@types/mdx@2.0.13", "", {}, "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw=="], + + "@types/ms": ["@types/ms@2.1.0", "", {}, "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="], + + "@types/node": ["@types/node@24.0.14", "", { "dependencies": { "undici-types": "~7.8.0" } }, "sha512-4zXMWD91vBLGRtHK3YbIoFMia+1nqEz72coM42C5ETjnNCa/heoj7NT1G67iAfOqMmcfhuCZ4uNpyz8EjlAejw=="], + + "@types/react": ["@types/react@19.1.8", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g=="], + + "@types/trusted-types": ["@types/trusted-types@2.0.7", "", {}, "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw=="], + + "@types/unist": ["@types/unist@3.0.3", 
"", {}, "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q=="], + + "@typescript/vfs": ["@typescript/vfs@1.6.1", "", { "dependencies": { "debug": "^4.1.1" }, "peerDependencies": { "typescript": "*" } }, "sha512-JwoxboBh7Oz1v38tPbkrZ62ZXNHAk9bJ7c9x0eI5zBfBnBYGhURdbnh7Z4smN/MV48Y5OCcZb58n972UtbazsA=="], + + "@ungap/structured-clone": ["@ungap/structured-clone@1.3.0", "", {}, "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g=="], + + "@vanilla-extract/babel-plugin-debug-ids": ["@vanilla-extract/babel-plugin-debug-ids@1.2.2", "", { "dependencies": { "@babel/core": "^7.23.9" } }, "sha512-MeDWGICAF9zA/OZLOKwhoRlsUW+fiMwnfuOAqFVohL31Agj7Q/RBWAYweqjHLgFBCsdnr6XIfwjJnmb2znEWxw=="], + + "@vanilla-extract/compiler": ["@vanilla-extract/compiler@0.3.0", "", { "dependencies": { "@vanilla-extract/css": "^1.17.4", "@vanilla-extract/integration": "^8.0.4", "vite": "^5.0.0 || ^6.0.0", "vite-node": "^3.2.2" } }, "sha512-8EbPmDMXhY9NrN38Kh8xYDENgBk4i6s6ce4p7E9F3kHtCqxtEgfaKSNS08z/SVCTmaX3IB3N/kGSO0gr+APffg=="], + + "@vanilla-extract/css": ["@vanilla-extract/css@1.17.4", "", { "dependencies": { "@emotion/hash": "^0.9.0", "@vanilla-extract/private": "^1.0.9", "css-what": "^6.1.0", "cssesc": "^3.0.0", "csstype": "^3.0.7", "dedent": "^1.5.3", "deep-object-diff": "^1.1.9", "deepmerge": "^4.2.2", "lru-cache": "^10.4.3", "media-query-parser": "^2.0.2", "modern-ahocorasick": "^1.0.0", "picocolors": "^1.0.0" } }, "sha512-m3g9nQDWPtL+sTFdtCGRMI1Vrp86Ay4PBYq1Bo7Bnchj5ElNtAJpOqD+zg+apthVA4fB7oVpMWNjwpa6ElDWFQ=="], + + "@vanilla-extract/dynamic": ["@vanilla-extract/dynamic@2.1.5", "", { "dependencies": { "@vanilla-extract/private": "^1.0.9" } }, "sha512-QGIFGb1qyXQkbzx6X6i3+3LMc/iv/ZMBttMBL+Wm/DetQd36KsKsFg5CtH3qy+1hCA/5w93mEIIAiL4fkM8ycw=="], + + "@vanilla-extract/integration": ["@vanilla-extract/integration@8.0.4", "", { "dependencies": { "@babel/core": "^7.23.9", "@babel/plugin-syntax-typescript": "^7.23.3", "@vanilla-extract/babel-plugin-debug-ids": "^1.2.2", "@vanilla-extract/css": "^1.17.4", "dedent": "^1.5.3", "esbuild": "npm:esbuild@>=0.17.6 <0.26.0", "eval": "0.1.8", "find-up": "^5.0.0", "javascript-stringify": "^2.0.1", "mlly": "^1.4.2" } }, "sha512-cmOb7tR+g3ulKvFtSbmdw3YUyIS1d7MQqN+FcbwNhdieyno5xzUyfDCMjeWJhmCSMvZ6WlinkrOkgs6SHB+FRg=="], + + "@vanilla-extract/private": ["@vanilla-extract/private@1.0.9", "", {}, "sha512-gT2jbfZuaaCLrAxwXbRgIhGhcXbRZCG3v4TTUnjw0EJ7ArdBRxkq4msNJkbuRkCgfIK5ATmprB5t9ljvLeFDEA=="], + + "@vanilla-extract/vite-plugin": ["@vanilla-extract/vite-plugin@5.1.0", "", { "dependencies": { "@vanilla-extract/compiler": "^0.3.0", "@vanilla-extract/integration": "^8.0.4" }, "peerDependencies": { "vite": "^5.0.0 || ^6.0.0" } }, "sha512-BzVdmBD+FUyJnY6I29ZezwtDBc1B78l+VvHvIgoJYbgfPj0hvY0RmrGL8B4oNNGY/lOt7KgQflXY5kBMd3MGZg=="], + + "@vitejs/plugin-react": ["@vitejs/plugin-react@4.6.0", "", { "dependencies": { "@babel/core": "^7.27.4", "@babel/plugin-transform-react-jsx-self": "^7.27.1", "@babel/plugin-transform-react-jsx-source": "^7.27.1", "@rolldown/pluginutils": "1.0.0-beta.19", "@types/babel__core": "^7.20.5", "react-refresh": "^0.17.0" }, "peerDependencies": { "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0-beta.0" } }, "sha512-5Kgff+m8e2PB+9j51eGHEpn5kUzRKH2Ry0qGoe8ItJg7pqnkPrYPkDQZGgGmTa0EGarHrkjLvOdU3b1fzI8otQ=="], + + "acorn": ["acorn@8.15.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg=="], + + "acorn-jsx": 
["acorn-jsx@5.3.2", "", { "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ=="], + + "ansi-regex": ["ansi-regex@6.1.0", "", {}, "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA=="], + + "ansi-styles": ["ansi-styles@6.2.1", "", {}, "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug=="], + + "aria-hidden": ["aria-hidden@1.2.6", "", { "dependencies": { "tslib": "^2.0.0" } }, "sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA=="], + + "astring": ["astring@1.9.0", "", { "bin": { "astring": "bin/astring" } }, "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg=="], + + "autoprefixer": ["autoprefixer@10.4.21", "", { "dependencies": { "browserslist": "^4.24.4", "caniuse-lite": "^1.0.30001702", "fraction.js": "^4.3.7", "normalize-range": "^0.1.2", "picocolors": "^1.1.1", "postcss-value-parser": "^4.2.0" }, "peerDependencies": { "postcss": "^8.1.0" }, "bin": { "autoprefixer": "bin/autoprefixer" } }, "sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ=="], + + "bail": ["bail@2.0.2", "", {}, "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw=="], + + "balanced-match": ["balanced-match@1.0.2", "", {}, "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="], + + "base64-js": ["base64-js@1.5.1", "", {}, "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="], + + "bcp-47-match": ["bcp-47-match@2.0.3", "", {}, "sha512-JtTezzbAibu8G0R9op9zb3vcWZd9JF6M0xOYGPn0fNCd7wOpRB1mU2mH9T8gaBGbAAyIIVgB2G7xG0GP98zMAQ=="], + + "bl": ["bl@5.1.0", "", { "dependencies": { "buffer": "^6.0.3", "inherits": "^2.0.4", "readable-stream": "^3.4.0" } }, "sha512-tv1ZJHLfTDnXE6tMHv73YgSJaWR2AFuPwMntBe7XL/GBFHnT0CLnsHMogfk5+GzCDC5ZWarSCYaIGATZt9dNsQ=="], + + "boolbase": ["boolbase@1.0.0", "", {}, "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww=="], + + "brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], + + "braces": ["braces@3.0.3", "", { "dependencies": { "fill-range": "^7.1.1" } }, "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA=="], + + "browserslist": ["browserslist@4.25.1", "", { "dependencies": { "caniuse-lite": "^1.0.30001726", "electron-to-chromium": "^1.5.173", "node-releases": "^2.0.19", "update-browserslist-db": "^1.1.3" }, "bin": { "browserslist": "cli.js" } }, "sha512-KGj0KoOMXLpSNkkEI6Z6mShmQy0bc1I+T7K9N81k4WWMrfz+6fQ6es80B/YLAeRoKvjYE1YSHHOW1qe9xIVzHw=="], + + "buffer": ["buffer@6.0.3", "", { "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.2.1" } }, "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA=="], + + "bytes": ["bytes@3.1.2", "", {}, "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="], + + "cac": ["cac@6.7.14", "", {}, "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ=="], + + "caniuse-lite": ["caniuse-lite@1.0.30001727", "", {}, 
"sha512-pB68nIHmbN6L/4C6MH1DokyR3bYqFwjaSs/sWDHGj4CTcFtQUQMuJftVwWkXq7mNWOybD3KhUv3oWHoGxgP14Q=="], + + "ccount": ["ccount@2.0.1", "", {}, "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg=="], + + "chalk": ["chalk@5.4.1", "", {}, "sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w=="], + + "character-entities": ["character-entities@2.0.2", "", {}, "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ=="], + + "character-entities-html4": ["character-entities-html4@2.1.0", "", {}, "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA=="], + + "character-entities-legacy": ["character-entities-legacy@3.0.0", "", {}, "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ=="], + + "character-reference-invalid": ["character-reference-invalid@2.0.1", "", {}, "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw=="], + + "chevrotain": ["chevrotain@11.0.3", "", { "dependencies": { "@chevrotain/cst-dts-gen": "11.0.3", "@chevrotain/gast": "11.0.3", "@chevrotain/regexp-to-ast": "11.0.3", "@chevrotain/types": "11.0.3", "@chevrotain/utils": "11.0.3", "lodash-es": "4.17.21" } }, "sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw=="], + + "chevrotain-allstar": ["chevrotain-allstar@0.3.1", "", { "dependencies": { "lodash-es": "^4.17.21" }, "peerDependencies": { "chevrotain": "^11.0.0" } }, "sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw=="], + + "chroma-js": ["chroma-js@3.1.2", "", {}, "sha512-IJnETTalXbsLx1eKEgx19d5L6SRM7cH4vINw/99p/M11HCuXGRWL+6YmCm7FWFGIo6dtWuQoQi1dc5yQ7ESIHg=="], + + "cli-cursor": ["cli-cursor@4.0.0", "", { "dependencies": { "restore-cursor": "^4.0.0" } }, "sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg=="], + + "cli-spinners": ["cli-spinners@2.9.2", "", {}, "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg=="], + + "clsx": ["clsx@2.1.1", "", {}, "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA=="], + + "collapse-white-space": ["collapse-white-space@2.1.0", "", {}, "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw=="], + + "color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], + + "color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="], + + "comma-separated-tokens": ["comma-separated-tokens@2.0.3", "", {}, "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg=="], + + "commander": ["commander@8.3.0", "", {}, "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww=="], + + "compressible": ["compressible@2.0.18", "", { "dependencies": { "mime-db": ">= 1.43.0 < 2" } }, "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg=="], + + "compression": ["compression@1.8.0", "", { "dependencies": { "bytes": "3.1.2", "compressible": "~2.0.18", "debug": "2.6.9", "negotiator": "~0.6.4", "on-headers": "~1.0.2", "safe-buffer": "5.2.1", "vary": 
"~1.1.2" } }, "sha512-k6WLKfunuqCYD3t6AsuPGvQWaKwuLLh2/xHNcX4qE+vIfDNXpSqnrhwA7O53R7WVQUnt8dVAIW+YHr7xTgOgGA=="], + + "confbox": ["confbox@0.1.8", "", {}, "sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w=="], + + "convert-source-map": ["convert-source-map@2.0.0", "", {}, "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg=="], + + "cookie": ["cookie@1.0.2", "", {}, "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA=="], + + "cose-base": ["cose-base@1.0.3", "", { "dependencies": { "layout-base": "^1.0.0" } }, "sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg=="], + + "create-vocs": ["create-vocs@1.0.0", "", { "dependencies": { "@clack/prompts": "^0.7.0", "cac": "^6.7.14", "detect-package-manager": "^3.0.2", "fs-extra": "^11.3.0", "picocolors": "^1.1.1" }, "bin": { "create-vocs": "_lib/bin.js" } }, "sha512-Lv1Bd3WZEgwG4nrogkM54m8viW+TWPlGivLyEi7aNb3cuKPsEfMDZ/kTbo87fzOGtsZ2yh7scO54ZmVhhgBgTw=="], + + "cross-spawn": ["cross-spawn@7.0.6", "", { "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", "which": "^2.0.1" } }, "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA=="], + + "css-selector-parser": ["css-selector-parser@3.1.3", "", {}, "sha512-gJMigczVZqYAk0hPVzx/M4Hm1D9QOtqkdQk9005TNzDIUGzo5cnHEDiKUT7jGPximL/oYb+LIitcHFQ4aKupxg=="], + + "css-what": ["css-what@6.2.2", "", {}, "sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA=="], + + "cssesc": ["cssesc@3.0.0", "", { "bin": { "cssesc": "bin/cssesc" } }, "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg=="], + + "csstype": ["csstype@3.1.3", "", {}, "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw=="], + + "cytoscape": ["cytoscape@3.32.1", "", {}, "sha512-dbeqFTLYEwlFg7UGtcZhCCG/2WayX72zK3Sq323CEX29CY81tYfVhw1MIdduCtpstB0cTOhJswWlM/OEB3Xp+Q=="], + + "cytoscape-cose-bilkent": ["cytoscape-cose-bilkent@4.1.0", "", { "dependencies": { "cose-base": "^1.0.0" }, "peerDependencies": { "cytoscape": "^3.2.0" } }, "sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ=="], + + "cytoscape-fcose": ["cytoscape-fcose@2.2.0", "", { "dependencies": { "cose-base": "^2.2.0" }, "peerDependencies": { "cytoscape": "^3.2.0" } }, "sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ=="], + + "d3": ["d3@7.9.0", "", { "dependencies": { "d3-array": "3", "d3-axis": "3", "d3-brush": "3", "d3-chord": "3", "d3-color": "3", "d3-contour": "4", "d3-delaunay": "6", "d3-dispatch": "3", "d3-drag": "3", "d3-dsv": "3", "d3-ease": "3", "d3-fetch": "3", "d3-force": "3", "d3-format": "3", "d3-geo": "3", "d3-hierarchy": "3", "d3-interpolate": "3", "d3-path": "3", "d3-polygon": "3", "d3-quadtree": "3", "d3-random": "3", "d3-scale": "4", "d3-scale-chromatic": "3", "d3-selection": "3", "d3-shape": "3", "d3-time": "3", "d3-time-format": "4", "d3-timer": "3", "d3-transition": "3", "d3-zoom": "3" } }, "sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA=="], + + "d3-array": ["d3-array@3.2.4", "", { "dependencies": { "internmap": "1 - 2" } }, "sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg=="], + + "d3-axis": ["d3-axis@3.0.0", "", {}, 
"sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw=="], + + "d3-brush": ["d3-brush@3.0.0", "", { "dependencies": { "d3-dispatch": "1 - 3", "d3-drag": "2 - 3", "d3-interpolate": "1 - 3", "d3-selection": "3", "d3-transition": "3" } }, "sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ=="], + + "d3-chord": ["d3-chord@3.0.1", "", { "dependencies": { "d3-path": "1 - 3" } }, "sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g=="], + + "d3-color": ["d3-color@3.1.0", "", {}, "sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA=="], + + "d3-contour": ["d3-contour@4.0.2", "", { "dependencies": { "d3-array": "^3.2.0" } }, "sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA=="], + + "d3-delaunay": ["d3-delaunay@6.0.4", "", { "dependencies": { "delaunator": "5" } }, "sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A=="], + + "d3-dispatch": ["d3-dispatch@3.0.1", "", {}, "sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg=="], + + "d3-drag": ["d3-drag@3.0.0", "", { "dependencies": { "d3-dispatch": "1 - 3", "d3-selection": "3" } }, "sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg=="], + + "d3-dsv": ["d3-dsv@3.0.1", "", { "dependencies": { "commander": "7", "iconv-lite": "0.6", "rw": "1" }, "bin": { "csv2json": "bin/dsv2json.js", "csv2tsv": "bin/dsv2dsv.js", "dsv2dsv": "bin/dsv2dsv.js", "dsv2json": "bin/dsv2json.js", "json2csv": "bin/json2dsv.js", "json2dsv": "bin/json2dsv.js", "json2tsv": "bin/json2dsv.js", "tsv2csv": "bin/dsv2dsv.js", "tsv2json": "bin/dsv2json.js" } }, "sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q=="], + + "d3-ease": ["d3-ease@3.0.1", "", {}, "sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w=="], + + "d3-fetch": ["d3-fetch@3.0.1", "", { "dependencies": { "d3-dsv": "1 - 3" } }, "sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw=="], + + "d3-force": ["d3-force@3.0.0", "", { "dependencies": { "d3-dispatch": "1 - 3", "d3-quadtree": "1 - 3", "d3-timer": "1 - 3" } }, "sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg=="], + + "d3-format": ["d3-format@3.1.0", "", {}, "sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA=="], + + "d3-geo": ["d3-geo@3.1.1", "", { "dependencies": { "d3-array": "2.5.0 - 3" } }, "sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q=="], + + "d3-hierarchy": ["d3-hierarchy@3.1.2", "", {}, "sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA=="], + + "d3-interpolate": ["d3-interpolate@3.0.1", "", { "dependencies": { "d3-color": "1 - 3" } }, "sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g=="], + + "d3-path": ["d3-path@3.1.0", "", {}, "sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ=="], + + "d3-polygon": ["d3-polygon@3.0.1", "", {}, "sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg=="], + + "d3-quadtree": ["d3-quadtree@3.0.1", "", {}, 
"sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw=="], + + "d3-random": ["d3-random@3.0.1", "", {}, "sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ=="], + + "d3-sankey": ["d3-sankey@0.12.3", "", { "dependencies": { "d3-array": "1 - 2", "d3-shape": "^1.2.0" } }, "sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ=="], + + "d3-scale": ["d3-scale@4.0.2", "", { "dependencies": { "d3-array": "2.10.0 - 3", "d3-format": "1 - 3", "d3-interpolate": "1.2.0 - 3", "d3-time": "2.1.1 - 3", "d3-time-format": "2 - 4" } }, "sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ=="], + + "d3-scale-chromatic": ["d3-scale-chromatic@3.1.0", "", { "dependencies": { "d3-color": "1 - 3", "d3-interpolate": "1 - 3" } }, "sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ=="], + + "d3-selection": ["d3-selection@3.0.0", "", {}, "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ=="], + + "d3-shape": ["d3-shape@3.2.0", "", { "dependencies": { "d3-path": "^3.1.0" } }, "sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA=="], + + "d3-time": ["d3-time@3.1.0", "", { "dependencies": { "d3-array": "2 - 3" } }, "sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q=="], + + "d3-time-format": ["d3-time-format@4.1.0", "", { "dependencies": { "d3-time": "1 - 3" } }, "sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg=="], + + "d3-timer": ["d3-timer@3.0.1", "", {}, "sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA=="], + + "d3-transition": ["d3-transition@3.0.1", "", { "dependencies": { "d3-color": "1 - 3", "d3-dispatch": "1 - 3", "d3-ease": "1 - 3", "d3-interpolate": "1 - 3", "d3-timer": "1 - 3" }, "peerDependencies": { "d3-selection": "2 - 3" } }, "sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w=="], + + "d3-zoom": ["d3-zoom@3.0.0", "", { "dependencies": { "d3-dispatch": "1 - 3", "d3-drag": "2 - 3", "d3-interpolate": "1 - 3", "d3-selection": "2 - 3", "d3-transition": "2 - 3" } }, "sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw=="], + + "dagre-d3-es": ["dagre-d3-es@7.0.11", "", { "dependencies": { "d3": "^7.9.0", "lodash-es": "^4.17.21" } }, "sha512-tvlJLyQf834SylNKax8Wkzco/1ias1OPw8DcUMDE7oUIoSEW25riQVuiu/0OWEFqT0cxHT3Pa9/D82Jr47IONw=="], + + "dayjs": ["dayjs@1.11.13", "", {}, "sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg=="], + + "debug": ["debug@2.6.9", "", { "dependencies": { "ms": "2.0.0" } }, "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA=="], + + "decode-named-character-reference": ["decode-named-character-reference@1.2.0", "", { "dependencies": { "character-entities": "^2.0.0" } }, "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q=="], + + "dedent": ["dedent@1.6.0", "", { "peerDependencies": { "babel-plugin-macros": "^3.1.0" }, "optionalPeers": ["babel-plugin-macros"] }, "sha512-F1Z+5UCFpmQUzJa11agbyPVMbpgT/qA3/SKyJ1jyBgm7dUcUEa8v9JwDkerSQXfakBwFljIxhOJqGkjUwZ9FSA=="], + + "deep-object-diff": ["deep-object-diff@1.1.9", "", {}, 
"sha512-Rn+RuwkmkDwCi2/oXOFS9Gsr5lJZu/yTGpK7wAaAIE75CC+LCGEZHpY6VQJa/RoJcrmaA/docWJZvYohlNkWPA=="], + + "deepmerge": ["deepmerge@4.3.1", "", {}, "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A=="], + + "delaunator": ["delaunator@5.0.1", "", { "dependencies": { "robust-predicates": "^3.0.2" } }, "sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw=="], + + "depd": ["depd@2.0.0", "", {}, "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw=="], + + "dequal": ["dequal@2.0.3", "", {}, "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA=="], + + "destroy": ["destroy@1.2.0", "", {}, "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg=="], + + "detect-libc": ["detect-libc@2.0.4", "", {}, "sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA=="], + + "detect-node-es": ["detect-node-es@1.1.0", "", {}, "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ=="], + + "detect-package-manager": ["detect-package-manager@3.0.2", "", { "dependencies": { "execa": "^5.1.1" } }, "sha512-8JFjJHutStYrfWwzfretQoyNGoZVW1Fsrp4JO9spa7h/fBfwgTMEIy4/LBzRDGsxwVPHU0q+T9YvwLDJoOApLQ=="], + + "devlop": ["devlop@1.1.0", "", { "dependencies": { "dequal": "^2.0.0" } }, "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA=="], + + "direction": ["direction@2.0.1", "", { "bin": { "direction": "cli.js" } }, "sha512-9S6m9Sukh1cZNknO1CWAr2QAWsbKLafQiyM5gZ7VgXHeuaoUwffKN4q6NC4A/Mf9iiPlOXQEKW/Mv/mh9/3YFA=="], + + "dompurify": ["dompurify@3.2.6", "", { "optionalDependencies": { "@types/trusted-types": "^2.0.7" } }, "sha512-/2GogDQlohXPZe6D6NOgQvXLPSYBqIWMnZ8zzOhn09REE4eyAzb+Hed3jhoM9OkuaJ8P6ZGTTVWQKAi8ieIzfQ=="], + + "eastasianwidth": ["eastasianwidth@0.2.0", "", {}, "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="], + + "ee-first": ["ee-first@1.1.1", "", {}, "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="], + + "electron-to-chromium": ["electron-to-chromium@1.5.186", "", {}, "sha512-lur7L4BFklgepaJxj4DqPk7vKbTEl0pajNlg2QjE5shefmlmBLm2HvQ7PMf1R/GvlevT/581cop33/quQcfX3A=="], + + "emoji-regex": ["emoji-regex@10.4.0", "", {}, "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw=="], + + "emoji-regex-xs": ["emoji-regex-xs@1.0.0", "", {}, "sha512-LRlerrMYoIDrT6jgpeZ2YYl/L8EulRTt5hQcYjy5AInh7HWXKimpqx68aknBFpGL2+/IcogTcaydJEgaTmOpDg=="], + + "encodeurl": ["encodeurl@2.0.0", "", {}, "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg=="], + + "enhanced-resolve": ["enhanced-resolve@5.18.2", "", { "dependencies": { "graceful-fs": "^4.2.4", "tapable": "^2.2.0" } }, "sha512-6Jw4sE1maoRJo3q8MsSIn2onJFbLTOjY9hlx4DZXmOKvLRd1Ok2kXmAGXaafL2+ijsJZ1ClYbl/pmqr9+k4iUQ=="], + + "entities": ["entities@6.0.1", "", {}, "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g=="], + + "es-module-lexer": ["es-module-lexer@1.7.0", "", {}, "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA=="], + + "esast-util-from-estree": ["esast-util-from-estree@2.0.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "devlop": "^1.0.0", "estree-util-visit": "^2.0.0", 
"unist-util-position-from-estree": "^2.0.0" } }, "sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ=="], + + "esast-util-from-js": ["esast-util-from-js@2.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "acorn": "^8.0.0", "esast-util-from-estree": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw=="], + + "esbuild": ["esbuild@0.25.6", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.25.6", "@esbuild/android-arm": "0.25.6", "@esbuild/android-arm64": "0.25.6", "@esbuild/android-x64": "0.25.6", "@esbuild/darwin-arm64": "0.25.6", "@esbuild/darwin-x64": "0.25.6", "@esbuild/freebsd-arm64": "0.25.6", "@esbuild/freebsd-x64": "0.25.6", "@esbuild/linux-arm": "0.25.6", "@esbuild/linux-arm64": "0.25.6", "@esbuild/linux-ia32": "0.25.6", "@esbuild/linux-loong64": "0.25.6", "@esbuild/linux-mips64el": "0.25.6", "@esbuild/linux-ppc64": "0.25.6", "@esbuild/linux-riscv64": "0.25.6", "@esbuild/linux-s390x": "0.25.6", "@esbuild/linux-x64": "0.25.6", "@esbuild/netbsd-arm64": "0.25.6", "@esbuild/netbsd-x64": "0.25.6", "@esbuild/openbsd-arm64": "0.25.6", "@esbuild/openbsd-x64": "0.25.6", "@esbuild/openharmony-arm64": "0.25.6", "@esbuild/sunos-x64": "0.25.6", "@esbuild/win32-arm64": "0.25.6", "@esbuild/win32-ia32": "0.25.6", "@esbuild/win32-x64": "0.25.6" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-GVuzuUwtdsghE3ocJ9Bs8PNoF13HNQ5TXbEi2AhvVb8xU1Iwt9Fos9FEamfoee+u/TOsn7GUWc04lz46n2bbTg=="], + + "escalade": ["escalade@3.2.0", "", {}, "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="], + + "escape-html": ["escape-html@1.0.3", "", {}, "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="], + + "escape-string-regexp": ["escape-string-regexp@5.0.0", "", {}, "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw=="], + + "estree-util-attach-comments": ["estree-util-attach-comments@3.0.0", "", { "dependencies": { "@types/estree": "^1.0.0" } }, "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw=="], + + "estree-util-build-jsx": ["estree-util-build-jsx@3.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "estree-walker": "^3.0.0" } }, "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ=="], + + "estree-util-is-identifier-name": ["estree-util-is-identifier-name@3.0.0", "", {}, "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg=="], + + "estree-util-scope": ["estree-util-scope@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0" } }, "sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ=="], + + "estree-util-to-js": ["estree-util-to-js@2.0.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "astring": "^1.8.0", "source-map": "^0.7.0" } }, "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg=="], + + "estree-util-value-to-estree": ["estree-util-value-to-estree@3.4.0", "", { "dependencies": { "@types/estree": "^1.0.0" } }, "sha512-Zlp+gxis+gCfK12d3Srl2PdX2ybsEA8ZYy6vQGVQTNNYLEGRQQ56XB64bjemN8kxIKXP1nC9ip4Z+ILy9LGzvQ=="], + + "estree-util-visit": ["estree-util-visit@2.0.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", 
"@types/unist": "^3.0.0" } }, "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww=="], + + "estree-walker": ["estree-walker@3.0.3", "", { "dependencies": { "@types/estree": "^1.0.0" } }, "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g=="], + + "etag": ["etag@1.8.1", "", {}, "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg=="], + + "eval": ["eval@0.1.8", "", { "dependencies": { "@types/node": "*", "require-like": ">= 0.1.1" } }, "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw=="], + + "execa": ["execa@5.1.1", "", { "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^6.0.0", "human-signals": "^2.1.0", "is-stream": "^2.0.0", "merge-stream": "^2.0.0", "npm-run-path": "^4.0.1", "onetime": "^5.1.2", "signal-exit": "^3.0.3", "strip-final-newline": "^2.0.0" } }, "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg=="], + + "exsolve": ["exsolve@1.0.7", "", {}, "sha512-VO5fQUzZtI6C+vx4w/4BWJpg3s/5l+6pRQEHzFRM8WFi4XffSP1Z+4qi7GbjWbvRQEbdIco5mIMq+zX4rPuLrw=="], + + "extend": ["extend@3.0.2", "", {}, "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="], + + "fast-glob": ["fast-glob@3.3.3", "", { "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", "glob-parent": "^5.1.2", "merge2": "^1.3.0", "micromatch": "^4.0.8" } }, "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg=="], + + "fastq": ["fastq@1.19.1", "", { "dependencies": { "reusify": "^1.0.4" } }, "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ=="], + + "fault": ["fault@2.0.1", "", { "dependencies": { "format": "^0.2.0" } }, "sha512-WtySTkS4OKev5JtpHXnib4Gxiurzh5NCGvWrFaZ34m6JehfTUhKZvn9njTfw48t6JumVQOmrKqpmGcdwxnhqBQ=="], + + "fdir": ["fdir@6.4.6", "", { "peerDependencies": { "picomatch": "^3 || ^4" }, "optionalPeers": ["picomatch"] }, "sha512-hiFoqpyZcfNm1yc4u8oWCf9A2c4D3QjCrks3zmoVKVxpQRzmPNar1hUJcBG2RQHvEVGDN+Jm81ZheVLAQMK6+w=="], + + "fill-range": ["fill-range@7.1.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="], + + "find-up": ["find-up@5.0.0", "", { "dependencies": { "locate-path": "^6.0.0", "path-exists": "^4.0.0" } }, "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng=="], + + "foreground-child": ["foreground-child@3.3.1", "", { "dependencies": { "cross-spawn": "^7.0.6", "signal-exit": "^4.0.1" } }, "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw=="], + + "format": ["format@0.2.2", "", {}, "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww=="], + + "fraction.js": ["fraction.js@4.3.7", "", {}, "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew=="], + + "fresh": ["fresh@0.5.2", "", {}, "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q=="], + + "fs-extra": ["fs-extra@11.3.0", "", { "dependencies": { "graceful-fs": "^4.2.0", "jsonfile": "^6.0.1", "universalify": "^2.0.0" } }, "sha512-Z4XaCL6dUDHfP/jT25jJKMmtxvuwbkrD1vNSMFlo9lNLY2c5FHYSQgHPRZUjAB26TpDEoW9HCOgplrdbaPV/ew=="], + + "fsevents": ["fsevents@2.3.2", "", { "os": "darwin" 
}, "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA=="], + + "gensync": ["gensync@1.0.0-beta.2", "", {}, "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg=="], + + "get-nonce": ["get-nonce@1.0.1", "", {}, "sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q=="], + + "get-stream": ["get-stream@6.0.1", "", {}, "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg=="], + + "github-slugger": ["github-slugger@2.0.0", "", {}, "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw=="], + + "glob": ["glob@11.0.3", "", { "dependencies": { "foreground-child": "^3.3.1", "jackspeak": "^4.1.1", "minimatch": "^10.0.3", "minipass": "^7.1.2", "package-json-from-dist": "^1.0.0", "path-scurry": "^2.0.0" }, "bin": { "glob": "dist/esm/bin.mjs" } }, "sha512-2Nim7dha1KVkaiF4q6Dj+ngPPMdfvLJEOpZk/jKiUAkqKebpGAWQXAq9z1xu9HKu5lWfqw/FASuccEjyznjPaA=="], + + "glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="], + + "globals": ["globals@15.15.0", "", {}, "sha512-7ACyT3wmyp3I61S4fG682L0VA2RGD9otkqGJIwNUMF1SWUombIIk+af1unuDYgMm082aHYwD+mzJvv9Iu8dsgg=="], + + "globby": ["globby@14.1.0", "", { "dependencies": { "@sindresorhus/merge-streams": "^2.1.0", "fast-glob": "^3.3.3", "ignore": "^7.0.3", "path-type": "^6.0.0", "slash": "^5.1.0", "unicorn-magic": "^0.3.0" } }, "sha512-0Ia46fDOaT7k4og1PDW4YbodWWr3scS2vAr2lTbsplOt2WkKp0vQbkI9wKis/T5LV/dqPjO3bpS/z6GTJB82LA=="], + + "graceful-fs": ["graceful-fs@4.2.11", "", {}, "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="], + + "hachure-fill": ["hachure-fill@0.5.2", "", {}, "sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg=="], + + "hast-util-classnames": ["hast-util-classnames@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "space-separated-tokens": "^2.0.0" } }, "sha512-tI3JjoGDEBVorMAWK4jNRsfLMYmih1BUOG3VV36pH36njs1IEl7xkNrVTD2mD2yYHmQCa5R/fj61a8IAF4bRaQ=="], + + "hast-util-from-dom": ["hast-util-from-dom@5.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "hastscript": "^9.0.0", "web-namespaces": "^2.0.0" } }, "sha512-N+LqofjR2zuzTjCPzyDUdSshy4Ma6li7p/c3pA78uTwzFgENbgbUrm2ugwsOdcjI1muO+o6Dgzp9p8WHtn/39Q=="], + + "hast-util-from-html": ["hast-util-from-html@2.0.3", "", { "dependencies": { "@types/hast": "^3.0.0", "devlop": "^1.1.0", "hast-util-from-parse5": "^8.0.0", "parse5": "^7.0.0", "vfile": "^6.0.0", "vfile-message": "^4.0.0" } }, "sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw=="], + + "hast-util-from-html-isomorphic": ["hast-util-from-html-isomorphic@2.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-from-dom": "^5.0.0", "hast-util-from-html": "^2.0.0", "unist-util-remove-position": "^5.0.0" } }, "sha512-zJfpXq44yff2hmE0XmwEOzdWin5xwH+QIhMLOScpX91e/NSGPsAzNCvLQDIEPyO2TXi+lBmU6hjLIhV8MwP2kw=="], + + "hast-util-from-parse5": ["hast-util-from-parse5@8.0.3", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "devlop": "^1.0.0", "hastscript": "^9.0.0", "property-information": "^7.0.0", "vfile": "^6.0.0", "vfile-location": "^5.0.0", "web-namespaces": "^2.0.0" } }, 
"sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg=="], + + "hast-util-has-property": ["hast-util-has-property@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-MNilsvEKLFpV604hwfhVStK0usFY/QmM5zX16bo7EjnAEGofr5YyI37kzopBlZJkHD4t887i+q/C8/tr5Q94cA=="], + + "hast-util-heading-rank": ["hast-util-heading-rank@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-EJKb8oMUXVHcWZTDepnr+WNbfnXKFNf9duMesmr4S8SXTJBJ9M4Yok08pu9vxdJwdlGRhVumk9mEhkEvKGifwA=="], + + "hast-util-is-element": ["hast-util-is-element@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g=="], + + "hast-util-parse-selector": ["hast-util-parse-selector@4.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A=="], + + "hast-util-select": ["hast-util-select@6.0.4", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "bcp-47-match": "^2.0.0", "comma-separated-tokens": "^2.0.0", "css-selector-parser": "^3.0.0", "devlop": "^1.0.0", "direction": "^2.0.0", "hast-util-has-property": "^3.0.0", "hast-util-to-string": "^3.0.0", "hast-util-whitespace": "^3.0.0", "nth-check": "^2.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "unist-util-visit": "^5.0.0", "zwitch": "^2.0.0" } }, "sha512-RqGS1ZgI0MwxLaKLDxjprynNzINEkRHY2i8ln4DDjgv9ZhcYVIHN9rlpiYsqtFwrgpYU361SyWDQcGNIBVu3lw=="], + + "hast-util-to-estree": ["hast-util-to-estree@3.1.3", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "devlop": "^1.0.0", "estree-util-attach-comments": "^3.0.0", "estree-util-is-identifier-name": "^3.0.0", "hast-util-whitespace": "^3.0.0", "mdast-util-mdx-expression": "^2.0.0", "mdast-util-mdx-jsx": "^3.0.0", "mdast-util-mdxjs-esm": "^2.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "style-to-js": "^1.0.0", "unist-util-position": "^5.0.0", "zwitch": "^2.0.0" } }, "sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w=="], + + "hast-util-to-html": ["hast-util-to-html@9.0.5", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "ccount": "^2.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-whitespace": "^3.0.0", "html-void-elements": "^3.0.0", "mdast-util-to-hast": "^13.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "stringify-entities": "^4.0.0", "zwitch": "^2.0.4" } }, "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw=="], + + "hast-util-to-jsx-runtime": ["hast-util-to-jsx-runtime@2.3.6", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "comma-separated-tokens": "^2.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "hast-util-whitespace": "^3.0.0", "mdast-util-mdx-expression": "^2.0.0", "mdast-util-mdx-jsx": "^3.0.0", "mdast-util-mdxjs-esm": "^2.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "style-to-js": "^1.0.0", "unist-util-position": "^5.0.0", "vfile-message": "^4.0.0" } }, "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg=="], + + "hast-util-to-string": ["hast-util-to-string@3.0.1", "", { "dependencies": { "@types/hast": "^3.0.0" 
} }, "sha512-XelQVTDWvqcl3axRfI0xSeoVKzyIFPwsAGSLIsKdJKQMXDYJS4WYrBNF/8J7RdhIcFI2BOHgAifggsvsxp/3+A=="], + + "hast-util-to-text": ["hast-util-to-text@4.0.2", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "hast-util-is-element": "^3.0.0", "unist-util-find-after": "^5.0.0" } }, "sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A=="], + + "hast-util-whitespace": ["hast-util-whitespace@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw=="], + + "hastscript": ["hastscript@8.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-parse-selector": "^4.0.0", "property-information": "^6.0.0", "space-separated-tokens": "^2.0.0" } }, "sha512-dMOtzCEd3ABUeSIISmrETiKuyydk1w0pa+gE/uormcTpSYuaNJPbX1NU3JLyscSLjwAQM8bWMhhIlnCqnRvDTw=="], + + "hono": ["hono@4.8.5", "", {}, "sha512-Up2cQbtNz1s111qpnnECdTGqSIUIhZJMLikdKkshebQSEBcoUKq6XJayLGqSZWidiH0zfHRCJqFu062Mz5UuRA=="], + + "html-void-elements": ["html-void-elements@3.0.0", "", {}, "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg=="], + + "http-errors": ["http-errors@2.0.0", "", { "dependencies": { "depd": "2.0.0", "inherits": "2.0.4", "setprototypeof": "1.2.0", "statuses": "2.0.1", "toidentifier": "1.0.1" } }, "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ=="], + + "human-signals": ["human-signals@2.1.0", "", {}, "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw=="], + + "iconv-lite": ["iconv-lite@0.6.3", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw=="], + + "ieee754": ["ieee754@1.2.1", "", {}, "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA=="], + + "ignore": ["ignore@7.0.5", "", {}, "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg=="], + + "inherits": ["inherits@2.0.4", "", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="], + + "inline-style-parser": ["inline-style-parser@0.2.4", "", {}, "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q=="], + + "internmap": ["internmap@1.0.1", "", {}, "sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw=="], + + "is-alphabetical": ["is-alphabetical@2.0.1", "", {}, "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ=="], + + "is-alphanumerical": ["is-alphanumerical@2.0.1", "", { "dependencies": { "is-alphabetical": "^2.0.0", "is-decimal": "^2.0.0" } }, "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw=="], + + "is-decimal": ["is-decimal@2.0.1", "", {}, "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A=="], + + "is-extglob": ["is-extglob@2.1.1", "", {}, "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="], + + "is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="], + + "is-glob": ["is-glob@4.0.3", "", { "dependencies": { "is-extglob": 
"^2.1.1" } }, "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg=="], + + "is-hexadecimal": ["is-hexadecimal@2.0.1", "", {}, "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg=="], + + "is-interactive": ["is-interactive@2.0.0", "", {}, "sha512-qP1vozQRI+BMOPcjFzrjXuQvdak2pHNUMZoeG2eRbiSqyvbEf/wQtEOTOX1guk6E3t36RkaqiSt8A/6YElNxLQ=="], + + "is-number": ["is-number@7.0.0", "", {}, "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="], + + "is-plain-obj": ["is-plain-obj@4.1.0", "", {}, "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg=="], + + "is-stream": ["is-stream@2.0.1", "", {}, "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="], + + "is-unicode-supported": ["is-unicode-supported@1.3.0", "", {}, "sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ=="], + + "isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], + + "jackspeak": ["jackspeak@4.1.1", "", { "dependencies": { "@isaacs/cliui": "^8.0.2" } }, "sha512-zptv57P3GpL+O0I7VdMJNBZCu+BPHVQUk55Ft8/QCJjTVxrnJHuVuX/0Bl2A6/+2oyR/ZMEuFKwmzqqZ/U5nPQ=="], + + "javascript-stringify": ["javascript-stringify@2.1.0", "", {}, "sha512-JVAfqNPTvNq3sB/VHQJAFxN/sPgKnsKrCwyRt15zwNCdrMMJDdcEOdubuy+DuJYYdm0ox1J4uzEuYKkN+9yhVg=="], + + "jiti": ["jiti@2.4.2", "", { "bin": { "jiti": "lib/jiti-cli.mjs" } }, "sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A=="], + + "js-tokens": ["js-tokens@4.0.0", "", {}, "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="], + + "jsesc": ["jsesc@3.1.0", "", { "bin": { "jsesc": "bin/jsesc" } }, "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA=="], + + "json5": ["json5@2.2.3", "", { "bin": { "json5": "lib/cli.js" } }, "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg=="], + + "jsonfile": ["jsonfile@6.1.0", "", { "dependencies": { "universalify": "^2.0.0" }, "optionalDependencies": { "graceful-fs": "^4.1.6" } }, "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ=="], + + "katex": ["katex@0.16.22", "", { "dependencies": { "commander": "^8.3.0" }, "bin": { "katex": "cli.js" } }, "sha512-XCHRdUw4lf3SKBaJe4EvgqIuWwkPSo9XoeO8GjQW94Bp7TWv9hNhzZjZ+OH9yf1UmLygb7DIT5GSFQiyt16zYg=="], + + "khroma": ["khroma@2.1.0", "", {}, "sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw=="], + + "kolorist": ["kolorist@1.8.0", "", {}, "sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ=="], + + "langium": ["langium@3.3.1", "", { "dependencies": { "chevrotain": "~11.0.3", "chevrotain-allstar": "~0.3.0", "vscode-languageserver": "~9.0.1", "vscode-languageserver-textdocument": "~1.0.11", "vscode-uri": "~3.0.8" } }, "sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w=="], + + "layout-base": ["layout-base@1.0.2", "", {}, "sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg=="], + + "lightningcss": ["lightningcss@1.30.1", "", { "dependencies": { "detect-libc": "^2.0.3" }, "optionalDependencies": { "lightningcss-darwin-arm64": 
"1.30.1", "lightningcss-darwin-x64": "1.30.1", "lightningcss-freebsd-x64": "1.30.1", "lightningcss-linux-arm-gnueabihf": "1.30.1", "lightningcss-linux-arm64-gnu": "1.30.1", "lightningcss-linux-arm64-musl": "1.30.1", "lightningcss-linux-x64-gnu": "1.30.1", "lightningcss-linux-x64-musl": "1.30.1", "lightningcss-win32-arm64-msvc": "1.30.1", "lightningcss-win32-x64-msvc": "1.30.1" } }, "sha512-xi6IyHML+c9+Q3W0S4fCQJOym42pyurFiJUHEcEyHS0CeKzia4yZDEsLlqOFykxOdHpNy0NmvVO31vcSqAxJCg=="], + + "lightningcss-darwin-arm64": ["lightningcss-darwin-arm64@1.30.1", "", { "os": "darwin", "cpu": "arm64" }, "sha512-c8JK7hyE65X1MHMN+Viq9n11RRC7hgin3HhYKhrMyaXflk5GVplZ60IxyoVtzILeKr+xAJwg6zK6sjTBJ0FKYQ=="], + + "lightningcss-darwin-x64": ["lightningcss-darwin-x64@1.30.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-k1EvjakfumAQoTfcXUcHQZhSpLlkAuEkdMBsI/ivWw9hL+7FtilQc0Cy3hrx0AAQrVtQAbMI7YjCgYgvn37PzA=="], + + "lightningcss-freebsd-x64": ["lightningcss-freebsd-x64@1.30.1", "", { "os": "freebsd", "cpu": "x64" }, "sha512-kmW6UGCGg2PcyUE59K5r0kWfKPAVy4SltVeut+umLCFoJ53RdCUWxcRDzO1eTaxf/7Q2H7LTquFHPL5R+Gjyig=="], + + "lightningcss-linux-arm-gnueabihf": ["lightningcss-linux-arm-gnueabihf@1.30.1", "", { "os": "linux", "cpu": "arm" }, "sha512-MjxUShl1v8pit+6D/zSPq9S9dQ2NPFSQwGvxBCYaBYLPlCWuPh9/t1MRS8iUaR8i+a6w7aps+B4N0S1TYP/R+Q=="], + + "lightningcss-linux-arm64-gnu": ["lightningcss-linux-arm64-gnu@1.30.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-gB72maP8rmrKsnKYy8XUuXi/4OctJiuQjcuqWNlJQ6jZiWqtPvqFziskH3hnajfvKB27ynbVCucKSm2rkQp4Bw=="], + + "lightningcss-linux-arm64-musl": ["lightningcss-linux-arm64-musl@1.30.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-jmUQVx4331m6LIX+0wUhBbmMX7TCfjF5FoOH6SD1CttzuYlGNVpA7QnrmLxrsub43ClTINfGSYyHe2HWeLl5CQ=="], + + "lightningcss-linux-x64-gnu": ["lightningcss-linux-x64-gnu@1.30.1", "", { "os": "linux", "cpu": "x64" }, "sha512-piWx3z4wN8J8z3+O5kO74+yr6ze/dKmPnI7vLqfSqI8bccaTGY5xiSGVIJBDd5K5BHlvVLpUB3S2YCfelyJ1bw=="], + + "lightningcss-linux-x64-musl": ["lightningcss-linux-x64-musl@1.30.1", "", { "os": "linux", "cpu": "x64" }, "sha512-rRomAK7eIkL+tHY0YPxbc5Dra2gXlI63HL+v1Pdi1a3sC+tJTcFrHX+E86sulgAXeI7rSzDYhPSeHHjqFhqfeQ=="], + + "lightningcss-win32-arm64-msvc": ["lightningcss-win32-arm64-msvc@1.30.1", "", { "os": "win32", "cpu": "arm64" }, "sha512-mSL4rqPi4iXq5YVqzSsJgMVFENoa4nGTT/GjO2c0Yl9OuQfPsIfncvLrEW6RbbB24WtZ3xP/2CCmI3tNkNV4oA=="], + + "lightningcss-win32-x64-msvc": ["lightningcss-win32-x64-msvc@1.30.1", "", { "os": "win32", "cpu": "x64" }, "sha512-PVqXh48wh4T53F/1CCu8PIPCxLzWyCnn/9T5W1Jpmdy5h9Cwd+0YQS6/LwhHXSafuc61/xg9Lv5OrCby6a++jg=="], + + "local-pkg": ["local-pkg@1.1.1", "", { "dependencies": { "mlly": "^1.7.4", "pkg-types": "^2.0.1", "quansync": "^0.2.8" } }, "sha512-WunYko2W1NcdfAFpuLUoucsgULmgDBRkdxHxWQ7mK0cQqwPiy8E1enjuRBrhLtZkB5iScJ1XIPdhVEFK8aOLSg=="], + + "locate-path": ["locate-path@6.0.0", "", { "dependencies": { "p-locate": "^5.0.0" } }, "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw=="], + + "lodash-es": ["lodash-es@4.17.21", "", {}, "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw=="], + + "log-symbols": ["log-symbols@5.1.0", "", { "dependencies": { "chalk": "^5.0.0", "is-unicode-supported": "^1.1.0" } }, "sha512-l0x2DvrW294C9uDCoQe1VSU4gf529FkSZ6leBl4TiqZH/e+0R7hSfHQBNut2mNygDgHwvYHfFLn6Oxb3VWj2rA=="], + + "longest-streak": ["longest-streak@3.1.0", "", {}, "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g=="], + + 
"lru-cache": ["lru-cache@11.1.0", "", {}, "sha512-QIXZUBJUx+2zHUdQujWejBkcD9+cs94tLn0+YL8UrCh+D5sCXZ4c7LaEH48pNwRY3MLDgqUFyhlCyjJPf1WP0A=="], + + "mark.js": ["mark.js@8.11.1", "", {}, "sha512-1I+1qpDt4idfgLQG+BNWmrqku+7/2bi5nLf4YwF8y8zXvmfiTBY3PV3ZibfrjBueCByROpuBjLLFCajqkgYoLQ=="], + + "markdown-extensions": ["markdown-extensions@2.0.0", "", {}, "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q=="], + + "markdown-table": ["markdown-table@3.0.4", "", {}, "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw=="], + + "marked": ["marked@16.0.0", "", { "bin": { "marked": "bin/marked.js" } }, "sha512-MUKMXDjsD/eptB7GPzxo4xcnLS6oo7/RHimUMHEDRhUooPwmN9BEpMl7AEOJv3bmso169wHI2wUF9VQgL7zfmA=="], + + "mdast-util-directive": ["mdast-util-directive@3.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "ccount": "^2.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "parse-entities": "^4.0.0", "stringify-entities": "^4.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-I3fNFt+DHmpWCYAT7quoM6lHf9wuqtI+oCOfvILnoicNIqjh5E3dEJWiXuYME2gNe8vl1iMQwyUHa7bgFmak6Q=="], + + "mdast-util-find-and-replace": ["mdast-util-find-and-replace@3.0.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "escape-string-regexp": "^5.0.0", "unist-util-is": "^6.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg=="], + + "mdast-util-from-markdown": ["mdast-util-from-markdown@2.0.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "mdast-util-to-string": "^4.0.0", "micromark": "^4.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-decode-string": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "unist-util-stringify-position": "^4.0.0" } }, "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA=="], + + "mdast-util-frontmatter": ["mdast-util-frontmatter@2.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "escape-string-regexp": "^5.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "micromark-extension-frontmatter": "^2.0.0" } }, "sha512-LRqI9+wdgC25P0URIJY9vwocIzCcksduHQ9OF2joxQoyTNVduwLAFUzjoopuRJbJAReaKrNQKAZKL3uCMugWJA=="], + + "mdast-util-gfm": ["mdast-util-gfm@3.1.0", "", { "dependencies": { "mdast-util-from-markdown": "^2.0.0", "mdast-util-gfm-autolink-literal": "^2.0.0", "mdast-util-gfm-footnote": "^2.0.0", "mdast-util-gfm-strikethrough": "^2.0.0", "mdast-util-gfm-table": "^2.0.0", "mdast-util-gfm-task-list-item": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ=="], + + "mdast-util-gfm-autolink-literal": ["mdast-util-gfm-autolink-literal@2.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "ccount": "^2.0.0", "devlop": "^1.0.0", "mdast-util-find-and-replace": "^3.0.0", "micromark-util-character": "^2.0.0" } }, "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ=="], + + "mdast-util-gfm-footnote": ["mdast-util-gfm-footnote@2.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.1.0", "mdast-util-from-markdown": 
"^2.0.0", "mdast-util-to-markdown": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0" } }, "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ=="], + + "mdast-util-gfm-strikethrough": ["mdast-util-gfm-strikethrough@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg=="], + + "mdast-util-gfm-table": ["mdast-util-gfm-table@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "markdown-table": "^3.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg=="], + + "mdast-util-gfm-task-list-item": ["mdast-util-gfm-task-list-item@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ=="], + + "mdast-util-mdx": ["mdast-util-mdx@3.0.0", "", { "dependencies": { "mdast-util-from-markdown": "^2.0.0", "mdast-util-mdx-expression": "^2.0.0", "mdast-util-mdx-jsx": "^3.0.0", "mdast-util-mdxjs-esm": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w=="], + + "mdast-util-mdx-expression": ["mdast-util-mdx-expression@2.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ=="], + + "mdast-util-mdx-jsx": ["mdast-util-mdx-jsx@3.2.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "ccount": "^2.0.0", "devlop": "^1.1.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "parse-entities": "^4.0.0", "stringify-entities": "^4.0.0", "unist-util-stringify-position": "^4.0.0", "vfile-message": "^4.0.0" } }, "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q=="], + + "mdast-util-mdxjs-esm": ["mdast-util-mdxjs-esm@2.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg=="], + + "mdast-util-phrasing": ["mdast-util-phrasing@4.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "unist-util-is": "^6.0.0" } }, "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w=="], + + "mdast-util-to-hast": ["mdast-util-to-hast@13.2.0", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "@ungap/structured-clone": "^1.0.0", "devlop": "^1.0.0", "micromark-util-sanitize-uri": "^2.0.0", "trim-lines": "^3.0.0", "unist-util-position": "^5.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0" } }, "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA=="], + + "mdast-util-to-markdown": ["mdast-util-to-markdown@2.1.2", "", { "dependencies": { 
"@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "longest-streak": "^3.0.0", "mdast-util-phrasing": "^4.0.0", "mdast-util-to-string": "^4.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-decode-string": "^2.0.0", "unist-util-visit": "^5.0.0", "zwitch": "^2.0.0" } }, "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA=="], + + "mdast-util-to-string": ["mdast-util-to-string@4.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0" } }, "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg=="], + + "media-query-parser": ["media-query-parser@2.0.2", "", { "dependencies": { "@babel/runtime": "^7.12.5" } }, "sha512-1N4qp+jE0pL5Xv4uEcwVUhIkwdUO3S/9gML90nqKA7v7FcOS5vUtatfzok9S9U1EJU8dHWlcv95WLnKmmxZI9w=="], + + "merge-stream": ["merge-stream@2.0.0", "", {}, "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w=="], + + "merge2": ["merge2@1.4.1", "", {}, "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="], + + "mermaid": ["mermaid@11.9.0", "", { "dependencies": { "@braintree/sanitize-url": "^7.0.4", "@iconify/utils": "^2.1.33", "@mermaid-js/parser": "^0.6.2", "@types/d3": "^7.4.3", "cytoscape": "^3.29.3", "cytoscape-cose-bilkent": "^4.1.0", "cytoscape-fcose": "^2.2.0", "d3": "^7.9.0", "d3-sankey": "^0.12.3", "dagre-d3-es": "7.0.11", "dayjs": "^1.11.13", "dompurify": "^3.2.5", "katex": "^0.16.22", "khroma": "^2.1.0", "lodash-es": "^4.17.21", "marked": "^16.0.0", "roughjs": "^4.6.6", "stylis": "^4.3.6", "ts-dedent": "^2.2.0", "uuid": "^11.1.0" } }, "sha512-YdPXn9slEwO0omQfQIsW6vS84weVQftIyyTGAZCwM//MGhPzL1+l6vO6bkf0wnP4tHigH1alZ5Ooy3HXI2gOag=="], + + "mermaid-isomorphic": ["mermaid-isomorphic@3.0.4", "", { "dependencies": { "@fortawesome/fontawesome-free": "^6.0.0", "mermaid": "^11.0.0" }, "peerDependencies": { "playwright": "1" }, "optionalPeers": ["playwright"] }, "sha512-XQTy7H1XwHK3DPEHf+ZNWiqUEd9BwX3Xws38R9Fj2gx718srmgjlZoUzHr+Tca+O+dqJOJsAJaKzCoP65QDfDg=="], + + "micromark": ["micromark@4.0.2", "", { "dependencies": { "@types/debug": "^4.0.0", "debug": "^4.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA=="], + + "micromark-core-commonmark": ["micromark-core-commonmark@2.0.3", "", { "dependencies": { "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-factory-destination": "^2.0.0", "micromark-factory-label": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-factory-title": "^2.0.0", "micromark-factory-whitespace": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-html-tag-name": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", 
"micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg=="], + + "micromark-extension-directive": ["micromark-extension-directive@3.0.2", "", { "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-factory-whitespace": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "parse-entities": "^4.0.0" } }, "sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA=="], + + "micromark-extension-frontmatter": ["micromark-extension-frontmatter@2.0.0", "", { "dependencies": { "fault": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-C4AkuM3dA58cgZha7zVnuVxBhDsbttIMiytjgsM2XbHAB2faRVaHRle40558FBN+DJcrLNCoqG5mlrpdU4cRtg=="], + + "micromark-extension-gfm": ["micromark-extension-gfm@3.0.0", "", { "dependencies": { "micromark-extension-gfm-autolink-literal": "^2.0.0", "micromark-extension-gfm-footnote": "^2.0.0", "micromark-extension-gfm-strikethrough": "^2.0.0", "micromark-extension-gfm-table": "^2.0.0", "micromark-extension-gfm-tagfilter": "^2.0.0", "micromark-extension-gfm-task-list-item": "^2.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w=="], + + "micromark-extension-gfm-autolink-literal": ["micromark-extension-gfm-autolink-literal@2.1.0", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw=="], + + "micromark-extension-gfm-footnote": ["micromark-extension-gfm-footnote@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw=="], + + "micromark-extension-gfm-strikethrough": ["micromark-extension-gfm-strikethrough@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw=="], + + "micromark-extension-gfm-table": ["micromark-extension-gfm-table@2.1.1", "", { "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg=="], + + "micromark-extension-gfm-tagfilter": ["micromark-extension-gfm-tagfilter@2.0.0", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg=="], + + "micromark-extension-gfm-task-list-item": ["micromark-extension-gfm-task-list-item@2.1.0", "", { 
"dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw=="], + + "micromark-extension-mdx-expression": ["micromark-extension-mdx-expression@3.0.1", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0", "micromark-factory-mdx-expression": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-events-to-acorn": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q=="], + + "micromark-extension-mdx-jsx": ["micromark-extension-mdx-jsx@3.0.2", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "micromark-factory-mdx-expression": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-events-to-acorn": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ=="], + + "micromark-extension-mdx-md": ["micromark-extension-mdx-md@2.0.0", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ=="], + + "micromark-extension-mdxjs": ["micromark-extension-mdxjs@3.0.0", "", { "dependencies": { "acorn": "^8.0.0", "acorn-jsx": "^5.0.0", "micromark-extension-mdx-expression": "^3.0.0", "micromark-extension-mdx-jsx": "^3.0.0", "micromark-extension-mdx-md": "^2.0.0", "micromark-extension-mdxjs-esm": "^3.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ=="], + + "micromark-extension-mdxjs-esm": ["micromark-extension-mdxjs-esm@3.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-events-to-acorn": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "unist-util-position-from-estree": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A=="], + + "micromark-factory-destination": ["micromark-factory-destination@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA=="], + + "micromark-factory-label": ["micromark-factory-label@2.0.1", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg=="], + + "micromark-factory-mdx-expression": ["micromark-factory-mdx-expression@2.0.3", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-events-to-acorn": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", 
"unist-util-position-from-estree": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ=="], + + "micromark-factory-space": ["micromark-factory-space@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg=="], + + "micromark-factory-title": ["micromark-factory-title@2.0.1", "", { "dependencies": { "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw=="], + + "micromark-factory-whitespace": ["micromark-factory-whitespace@2.0.1", "", { "dependencies": { "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ=="], + + "micromark-util-character": ["micromark-util-character@2.1.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q=="], + + "micromark-util-chunked": ["micromark-util-chunked@2.0.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA=="], + + "micromark-util-classify-character": ["micromark-util-classify-character@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q=="], + + "micromark-util-combine-extensions": ["micromark-util-combine-extensions@2.0.1", "", { "dependencies": { "micromark-util-chunked": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg=="], + + "micromark-util-decode-numeric-character-reference": ["micromark-util-decode-numeric-character-reference@2.0.2", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw=="], + + "micromark-util-decode-string": ["micromark-util-decode-string@2.0.1", "", { "dependencies": { "decode-named-character-reference": "^1.0.0", "micromark-util-character": "^2.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-symbol": "^2.0.0" } }, "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ=="], + + "micromark-util-encode": ["micromark-util-encode@2.0.1", "", {}, "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw=="], + + "micromark-util-events-to-acorn": ["micromark-util-events-to-acorn@2.0.3", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/unist": "^3.0.0", "devlop": "^1.0.0", "estree-util-visit": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg=="], + + "micromark-util-html-tag-name": ["micromark-util-html-tag-name@2.0.1", "", {}, 
"sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA=="], + + "micromark-util-normalize-identifier": ["micromark-util-normalize-identifier@2.0.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q=="], + + "micromark-util-resolve-all": ["micromark-util-resolve-all@2.0.1", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg=="], + + "micromark-util-sanitize-uri": ["micromark-util-sanitize-uri@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-symbol": "^2.0.0" } }, "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ=="], + + "micromark-util-subtokenize": ["micromark-util-subtokenize@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA=="], + + "micromark-util-symbol": ["micromark-util-symbol@2.0.1", "", {}, "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q=="], + + "micromark-util-types": ["micromark-util-types@2.0.2", "", {}, "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA=="], + + "micromatch": ["micromatch@4.0.8", "", { "dependencies": { "braces": "^3.0.3", "picomatch": "^2.3.1" } }, "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA=="], + + "mime": ["mime@1.6.0", "", { "bin": { "mime": "cli.js" } }, "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg=="], + + "mime-db": ["mime-db@1.54.0", "", {}, "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ=="], + + "mimic-fn": ["mimic-fn@2.1.0", "", {}, "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg=="], + + "mini-svg-data-uri": ["mini-svg-data-uri@1.4.4", "", { "bin": { "mini-svg-data-uri": "cli.js" } }, "sha512-r9deDe9p5FJUPZAk3A59wGH7Ii9YrjjWw0jmw/liSbHl2CHiyXj6FcDXDu2K3TjVAXqiJdaw3xxwlZZr9E6nHg=="], + + "minimatch": ["minimatch@10.0.3", "", { "dependencies": { "@isaacs/brace-expansion": "^5.0.0" } }, "sha512-IPZ167aShDZZUMdRk66cyQAW3qr0WzbHkPdMYa8bzZhlHhO3jALbKdxcaak7W9FfT2rZNpQuUu4Od7ILEpXSaw=="], + + "minipass": ["minipass@7.1.2", "", {}, "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw=="], + + "minisearch": ["minisearch@6.3.0", "", {}, "sha512-ihFnidEeU8iXzcVHy74dhkxh/dn8Dc08ERl0xwoMMGqp4+LvRSCgicb+zGqWthVokQKvCSxITlh3P08OzdTYCQ=="], + + "mlly": ["mlly@1.7.4", "", { "dependencies": { "acorn": "^8.14.0", "pathe": "^2.0.1", "pkg-types": "^1.3.0", "ufo": "^1.5.4" } }, "sha512-qmdSIPC4bDJXgZTCR7XosJiNKySV7O215tsPtDN9iEO/7q/76b/ijtgRu/+epFXSJhijtTCCGp3DWS549P3xKw=="], + + "modern-ahocorasick": ["modern-ahocorasick@1.1.0", "", {}, "sha512-sEKPVl2rM+MNVkGQt3ChdmD8YsigmXdn5NifZn6jiwn9LRJpWm8F3guhaqrJT/JOat6pwpbXEk6kv+b9DMIjsQ=="], + + "ms": ["ms@2.0.0", "", {}, "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="], + + "nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, 
"sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="], + + "negotiator": ["negotiator@0.6.4", "", {}, "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w=="], + + "node-releases": ["node-releases@2.0.19", "", {}, "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw=="], + + "normalize-range": ["normalize-range@0.1.2", "", {}, "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA=="], + + "npm-run-path": ["npm-run-path@4.0.1", "", { "dependencies": { "path-key": "^3.0.0" } }, "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw=="], + + "nth-check": ["nth-check@2.1.1", "", { "dependencies": { "boolbase": "^1.0.0" } }, "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w=="], + + "on-finished": ["on-finished@2.4.1", "", { "dependencies": { "ee-first": "1.1.1" } }, "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg=="], + + "on-headers": ["on-headers@1.0.2", "", {}, "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA=="], + + "onetime": ["onetime@5.1.2", "", { "dependencies": { "mimic-fn": "^2.1.0" } }, "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg=="], + + "oniguruma-to-es": ["oniguruma-to-es@2.3.0", "", { "dependencies": { "emoji-regex-xs": "^1.0.0", "regex": "^5.1.1", "regex-recursion": "^5.1.1" } }, "sha512-bwALDxriqfKGfUufKGGepCzu9x7nJQuoRoAFp4AnwehhC2crqrDIAP/uN2qdlsAvSMpeRC3+Yzhqc7hLmle5+g=="], + + "ora": ["ora@7.0.1", "", { "dependencies": { "chalk": "^5.3.0", "cli-cursor": "^4.0.0", "cli-spinners": "^2.9.0", "is-interactive": "^2.0.0", "is-unicode-supported": "^1.3.0", "log-symbols": "^5.1.0", "stdin-discarder": "^0.1.0", "string-width": "^6.1.0", "strip-ansi": "^7.1.0" } }, "sha512-0TUxTiFJWv+JnjWm4o9yvuskpEJLXTcng8MJuKd+SzAzp2o+OP3HWqNhB4OdJRt1Vsd9/mR0oyaEYlOnL7XIRw=="], + + "p-limit": ["p-limit@5.0.0", "", { "dependencies": { "yocto-queue": "^1.0.0" } }, "sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ=="], + + "p-locate": ["p-locate@5.0.0", "", { "dependencies": { "p-limit": "^3.0.2" } }, "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw=="], + + "package-json-from-dist": ["package-json-from-dist@1.0.1", "", {}, "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw=="], + + "package-manager-detector": ["package-manager-detector@1.3.0", "", {}, "sha512-ZsEbbZORsyHuO00lY1kV3/t72yp6Ysay6Pd17ZAlNGuGwmWDLCJxFpRs0IzfXfj1o4icJOkUEioexFHzyPurSQ=="], + + "parse-entities": ["parse-entities@4.0.2", "", { "dependencies": { "@types/unist": "^2.0.0", "character-entities-legacy": "^3.0.0", "character-reference-invalid": "^2.0.0", "decode-named-character-reference": "^1.0.0", "is-alphanumerical": "^2.0.0", "is-decimal": "^2.0.0", "is-hexadecimal": "^2.0.0" } }, "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw=="], + + "parse5": ["parse5@7.3.0", "", { "dependencies": { "entities": "^6.0.0" } }, "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw=="], + + "parseurl": ["parseurl@1.3.3", "", {}, "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="], + + 
"path-data-parser": ["path-data-parser@0.1.0", "", {}, "sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w=="], + + "path-exists": ["path-exists@4.0.0", "", {}, "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w=="], + + "path-key": ["path-key@3.1.1", "", {}, "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="], + + "path-scurry": ["path-scurry@2.0.0", "", { "dependencies": { "lru-cache": "^11.0.0", "minipass": "^7.1.2" } }, "sha512-ypGJsmGtdXUOeM5u93TyeIEfEhM6s+ljAhrk5vAvSx8uyY/02OvrZnA0YNGUrPXfpJMgI1ODd3nwz8Npx4O4cg=="], + + "path-type": ["path-type@6.0.0", "", {}, "sha512-Vj7sf++t5pBD637NSfkxpHSMfWaeig5+DKWLhcqIYx6mWQz5hdJTGDVMQiJcw1ZYkhs7AazKDGpRVji1LJCZUQ=="], + + "pathe": ["pathe@2.0.3", "", {}, "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w=="], + + "picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="], + + "picomatch": ["picomatch@4.0.3", "", {}, "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q=="], + + "pkg-types": ["pkg-types@1.3.1", "", { "dependencies": { "confbox": "^0.1.8", "mlly": "^1.7.4", "pathe": "^2.0.1" } }, "sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ=="], + + "playwright": ["playwright@1.54.1", "", { "dependencies": { "playwright-core": "1.54.1" }, "optionalDependencies": { "fsevents": "2.3.2" }, "bin": { "playwright": "cli.js" } }, "sha512-peWpSwIBmSLi6aW2auvrUtf2DqY16YYcCMO8rTVx486jKmDTJg7UAhyrraP98GB8BoPURZP8+nxO7TSd4cPr5g=="], + + "playwright-core": ["playwright-core@1.54.1", "", { "bin": { "playwright-core": "cli.js" } }, "sha512-Nbjs2zjj0htNhzgiy5wu+3w09YetDx5pkrpI/kZotDlDUaYk0HVA5xrBVPdow4SAUIlhgKcJeJg4GRKW6xHusA=="], + + "points-on-curve": ["points-on-curve@0.2.0", "", {}, "sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A=="], + + "points-on-path": ["points-on-path@0.2.1", "", { "dependencies": { "path-data-parser": "0.1.0", "points-on-curve": "0.2.0" } }, "sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g=="], + + "postcss": ["postcss@8.5.6", "", { "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg=="], + + "postcss-value-parser": ["postcss-value-parser@4.2.0", "", {}, "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="], + + "property-information": ["property-information@6.5.0", "", {}, "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig=="], + + "quansync": ["quansync@0.2.10", "", {}, "sha512-t41VRkMYbkHyCYmOvx/6URnN80H7k4X0lLdBMGsz+maAwrJQYB1djpV6vHrQIBE0WBSGqhtEHrK9U3DWWH8v7A=="], + + "queue-microtask": ["queue-microtask@1.2.3", "", {}, "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="], + + "radix-ui": ["radix-ui@1.4.2", "", { "dependencies": { "@radix-ui/primitive": "1.1.2", "@radix-ui/react-accessible-icon": "1.1.7", "@radix-ui/react-accordion": "1.2.11", "@radix-ui/react-alert-dialog": "1.1.14", "@radix-ui/react-arrow": "1.1.7", "@radix-ui/react-aspect-ratio": "1.1.7", "@radix-ui/react-avatar": "1.1.10", "@radix-ui/react-checkbox": 
"1.3.2", "@radix-ui/react-collapsible": "1.1.11", "@radix-ui/react-collection": "1.1.7", "@radix-ui/react-compose-refs": "1.1.2", "@radix-ui/react-context": "1.1.2", "@radix-ui/react-context-menu": "2.2.15", "@radix-ui/react-dialog": "1.1.14", "@radix-ui/react-direction": "1.1.1", "@radix-ui/react-dismissable-layer": "1.1.10", "@radix-ui/react-dropdown-menu": "2.1.15", "@radix-ui/react-focus-guards": "1.1.2", "@radix-ui/react-focus-scope": "1.1.7", "@radix-ui/react-form": "0.1.7", "@radix-ui/react-hover-card": "1.1.14", "@radix-ui/react-label": "2.1.7", "@radix-ui/react-menu": "2.1.15", "@radix-ui/react-menubar": "1.1.15", "@radix-ui/react-navigation-menu": "1.2.13", "@radix-ui/react-one-time-password-field": "0.1.7", "@radix-ui/react-password-toggle-field": "0.1.2", "@radix-ui/react-popover": "1.1.14", "@radix-ui/react-popper": "1.2.7", "@radix-ui/react-portal": "1.1.9", "@radix-ui/react-presence": "1.1.4", "@radix-ui/react-primitive": "2.1.3", "@radix-ui/react-progress": "1.1.7", "@radix-ui/react-radio-group": "1.3.7", "@radix-ui/react-roving-focus": "1.1.10", "@radix-ui/react-scroll-area": "1.2.9", "@radix-ui/react-select": "2.2.5", "@radix-ui/react-separator": "1.1.7", "@radix-ui/react-slider": "1.3.5", "@radix-ui/react-slot": "1.2.3", "@radix-ui/react-switch": "1.2.5", "@radix-ui/react-tabs": "1.1.12", "@radix-ui/react-toast": "1.2.14", "@radix-ui/react-toggle": "1.1.9", "@radix-ui/react-toggle-group": "1.1.10", "@radix-ui/react-toolbar": "1.1.10", "@radix-ui/react-tooltip": "1.2.7", "@radix-ui/react-use-callback-ref": "1.1.1", "@radix-ui/react-use-controllable-state": "1.2.2", "@radix-ui/react-use-effect-event": "0.0.2", "@radix-ui/react-use-escape-keydown": "1.1.1", "@radix-ui/react-use-is-hydrated": "0.1.0", "@radix-ui/react-use-layout-effect": "1.1.1", "@radix-ui/react-use-size": "1.1.1", "@radix-ui/react-visually-hidden": "1.2.3" }, "peerDependencies": { "@types/react": "*", "@types/react-dom": "*", "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react", "@types/react-dom"] }, "sha512-fT/3YFPJzf2WUpqDoQi005GS8EpCi+53VhcLaHUj5fwkPYiZAjk1mSxFvbMA8Uq71L03n+WysuYC+mlKkXxt/Q=="], + + "range-parser": ["range-parser@1.2.1", "", {}, "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="], + + "react": ["react@19.1.0", "", {}, "sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg=="], + + "react-dom": ["react-dom@19.1.0", "", { "dependencies": { "scheduler": "^0.26.0" }, "peerDependencies": { "react": "^19.1.0" } }, "sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g=="], + + "react-intersection-observer": ["react-intersection-observer@9.16.0", "", { "peerDependencies": { "react": "^17.0.0 || ^18.0.0 || ^19.0.0", "react-dom": "^17.0.0 || ^18.0.0 || ^19.0.0" }, "optionalPeers": ["react-dom"] }, "sha512-w9nJSEp+DrW9KmQmeWHQyfaP6b03v+TdXynaoA964Wxt7mdR3An11z4NNCQgL4gKSK7y1ver2Fq+JKH6CWEzUA=="], + + "react-refresh": ["react-refresh@0.17.0", "", {}, "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ=="], + + "react-remove-scroll": ["react-remove-scroll@2.7.1", "", { "dependencies": { "react-remove-scroll-bar": "^2.3.7", "react-style-singleton": "^2.2.3", "tslib": "^2.1.0", "use-callback-ref": "^1.3.3", "use-sidecar": "^1.1.3" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || 
^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-HpMh8+oahmIdOuS5aFKKY6Pyog+FNaZV/XyJOq7b4YFwsFHe5yYfdbIalI4k3vU2nSDql7YskmUseHsRrJqIPA=="], + + "react-remove-scroll-bar": ["react-remove-scroll-bar@2.3.8", "", { "dependencies": { "react-style-singleton": "^2.2.2", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" }, "optionalPeers": ["@types/react"] }, "sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q=="], + + "react-router": ["react-router@7.7.0", "", { "dependencies": { "cookie": "^1.0.1", "set-cookie-parser": "^2.6.0" }, "peerDependencies": { "react": ">=18", "react-dom": ">=18" }, "optionalPeers": ["react-dom"] }, "sha512-3FUYSwlvB/5wRJVTL/aavqHmfUKe0+Xm9MllkYgGo9eDwNdkvwlJGjpPxono1kCycLt6AnDTgjmXvK3/B4QGuw=="], + + "react-style-singleton": ["react-style-singleton@2.2.3", "", { "dependencies": { "get-nonce": "^1.0.0", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-b6jSvxvVnyptAiLjbkWLE/lOnR4lfTtDAl+eUC7RZy+QQWc6wRzIV2CE6xBuMmDxc2qIihtDCZD5NPOFl7fRBQ=="], + + "readable-stream": ["readable-stream@3.6.2", "", { "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" } }, "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA=="], + + "recma-build-jsx": ["recma-build-jsx@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "estree-util-build-jsx": "^3.0.0", "vfile": "^6.0.0" } }, "sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew=="], + + "recma-jsx": ["recma-jsx@1.0.0", "", { "dependencies": { "acorn-jsx": "^5.0.0", "estree-util-to-js": "^2.0.0", "recma-parse": "^1.0.0", "recma-stringify": "^1.0.0", "unified": "^11.0.0" } }, "sha512-5vwkv65qWwYxg+Atz95acp8DMu1JDSqdGkA2Of1j6rCreyFUE/gp15fC8MnGEuG1W68UKjM6x6+YTWIh7hZM/Q=="], + + "recma-parse": ["recma-parse@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "esast-util-from-js": "^2.0.0", "unified": "^11.0.0", "vfile": "^6.0.0" } }, "sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ=="], + + "recma-stringify": ["recma-stringify@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "estree-util-to-js": "^2.0.0", "unified": "^11.0.0", "vfile": "^6.0.0" } }, "sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g=="], + + "regex": ["regex@5.1.1", "", { "dependencies": { "regex-utilities": "^2.3.0" } }, "sha512-dN5I359AVGPnwzJm2jN1k0W9LPZ+ePvoOeVMMfqIMFz53sSwXkxaJoxr50ptnsC771lK95BnTrVSZxq0b9yCGw=="], + + "regex-recursion": ["regex-recursion@5.1.1", "", { "dependencies": { "regex": "^5.1.1", "regex-utilities": "^2.3.0" } }, "sha512-ae7SBCbzVNrIjgSbh7wMznPcQel1DNlDtzensnFxpiNpXt1U2ju/bHugH422r+4LAVS1FpW1YCwilmnNsjum9w=="], + + "regex-utilities": ["regex-utilities@2.3.0", "", {}, "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng=="], + + "rehype-autolink-headings": ["rehype-autolink-headings@7.1.0", "", { "dependencies": { "@types/hast": "^3.0.0", "@ungap/structured-clone": "^1.0.0", "hast-util-heading-rank": "^3.0.0", "hast-util-is-element": "^3.0.0", "unified": "^11.0.0", "unist-util-visit": "^5.0.0" } }, "sha512-rItO/pSdvnvsP4QRB1pmPiNHUskikqtPojZKJPPPAVx9Hj8i8TwMBhofrrAYRhYOOBZH9tgmG5lPqDLuIWPWmw=="], + + "rehype-class-names": 
["rehype-class-names@2.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-classnames": "^3.0.0", "hast-util-select": "^6.0.0", "unified": "^11.0.4" } }, "sha512-jldCIiAEvXKdq8hqr5f5PzNdIDkvHC6zfKhwta9oRoMu7bn0W7qLES/JrrjBvr9rKz3nJ8x4vY1EWI+dhjHVZQ=="], + + "rehype-mermaid": ["rehype-mermaid@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-from-html-isomorphic": "^2.0.0", "hast-util-to-text": "^4.0.0", "mermaid-isomorphic": "^3.0.0", "mini-svg-data-uri": "^1.0.0", "space-separated-tokens": "^2.0.0", "unified": "^11.0.0", "unist-util-visit-parents": "^6.0.0", "vfile": "^6.0.0" }, "peerDependencies": { "playwright": "1" }, "optionalPeers": ["playwright"] }, "sha512-fxrD5E4Fa1WXUjmjNDvLOMT4XB1WaxcfycFIWiYU0yEMQhcTDElc9aDFnbDFRLxG1Cfo1I3mfD5kg4sjlWaB+Q=="], + + "rehype-recma": ["rehype-recma@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/hast": "^3.0.0", "hast-util-to-estree": "^3.0.0" } }, "sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw=="], + + "rehype-slug": ["rehype-slug@6.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "github-slugger": "^2.0.0", "hast-util-heading-rank": "^3.0.0", "hast-util-to-string": "^3.0.0", "unist-util-visit": "^5.0.0" } }, "sha512-lWyvf/jwu+oS5+hL5eClVd3hNdmwM1kAC0BUvEGD19pajQMIzcNUd/k9GsfQ+FfECvX+JE+e9/btsKH0EjJT6A=="], + + "remark-directive": ["remark-directive@3.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-directive": "^3.0.0", "micromark-extension-directive": "^3.0.0", "unified": "^11.0.0" } }, "sha512-gwglrEQEZcZYgVyG1tQuA+h58EZfq5CSULw7J90AFuCTyib1thgHPoqQ+h9iFvU6R+vnZ5oNFQR5QKgGpk741A=="], + + "remark-frontmatter": ["remark-frontmatter@5.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-frontmatter": "^2.0.0", "micromark-extension-frontmatter": "^2.0.0", "unified": "^11.0.0" } }, "sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ=="], + + "remark-gfm": ["remark-gfm@4.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-gfm": "^3.0.0", "micromark-extension-gfm": "^3.0.0", "remark-parse": "^11.0.0", "remark-stringify": "^11.0.0", "unified": "^11.0.0" } }, "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg=="], + + "remark-mdx": ["remark-mdx@3.1.0", "", { "dependencies": { "mdast-util-mdx": "^3.0.0", "micromark-extension-mdxjs": "^3.0.0" } }, "sha512-Ngl/H3YXyBV9RcRNdlYsZujAmhsxwzxpDzpDEhFBVAGthS4GDgnctpDjgFl/ULx5UEDzqtW1cyBSNKqYYrqLBA=="], + + "remark-mdx-frontmatter": ["remark-mdx-frontmatter@5.2.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "estree-util-value-to-estree": "^3.0.0", "toml": "^3.0.0", "unified": "^11.0.0", "unist-util-mdx-define": "^1.0.0", "yaml": "^2.0.0" } }, "sha512-U/hjUYTkQqNjjMRYyilJgLXSPF65qbLPdoESOkXyrwz2tVyhAnm4GUKhfXqOOS9W34M3545xEMq+aMpHgVjEeQ=="], + + "remark-parse": ["remark-parse@11.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-from-markdown": "^2.0.0", "micromark-util-types": "^2.0.0", "unified": "^11.0.0" } }, "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA=="], + + "remark-rehype": ["remark-rehype@11.1.2", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "mdast-util-to-hast": "^13.0.0", "unified": "^11.0.0", "vfile": "^6.0.0" } }, "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw=="], + + "remark-stringify": 
["remark-stringify@11.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-to-markdown": "^2.0.0", "unified": "^11.0.0" } }, "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw=="], + + "require-like": ["require-like@0.1.2", "", {}, "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A=="], + + "restore-cursor": ["restore-cursor@4.0.0", "", { "dependencies": { "onetime": "^5.1.0", "signal-exit": "^3.0.2" } }, "sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg=="], + + "reusify": ["reusify@1.1.0", "", {}, "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw=="], + + "robust-predicates": ["robust-predicates@3.0.2", "", {}, "sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg=="], + + "rollup": ["rollup@4.45.1", "", { "dependencies": { "@types/estree": "1.0.8" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.45.1", "@rollup/rollup-android-arm64": "4.45.1", "@rollup/rollup-darwin-arm64": "4.45.1", "@rollup/rollup-darwin-x64": "4.45.1", "@rollup/rollup-freebsd-arm64": "4.45.1", "@rollup/rollup-freebsd-x64": "4.45.1", "@rollup/rollup-linux-arm-gnueabihf": "4.45.1", "@rollup/rollup-linux-arm-musleabihf": "4.45.1", "@rollup/rollup-linux-arm64-gnu": "4.45.1", "@rollup/rollup-linux-arm64-musl": "4.45.1", "@rollup/rollup-linux-loongarch64-gnu": "4.45.1", "@rollup/rollup-linux-powerpc64le-gnu": "4.45.1", "@rollup/rollup-linux-riscv64-gnu": "4.45.1", "@rollup/rollup-linux-riscv64-musl": "4.45.1", "@rollup/rollup-linux-s390x-gnu": "4.45.1", "@rollup/rollup-linux-x64-gnu": "4.45.1", "@rollup/rollup-linux-x64-musl": "4.45.1", "@rollup/rollup-win32-arm64-msvc": "4.45.1", "@rollup/rollup-win32-ia32-msvc": "4.45.1", "@rollup/rollup-win32-x64-msvc": "4.45.1", "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, "sha512-4iya7Jb76fVpQyLoiVpzUrsjQ12r3dM7fIVz+4NwoYvZOShknRmiv+iu9CClZml5ZLGb0XMcYLutK6w9tgxHDw=="], + + "roughjs": ["roughjs@4.6.6", "", { "dependencies": { "hachure-fill": "^0.5.2", "path-data-parser": "^0.1.0", "points-on-curve": "^0.2.0", "points-on-path": "^0.2.1" } }, "sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ=="], + + "run-parallel": ["run-parallel@1.2.0", "", { "dependencies": { "queue-microtask": "^1.2.2" } }, "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA=="], + + "rw": ["rw@1.3.3", "", {}, "sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ=="], + + "safe-buffer": ["safe-buffer@5.2.1", "", {}, "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="], + + "safer-buffer": ["safer-buffer@2.1.2", "", {}, "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="], + + "scheduler": ["scheduler@0.26.0", "", {}, "sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA=="], + + "semver": ["semver@6.3.1", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="], + + "send": ["send@0.19.0", "", { "dependencies": { "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "etag": "~1.8.1", "fresh": "0.5.2", "http-errors": "2.0.0", "mime": "1.6.0", "ms": "2.1.3", 
"on-finished": "2.4.1", "range-parser": "~1.2.1", "statuses": "2.0.1" } }, "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw=="], + + "serve-static": ["serve-static@1.16.2", "", { "dependencies": { "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "parseurl": "~1.3.3", "send": "0.19.0" } }, "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw=="], + + "set-cookie-parser": ["set-cookie-parser@2.7.1", "", {}, "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ=="], + + "setprototypeof": ["setprototypeof@1.2.0", "", {}, "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="], + + "shebang-command": ["shebang-command@2.0.0", "", { "dependencies": { "shebang-regex": "^3.0.0" } }, "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA=="], + + "shebang-regex": ["shebang-regex@3.0.0", "", {}, "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="], + + "shiki": ["shiki@1.29.2", "", { "dependencies": { "@shikijs/core": "1.29.2", "@shikijs/engine-javascript": "1.29.2", "@shikijs/engine-oniguruma": "1.29.2", "@shikijs/langs": "1.29.2", "@shikijs/themes": "1.29.2", "@shikijs/types": "1.29.2", "@shikijs/vscode-textmate": "^10.0.1", "@types/hast": "^3.0.4" } }, "sha512-njXuliz/cP+67jU2hukkxCNuH1yUi4QfdZZY+sMr5PPrIyXSu5iTb/qYC4BiWWB0vZ+7TbdvYUCeL23zpwCfbg=="], + + "signal-exit": ["signal-exit@4.1.0", "", {}, "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw=="], + + "sisteransi": ["sisteransi@1.0.5", "", {}, "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg=="], + + "slash": ["slash@5.1.0", "", {}, "sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg=="], + + "source-map": ["source-map@0.7.4", "", {}, "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA=="], + + "source-map-js": ["source-map-js@1.2.1", "", {}, "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="], + + "space-separated-tokens": ["space-separated-tokens@2.0.2", "", {}, "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q=="], + + "statuses": ["statuses@2.0.1", "", {}, "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ=="], + + "stdin-discarder": ["stdin-discarder@0.1.0", "", { "dependencies": { "bl": "^5.0.0" } }, "sha512-xhV7w8S+bUwlPTb4bAOUQhv8/cSS5offJuX8GQGq32ONF0ZtDWKfkdomM3HMRA+LhX6um/FZ0COqlwsjD53LeQ=="], + + "string-width": ["string-width@6.1.0", "", { "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^10.2.1", "strip-ansi": "^7.0.1" } }, "sha512-k01swCJAgQmuADB0YIc+7TuatfNvTBVOoaUWJjTB9R4VJzR5vNWzf5t42ESVZFPS8xTySF7CAdV4t/aaIm3UnQ=="], + + "string-width-cjs": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], + + "string_decoder": ["string_decoder@1.3.0", "", { "dependencies": { "safe-buffer": "~5.2.0" } }, "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA=="], + + "stringify-entities": ["stringify-entities@4.0.4", "", { "dependencies": { 
"character-entities-html4": "^2.0.0", "character-entities-legacy": "^3.0.0" } }, "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg=="], + + "strip-ansi": ["strip-ansi@7.1.0", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ=="], + + "strip-ansi-cjs": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + + "strip-final-newline": ["strip-final-newline@2.0.0", "", {}, "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA=="], + + "style-to-js": ["style-to-js@1.1.17", "", { "dependencies": { "style-to-object": "1.0.9" } }, "sha512-xQcBGDxJb6jjFCTzvQtfiPn6YvvP2O8U1MDIPNfJQlWMYfktPy+iGsHE7cssjs7y84d9fQaK4UF3RIJaAHSoYA=="], + + "style-to-object": ["style-to-object@1.0.9", "", { "dependencies": { "inline-style-parser": "0.2.4" } }, "sha512-G4qppLgKu/k6FwRpHiGiKPaPTFcG3g4wNVX/Qsfu+RqQM30E7Tyu/TEgxcL9PNLF5pdRLwQdE3YKKf+KF2Dzlw=="], + + "stylis": ["stylis@4.3.6", "", {}, "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ=="], + + "tabbable": ["tabbable@6.2.0", "", {}, "sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew=="], + + "tailwindcss": ["tailwindcss@4.0.7", "", {}, "sha512-yH5bPPyapavo7L+547h3c4jcBXcrKwybQRjwdEIVAd9iXRvy/3T1CC6XSQEgZtRySjKfqvo3Cc0ZF1DTheuIdA=="], + + "tapable": ["tapable@2.2.2", "", {}, "sha512-Re10+NauLTMCudc7T5WLFLAwDhQ0JWdrMK+9B2M8zR5hRExKmsRDCBA7/aV/pNJFltmBFO5BAMlQFi/vq3nKOg=="], + + "tinyexec": ["tinyexec@1.0.1", "", {}, "sha512-5uC6DDlmeqiOwCPmK9jMSdOuZTh8bU39Ys6yidB+UTt5hfZUPGAypSgFRiEp+jbi9qH40BLDvy85jIU88wKSqw=="], + + "tinyglobby": ["tinyglobby@0.2.14", "", { "dependencies": { "fdir": "^6.4.4", "picomatch": "^4.0.2" } }, "sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ=="], + + "to-regex-range": ["to-regex-range@5.0.1", "", { "dependencies": { "is-number": "^7.0.0" } }, "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ=="], + + "toidentifier": ["toidentifier@1.0.1", "", {}, "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA=="], + + "toml": ["toml@3.0.0", "", {}, "sha512-y/mWCZinnvxjTKYhJ+pYxwD0mRLVvOtdS2Awbgxln6iEnt4rk0yBxeSBHkGJcPucRiG0e55mwWp+g/05rsrd6w=="], + + "trim-lines": ["trim-lines@3.0.1", "", {}, "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg=="], + + "trough": ["trough@2.2.0", "", {}, "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw=="], + + "ts-dedent": ["ts-dedent@2.2.0", "", {}, "sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ=="], + + "tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="], + + "twoslash": ["twoslash@0.2.12", "", { "dependencies": { "@typescript/vfs": "^1.6.0", "twoslash-protocol": "0.2.12" }, "peerDependencies": { "typescript": "*" } }, "sha512-tEHPASMqi7kqwfJbkk7hc/4EhlrKCSLcur+TcvYki3vhIfaRMXnXjaYFgXpoZRbT6GdprD4tGuVBEmTpUgLBsw=="], + + "twoslash-protocol": ["twoslash-protocol@0.2.12", "", {}, "sha512-5qZLXVYfZ9ABdjqbvPc4RWMr7PrpPaaDSeaYY55vl/w1j6H6kzsWK/urAEIXlzYlyrFmyz1UbwIt+AA0ck+wbg=="], + + 
"typescript": ["typescript@5.8.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ=="], + + "ua-parser-js": ["ua-parser-js@1.0.40", "", { "bin": { "ua-parser-js": "script/cli.js" } }, "sha512-z6PJ8Lml+v3ichVojCiB8toQJBuwR42ySM4ezjXIqXK3M0HczmKQ3LF4rhU55PfD99KEEXQG6yb7iOMyvYuHew=="], + + "ufo": ["ufo@1.6.1", "", {}, "sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA=="], + + "undici-types": ["undici-types@7.8.0", "", {}, "sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw=="], + + "unicorn-magic": ["unicorn-magic@0.3.0", "", {}, "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA=="], + + "unified": ["unified@11.0.5", "", { "dependencies": { "@types/unist": "^3.0.0", "bail": "^2.0.0", "devlop": "^1.0.0", "extend": "^3.0.0", "is-plain-obj": "^4.0.0", "trough": "^2.0.0", "vfile": "^6.0.0" } }, "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA=="], + + "unist-util-find-after": ["unist-util-find-after@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0" } }, "sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ=="], + + "unist-util-is": ["unist-util-is@6.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw=="], + + "unist-util-mdx-define": ["unist-util-mdx-define@1.1.2", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "estree-util-is-identifier-name": "^3.0.0", "estree-util-scope": "^1.0.0", "estree-walker": "^3.0.0", "vfile": "^6.0.0" } }, "sha512-9ncH7i7TN5Xn7/tzX5bE3rXgz1X/u877gYVAUB3mLeTKYJmQHmqKTDBi6BTGXV7AeolBCI9ErcVsOt2qryoD0g=="], + + "unist-util-position": ["unist-util-position@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA=="], + + "unist-util-position-from-estree": ["unist-util-position-from-estree@2.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ=="], + + "unist-util-remove-position": ["unist-util-remove-position@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-visit": "^5.0.0" } }, "sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q=="], + + "unist-util-stringify-position": ["unist-util-stringify-position@4.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ=="], + + "unist-util-visit": ["unist-util-visit@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg=="], + + "unist-util-visit-parents": ["unist-util-visit-parents@6.0.1", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0" } }, "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw=="], + + "universalify": ["universalify@2.0.1", "", {}, 
"sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw=="], + + "update-browserslist-db": ["update-browserslist-db@1.1.3", "", { "dependencies": { "escalade": "^3.2.0", "picocolors": "^1.1.1" }, "peerDependencies": { "browserslist": ">= 4.21.0" }, "bin": { "update-browserslist-db": "cli.js" } }, "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw=="], + + "use-callback-ref": ["use-callback-ref@1.3.3", "", { "dependencies": { "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-jQL3lRnocaFtu3V00JToYz/4QkNWswxijDaCVNZRiRTO3HQDLsdu1ZtmIUvV4yPp+rvWm5j0y0TG/S61cuijTg=="], + + "use-sidecar": ["use-sidecar@1.1.3", "", { "dependencies": { "detect-node-es": "^1.1.0", "tslib": "^2.0.0" }, "peerDependencies": { "@types/react": "*", "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 || ^19.0.0-rc" }, "optionalPeers": ["@types/react"] }, "sha512-Fedw0aZvkhynoPYlA5WXrMCAMm+nSWdZt6lzJQ7Ok8S6Q+VsHmHpRWndVRJ8Be0ZbkfPc5LRYH+5XrzXcEeLRQ=="], + + "use-sync-external-store": ["use-sync-external-store@1.5.0", "", { "peerDependencies": { "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-Rb46I4cGGVBmjamjphe8L/UnvJD+uPPtTkNvX5mZgqdbavhI4EbgIWJiIHXJ8bc/i9EQGPRh4DwEURJ552Do0A=="], + + "util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="], + + "uuid": ["uuid@11.1.0", "", { "bin": { "uuid": "dist/esm/bin/uuid" } }, "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A=="], + + "vary": ["vary@1.1.2", "", {}, "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg=="], + + "vfile": ["vfile@6.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "vfile-message": "^4.0.0" } }, "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q=="], + + "vfile-location": ["vfile-location@5.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "vfile": "^6.0.0" } }, "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg=="], + + "vfile-message": ["vfile-message@4.0.2", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-stringify-position": "^4.0.0" } }, "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw=="], + + "vite": ["vite@6.3.5", "", { "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.4.4", "picomatch": "^4.0.2", "postcss": "^8.5.3", "rollup": "^4.34.9", "tinyglobby": "^0.2.13" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "peerDependencies": { "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", "jiti": ">=1.21.0", "less": "*", "lightningcss": "^1.21.0", "sass": "*", "sass-embedded": "*", "stylus": "*", "sugarss": "*", "terser": "^5.16.0", "tsx": "^4.8.1", "yaml": "^2.4.2" }, "optionalPeers": ["@types/node", "jiti", "less", "lightningcss", "sass", "sass-embedded", "stylus", "sugarss", "terser", "tsx", "yaml"], "bin": { "vite": "bin/vite.js" } }, "sha512-cZn6NDFE7wdTpINgs++ZJ4N49W2vRp8LCKrn3Ob1kYNtOo21vfDoaV5GzBfLU4MovSAB8uNRm4jgzVQZ+mBzPQ=="], + + "vite-node": ["vite-node@3.2.4", "", { "dependencies": { "cac": "^6.7.14", "debug": "^4.4.1", "es-module-lexer": "^1.7.0", "pathe": "^2.0.3", "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" }, "bin": { "vite-node": "vite-node.mjs" } }, 
"sha512-EbKSKh+bh1E1IFxeO0pg1n4dvoOTt0UDiXMd/qn++r98+jPO1xtJilvXldeuQ8giIB5IkpjCgMleHMNEsGH6pg=="], + + "vocs": ["vocs@1.0.13", "", { "dependencies": { "@floating-ui/react": "^0.27.4", "@hono/node-server": "^1.13.8", "@mdx-js/react": "^3.1.0", "@mdx-js/rollup": "^3.1.0", "@noble/hashes": "^1.7.1", "@radix-ui/colors": "^3.0.0", "@radix-ui/react-accordion": "^1.2.3", "@radix-ui/react-dialog": "^1.1.6", "@radix-ui/react-icons": "^1.3.2", "@radix-ui/react-label": "^2.1.2", "@radix-ui/react-navigation-menu": "^1.2.5", "@radix-ui/react-popover": "^1.1.6", "@radix-ui/react-tabs": "^1.1.3", "@shikijs/rehype": "^1", "@shikijs/transformers": "^1", "@shikijs/twoslash": "^1", "@tailwindcss/vite": "4.0.7", "@vanilla-extract/css": "^1.17.1", "@vanilla-extract/dynamic": "^2.1.2", "@vanilla-extract/vite-plugin": "^5.0.1", "@vitejs/plugin-react": "^4.3.4", "autoprefixer": "^10.4.20", "cac": "^6.7.14", "chroma-js": "^3.1.2", "clsx": "^2.1.1", "compression": "^1.8.0", "create-vocs": "^1.0.0-alpha.5", "cross-spawn": "^7.0.6", "fs-extra": "^11.3.0", "globby": "^14.1.0", "hastscript": "^8.0.0", "hono": "^4.7.1", "mark.js": "^8.11.1", "mdast-util-directive": "^3.1.0", "mdast-util-from-markdown": "^2.0.2", "mdast-util-frontmatter": "^2.0.1", "mdast-util-gfm": "^3.1.0", "mdast-util-mdx": "^3.0.0", "mdast-util-mdx-jsx": "^3.2.0", "mdast-util-to-hast": "^13.2.0", "mdast-util-to-markdown": "^2.1.2", "minimatch": "^9.0.5", "minisearch": "^6.3.0", "ora": "^7.0.1", "p-limit": "^5.0.0", "playwright": "^1.52.0", "postcss": "^8.5.2", "radix-ui": "^1.1.3", "react-intersection-observer": "^9.15.1", "react-router": "^7.2.0", "rehype-autolink-headings": "^7.1.0", "rehype-class-names": "^2.0.0", "rehype-mermaid": "^3.0.0", "rehype-slug": "^6.0.0", "remark-directive": "^3.0.1", "remark-frontmatter": "^5.0.0", "remark-gfm": "^4.0.1", "remark-mdx": "^3.1.0", "remark-mdx-frontmatter": "^5.0.0", "remark-parse": "^11.0.0", "serve-static": "^1.16.2", "shiki": "^1", "toml": "^3.0.0", "twoslash": "~0.2.12", "ua-parser-js": "^1.0.40", "unified": "^11.0.5", "unist-util-visit": "^5.0.0", "vite": "^6.1.0" }, "peerDependencies": { "react": "^19", "react-dom": "^19" }, "bin": { "vocs": "_lib/cli/index.js" } }, "sha512-V/ogXG5xw7jMFXI2Wv0d0ZdCeeT5jzaX0PKdRKcqhnd21UtLZrqa5pKZkStNIZyVpvfsLW0WB7wjB4iBOpueiw=="], + + "vscode-jsonrpc": ["vscode-jsonrpc@8.2.0", "", {}, "sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA=="], + + "vscode-languageserver": ["vscode-languageserver@9.0.1", "", { "dependencies": { "vscode-languageserver-protocol": "3.17.5" }, "bin": { "installServerIntoExtension": "bin/installServerIntoExtension" } }, "sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g=="], + + "vscode-languageserver-protocol": ["vscode-languageserver-protocol@3.17.5", "", { "dependencies": { "vscode-jsonrpc": "8.2.0", "vscode-languageserver-types": "3.17.5" } }, "sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg=="], + + "vscode-languageserver-textdocument": ["vscode-languageserver-textdocument@1.0.12", "", {}, "sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA=="], + + "vscode-languageserver-types": ["vscode-languageserver-types@3.17.5", "", {}, "sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg=="], + + "vscode-uri": ["vscode-uri@3.0.8", "", {}, 
"sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw=="], + + "web-namespaces": ["web-namespaces@2.0.1", "", {}, "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ=="], + + "which": ["which@2.0.2", "", { "dependencies": { "isexe": "^2.0.0" }, "bin": { "node-which": "./bin/node-which" } }, "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA=="], + + "wrap-ansi": ["wrap-ansi@8.1.0", "", { "dependencies": { "ansi-styles": "^6.1.0", "string-width": "^5.0.1", "strip-ansi": "^7.0.1" } }, "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ=="], + + "wrap-ansi-cjs": ["wrap-ansi@7.0.0", "", { "dependencies": { "ansi-styles": "^4.0.0", "string-width": "^4.1.0", "strip-ansi": "^6.0.0" } }, "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q=="], + + "yallist": ["yallist@3.1.1", "", {}, "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="], + + "yaml": ["yaml@2.8.0", "", { "bin": { "yaml": "bin.mjs" } }, "sha512-4lLa/EcQCB0cJkyts+FpIRx5G/llPxfP6VQU5KByHEhLxY3IJCH0f0Hy1MHI8sClTvsIb8qwRJ6R/ZdlDJ/leQ=="], + + "yocto-queue": ["yocto-queue@1.2.1", "", {}, "sha512-AyeEbWOu/TAXdxlV9wmGcR0+yh2j3vYPGOECcIj2S7MkrLyC7ne+oye2BKTItt0ii2PHk4cDy+95+LshzbXnGg=="], + + "zwitch": ["zwitch@2.0.4", "", {}, "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A=="], + + "@babel/core/debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="], + + "@babel/helper-compilation-targets/lru-cache": ["lru-cache@5.1.1", "", { "dependencies": { "yallist": "^3.0.2" } }, "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w=="], + + "@babel/traverse/debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="], + + "@clack/prompts/is-unicode-supported": ["is-unicode-supported@1.3.0", "", { "bundled": true }, "sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ=="], + + "@iconify/utils/debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="], + + "@isaacs/cliui/string-width": ["string-width@5.1.2", "", { "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", "strip-ansi": "^7.0.1" } }, "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA=="], + + "@rollup/pluginutils/estree-walker": ["estree-walker@2.0.2", "", {}, "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w=="], + + "@typescript/vfs/debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="], + + "@vanilla-extract/css/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="], + + "cytoscape-fcose/cose-base": ["cose-base@2.2.0", "", { "dependencies": { "layout-base": "^2.0.0" } }, "sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g=="], + + 
"d3-dsv/commander": ["commander@7.2.0", "", {}, "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw=="], + + "d3-sankey/d3-array": ["d3-array@2.12.1", "", { "dependencies": { "internmap": "^1.0.0" } }, "sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ=="], + + "d3-sankey/d3-shape": ["d3-shape@1.3.7", "", { "dependencies": { "d3-path": "1" } }, "sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw=="], + + "execa/signal-exit": ["signal-exit@3.0.7", "", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="], + + "hast-util-from-dom/hastscript": ["hastscript@9.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-parse-selector": "^4.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0" } }, "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w=="], + + "hast-util-from-parse5/hastscript": ["hastscript@9.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-parse-selector": "^4.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0" } }, "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w=="], + + "hast-util-from-parse5/property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], + + "hast-util-select/property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], + + "hast-util-to-estree/property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], + + "hast-util-to-html/property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], + + "hast-util-to-jsx-runtime/property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], + + "local-pkg/pkg-types": ["pkg-types@2.2.0", "", { "dependencies": { "confbox": "^0.2.2", "exsolve": "^1.0.7", "pathe": "^2.0.3" } }, "sha512-2SM/GZGAEkPp3KWORxQZns4M+WSeXbC2HEvmOIJe3Cmiv6ieAJvdVhDldtHqM5J1Y7MrR1XhkBT/rMlhh9FdqQ=="], + + "micromark/debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="], + + "micromatch/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="], + + "p-locate/p-limit": ["p-limit@3.1.0", "", { "dependencies": { "yocto-queue": "^0.1.0" } }, "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ=="], + + "parse-entities/@types/unist": ["@types/unist@2.0.11", "", {}, "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="], + + "restore-cursor/signal-exit": ["signal-exit@3.0.7", "", {}, "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="], + + "rollup/fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, 
"sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], + + "send/encodeurl": ["encodeurl@1.0.2", "", {}, "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w=="], + + "send/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "string-width-cjs/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], + + "string-width-cjs/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + + "strip-ansi-cjs/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + + "vite/fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="], + + "vite-node/debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="], + + "vocs/minimatch": ["minimatch@9.0.5", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow=="], + + "wrap-ansi/string-width": ["string-width@5.1.2", "", { "dependencies": { "eastasianwidth": "^0.2.0", "emoji-regex": "^9.2.2", "strip-ansi": "^7.0.1" } }, "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA=="], + + "wrap-ansi-cjs/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], + + "wrap-ansi-cjs/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="], + + "wrap-ansi-cjs/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="], + + "@babel/core/debug/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "@babel/traverse/debug/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "@iconify/utils/debug/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "@isaacs/cliui/string-width/emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], + + "@typescript/vfs/debug/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "cytoscape-fcose/cose-base/layout-base": ["layout-base@2.0.1", "", {}, "sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg=="], + + "d3-sankey/d3-shape/d3-path": ["d3-path@1.0.9", "", {}, "sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg=="], + + 
"hast-util-from-dom/hastscript/property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="], + + "local-pkg/pkg-types/confbox": ["confbox@0.2.2", "", {}, "sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ=="], + + "micromark/debug/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "p-locate/p-limit/yocto-queue": ["yocto-queue@0.1.0", "", {}, "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q=="], + + "string-width-cjs/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + + "vite-node/debug/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + + "wrap-ansi-cjs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], + + "wrap-ansi-cjs/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="], + + "wrap-ansi/string-width/emoji-regex": ["emoji-regex@9.2.2", "", {}, "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="], + } +} diff --git a/docs/vocs/bun.lockb b/docs/vocs/bun.lockb deleted file mode 100755 index a975dd0d492b53cbc4e9cd653281dcc371755ef8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 312478 zcmeF430zKF^!J~rq?uGi8qG2!iKwJPl9VJvng`7jA%s%q>6$Z7k(o>x$~e(y1uKc90F9w2aPqe%v3kJ`W*`|J=@*NQqz3T0rbLny6X4?) z2cONLL#*GfE|KVg2Fr7LPEI1J2leP05=kA<_E2vC>IBD8TYy4eNphf#LAydH`k;@% z4s{o3UC>pawL!;&qP?n;L}Cc40Cf$}U{Lha7gQUxKBx}pA&3Y43=fG5fG{P2u>r#a zBI07xRU{HcXrBv;aR>T`#NhF$$k;f)*jULoRfz-+CA|Sfy9l3%$Pj;tBp}u|J|xUv zQd*0P=PoGvKLv{8+s)cnvHCPnEodJJigx~>nxL+rI36odJ>o(zil z8U~8_>&@EDKrz2spbbIaz(HfsOQ1L(+d=g~r?d7*P_%ObH3Dr8iu0ueitFP!bcT8! 
zCIgCGVsJV;31Fi+{>%D zzyFXPsY!&==zi{ZUG-3-Yy(+~K^9 zukF9Q*!pd`?4p`dK*&d1^dC7q^@8n@sx&_!l+!|Sy0JUWFMm>z^bx$h*4D3AIq$^| zcIzc_X^#7=APsu}u@SyvO~jQYk(Sx^PUML57H|%u}O5;+{t}a#H9wTC3lJVlLjsk#^X|JHs|2U&NxqaIzT8n<#*}Q+>qVirw z@_rZifqdL3pIwpn!JkF(Sgh*oYokK8R;IZP5xe*U@rEb<*(P7x=(yzoUV?F35f7BO@->%luOjmwZ>4e0QMpY@X6P$Lc~ub)hChy4OGS>3Hug%`1!A z=yDy9&Bx1EMV#YatiG^Olgo7(hJ^R#^Zrd<9WlqoA{r2Jdvt%xkdnL_8~P zOfC3ndV?n$YL=n!UB5wlWXu6$Y(%~-8;Kt*!<^XRV2TK&$3nbDbcOL-T6T(QU!<)cqVS1yzQv9 zVOkaGB#@C0+AUtUxEY=;SJFuU0t+RcZ9n@^Wb^w6Rb+-hrf2SuZ)c-t*;QmKkfPuz z?$_G4Zs#rIRpbf~+J$(pS?X2~y?Ua{SJI)+d=YZ6+Q1$a<3_Yqkzzn7QVle1^AD~a z_m_$|zGnfYAl{51FS)P!@-{y5@w&%t0%Oa72eT3M+J;|%&?-;mCWjvR&&(eWgr3*L zI5<`pLyo99@H#N#H`>#D&kvg^DUiTrbQ5qH51BCn_pY~MSrCVlhQIX$(P&(Sk zQmH$QO?_TPy!vt72A=$Ix^M5}kt$LU2+a~0A}`+m`nRiRRK)pxkZiUU?}PMYW^TbC zKK_|Fy?(n+j|)@Jg!jVHoX}bz*`Ptjm1)0?+dKd(DKx2zWT*t#I7@lUsJAWlcezIs zVW&GD$G3mt2|joS{mJ5kiW+$v@A8d$Y`oASk(a`q73Hr?Ztz0~@AFD#!qy4O>z zA}INh>oy8JS>dB=hh5%OxB#^-3Xgnt^3%@u%5{_205z-Q5Z=oiO}V&N{h-zKB(#X^ z;($<{YjE;>-oW*LQ9MdbIe3Nx$quC6jrP}%UtU7bE(`CSCU{QGyYFB0re6&pB1)6T zRWc8$sFBa_$mi@Pp{~-ri}HThoL+DN_E+mGUgr+%y?$^>k5{Ron}{}|3|KS5%V%n2 zIXvd+7cY8})?975w8@Z0t^c13>$X0BWb}OSP(%T*`W^_)?+i{n5<27b>McOnGm!zB zO+YAGA5wA7j#u;dJz_lcQUJ|Pf$ZKNytc>G5ZV`w=bftnIs}B`__eoEHaQTV{~{3a zM@w6xYwe_&;7a_R36DiOYx1@4lp~E_T_EVO zXxqdqAk;U0xo^;!&OZ&qS}0`)uiw$=bqRPIF}_A|XiMOOQB!DkAFU|<4@fzb=jZ)~ z-J4IoG)x0RPieyjz3`kOts-np+_7`y_!BvKI>DG>>J@1*N9nLT(-Sa-&V`N+zPCt4`N;wpjj=DTpzYKSQB|+0}!e|mGM?`kxr`$O$%Ia^J^%3 zzcMhq#S#e3%-z*w-;(iBsXIVuH3{W&9Y|3iQ_3cu2-iP{ZiC2qQFL#H@w)w_Qr=NT z@^x)Xy1}49_?p9T?xUCM7I>GXW`!?Pcva>m(2NE`5z3@pyDsKxl@0!;@W{)@F{kwY zQC@#ejo`K9V!K7X&)>Atn>-1Cf8GP)jdTXTzNJq+L5nA86y!A!ig(7P8TM25$lbYA zB>PC-BIOM!`MSFQ1?(DEc!~+6L7p@hf4QF*`y3R+S!>}`8o4w%k6dd{q}>_C#{kU> zS8sZ1Rmln4emisrE!)f7RxnP#_+m*E=!f@?75mYhP2B38R^br`RvO3<+|tT z>KCb&iuV|m&{9a-9cPOwbz2{x)m!`EyT(tdFL>)yWC#3>sv!P=L>SDdLw5pmd^NIC zrzD6j9+i8`+HLKO9}MV!?0EDRU4-V~<7`KZ{#yKc62yS%1I@imW!J6M`MgYm&_lhN zfL}ALdpqbLRy^&b#TugwnuK8Qt&@h$#oh=zq@URwrKvr8P`cR@9!^Mt=qwgpLd{&& z0=-`!UX}z2wZvH?UmPo3^z%Zk&L%;^BFz?kfm4-lok;in{iKN5WVY-b{H)cOgwIjh zb{d2muy>^Xq2|lG+_{9AAA7`X2{Sz2cQ>(a*37>oLFm02hNAJ0YgZY6dnd-H+|*=i zYosp5pe@w&!s{sm@+RRS8(7BY*ynfS<*h*~(qSC;kle$9)6FZM61}FPL3e??I@qj2 z`su$PQISX>)F!5Xb^PuZJMxuNHLwVzVeuhvi`8o~MCBO(gnFiN17dP)vKB>uibkCM zjS$G`l9#vmHu|lF$}>qI)@kW~ds3$FB^8+sgvQ+)j;EWLBEwCrV<=iL6UdnP8D{mK zw4}C*tOtVWfW&-WSL*D1KjpEC>;jSnh<5w>LJ{Q>161UMKynxAnEU9Tx2C8$y$Xc- zk||4vznS{%6-Jbb1}KHFq^0fWqxB)wIUNO&^8j)1UOOHKAN3dM^qj!U^t!3Ug+)yg zP#1{D9~#KhaV`bBbR6oOg1F6(gTE;}j^*R+rD**v!Zh-#oeLHXjB7L&Eg!l#_ICwH zHXu7PA6q(bwTa$AL_IAsg#0+zfWCAKJ8=kUrT~M@k&gmKOP!tGh z4&`%2AkMx+Ii}s$b;?VXgh;!?Y=AHd*n8I^>a8=DM`gYZQXe7?- zqopsuL$|z4NkjKqo#8uzr}9)@=U!(nx;|IP+$Sl#O5+OkZ_c(>G?3>MJPvKV;s?&Q zR-`%lj#}sBbz821T$-Hc(`c>a)LClgh*G6Fh&-;4S%#RMr8nH*G<*QC9e9x|tlwG1Kz-)vjl-uSP*~0iibD+~Tvzkpog= zB%vV11hOW}_m!)E_E&!u@e@eNJD>XoRv$7@MQRBo=uPhEFn`(&q)2Nnka4{W>xN{W z7N;U#2t+%m@H(Fgv}&dB7=h3j(6`crpr*^(=~X0NARAh1+E*?4bFhkx5Qstl{N3}7 z@fZUt(xwU|Q;%Gi(x1JD_hc!^B7vm;kZqiAuR&j^$U1>cd=s&?W9rp!RAje67Ojpr zJv_zjIx2EXAa@7cK3DYHA22FV(zz~>AJ0^IRrL9z$|~{%2(=g;$H$E58=Sd~iqMB& z2wA`5&7Q!eE%0oGBF!5Jwf^f3mXQ6weuH~VF?Rd6#c$}{NjNkLGQKiW}(G!oC*HgJ}azt)7 zIgcEX+o0GY?mOQ{_R5oQRa$ylMYKCXKq%^5f3{V##PLN?f9Ne$)QY*BrDih=^o1zL zx4dB?16VH#67fR2|jm4tRG5d%5G?$kew|n~Oz38VxBix8d?_SeW~vcbizv zralHu>bG5%-!8j7m(UuB{B27!mhd%+3zmY@=4@zEnDMZgl2~JOf+5}z7BjVYzUN0@ zVWxzY0;I`jLFKEl^7qx0U&d?Wt#@}iJ}bV<8|jF>hH`20z58lJU{QdtQ504GIFu51y}wz1{Pzu@zqfV`VDWO+L3S*A2@unhZ$C z?h9TtD+S-V3Ju7sa@EPb-?!}uUsY?lwBR1H-LJjP8hsXfA3kglgZHo-Fcc}VYxyFdB5@`lY}Fa> 
z3X{HnN4}1Z*ZShE^9F0i$XCX|?@!E!G%x*+cR);D!F#}&%?>OdyFwQa4MhE+@1av)pX57T%mzPzhrDXp z(JOgUY6fal9{IB`pLTzUynN(2{l8N;d45)|B!&6 zBDZLm^tlh0x-pBkUc=W*YLxH0cfq@@cD9y3*Cy9serN8Z&QIgXBM12^X_~csRGlOG z@QJ&VODs|SAR7>hDR!T)_xqQ3`eRg}w93VSP>=rG?_2Wk@OgI<2o~DNcjRra<2PLB zxp#j=2Kb6Sh8{F?>ijgTi>FMf27}G<8zInu`g$Oa>E!fU4Q&fUzC0aITIGg6avP+m z^Lo|Mx=*n-rL+{Sfl$9t;?f*#;nNfIsEFJ{KJ7EO2568M8jL-WwocYA5z|x+1_H?e zWK#6E7hm6ci?sqp>j^+;mcYF0*CT&qUy9WVWn3}`2#pu+J+B$EqILFLDza4YOqn>b zSM^nUd{ks35E{#jh-kljNq(=XDzX?QPQH`mJFBdgq(DzgJkH-T-3@8vA@del`fXiY zh@xn=bXQR$_eA-QffJBM{;=q`JE{D_+GFiQv4MQ&qCD=Ed%QeCk;h{4S}%VNOrBG@ zZt`^G?WlYQkz51$mtJ$N=MmR~iNQgsMwk5@r6Bqrmv3uk9x@U<zM-AVwz%}Lh*yU zK&Y>ubZ7R271IjWS9#=b$CFz}-tNfTgT)QFb)4V!C+{&LHt?1FuCJFr%TeiSMKwS2 z9bR&+<+VgU`jF>TUKivxxQF~?Ln+K&)A_qHvs!-m-|{2ZTHgB0wU*mWZUcEv<*onw zjeL*h{lqLWAC_1;s5bCqZ{DZ%5V=J?vBAHepHJ)Y^3wh+My3GjAp3eU`km zhl4iTW5@6N2lX2U&+Amwq$fNi`K^8EPa(rUF{+C{|j zSXrK*Piwcm&@DTa+@uv1x|hu}^l#bC)t56^ELQ9%Ugyg9U)6qXn=i2QN{OE2{erxF ze@1<2cLx!vSmbsG7}1?%L_bM&iCr<>3oS`m+WqP4sYaWSS9>Bx1Hw|w#l#gjHs zuJ!-lT0Xgt%E$HCd%(u0$~P$ZoBME4vX-qu0MzN^7@18<_E-u4JfCusN4qA zcXI1ApZ;^Dn;FKeRCBseAeHMI=7*-tgOvw*bd1tj1%zfJ(%da_qvEJhoMvUqD5N~b3K6l0=x2Q2X(qJ*hTC>f#pC|9HXCk-;)N8KV#phxB)_iQ( zVVZz7u;gv<6L?6sD`WOe%$O-H_LGwdNNZQ6&C4{*Z0==5*-bvZ@HfA<4{0`BFpb^I z`_mQkdUZV9z9wvqmYfx85lE!*w)#yPJS|B8pSzdzfidU_yYuVK>z+y)`Jew|izdphhOQgoXOgsha| zUdKYU=8nOR8>KfhTCwhpbe5Lyc&Tmn`{z|2`8SZlty+9q#G>mu@cW=ngEEn=G5>~c z%gy)N9v$R%TiI%9u_jA4&`PA3BYq1cH*}lOWJ|y0qX!W}Js?VsR^)ZiZD6NPg?jus zxgZenjwo}qf$f9rSao^#vDv(6U4{_wOgqGV^jwPuOCF5B9tcDmo-2B0EE_D(ZVhV4PaN%Mr(P@WcT<1BG5VG$Gbth4xDi z1np_QC}4SJ-*DSz=fheMHA<|<3{PtT7f46tYb;t>-OD{39rC| z&8_r2Q)hu}^}BK1yH2`VtjAz}tp`H3E^@g-w^H4TV7CeSKh|FN(a|%#*qe6kvi{7$ zF~});8KxZdv|4?rX7}EOf4tkbC_07L)DzsBguiz20?($KaTsQgJpDPZItfP2H2OuSYY|2@1MV|aiY3^3E2mOK2tnTWn#kc1_ zaUonqrU5};m)JJ3RCuk2x9rw(HUlXMWd7b)#b#LQR8V<5nF*GUay*&+@?=(5K5zN& z>n7Jg{{0g9n^xuDm+{1Ia^2(>m8auLY0FmxwQ4TI@32LwmKZJ9V1FMmO*~xUyzi)i%R~^ zt6zj}sm5*HcqGqdJY`DT{rwP!$lsuKItACw`}L{;nQu2Prsh=sB@p@B%3eVOYW-{E zzBwjuuF~G50rCnBp0waI!Oq_Z$#s$YTidDQXGJvkL1afm46?_YU` z(L1vnN?{qa&IM#?`5~j){#l?TPltHw3TfN4&uRzH-i^Lq(LnzFl8)fX18IrdI)<13 zBPB*3EFEZl3BE(C*d4-ec@2wtjW`5h1D?0mXrjSW3P_>a4>zA`JP*;WqV*IY)HgOf zak2F3qsxBhh(B~I2WgbfnrD~4T6Oy_`cox8^7jPGzmp<=zoq=Ums_A4`RL`SVSC3d zdG!_7fb4b(2+hrmNCvYaK7FL07reym*_ zB9AuysrpmuDxYO5;NAAs(6tLNDo|o1dAsvz{aW7t$So=_1-U2wmrKEu@4oz(OHLly z$w#~LQRt_Q2IamZkNMWZVxKezE|qYZf}$m@^X zcjSGryv}{vQuz1VH+lKUZ7ugvxyQ@PM_zyAB_}T*`8>J2BP#FIEtUJCM(`?TdF zj|Sy2pM0KAUfS~5(vuOg+(YDEvN=j~xCf8_4IY{wWRiV+#P%O02M? 
z=8oILLI1 zW{W|ix!-2r!3&1PJ%NZ_)|mv)%3rqrUc_3Sc*H&~t4YV66?r&2eaGyrS7t~{M~#5OSJ$(`;~#zyv=9ON9(^?v+dGB+NVl8KCz#og+R8KpLwZT`@Btn z&;}9c)(r^d^t<6(>n?qlWwA&HJa~mv?CoC?T~~gN7H6T0!_o=ILy^6)Kcdf#CZ{X57~2(yc$lZe zJ-$2rXr|_8I{Vv2pdshYmp#^Mp&fI)C;`FZ+=;^S?(?ufCTZ< zj(x!MqZ#t^(gO`F`+$hIybTgap`zD|?MPpAGZ3;UJY*!0jL4!Uq0l@|$;+Z3mcP6jZF-7aqjt+bo;Q3*=9hZL!%AT@ zkgPzWcXw{~a#_g)Ahh)gJdS(nfe5b(H6fe$+grBz(c>4(&<=g^PK(gE@Nk0#NaK2A zO847IZ?2$PLN&|jX(HzF*e6u=cdo(Vhp9iWU2=Occmnu0C?q_PHXb~bpRK2Ehc30Q zyNQ8Vv?R zrJz8~(Drff+Z3*c9cw_~SQ?GXlH=(-;wAUHzH|F4)pyh*9#m~GnSqejo%RK{x!>e9 z_Oij&OoQr=xrcQ2nsvFs%Q>_QUOZRe(yLg>xhx-6a?wwCI`rKe($ z8!DfQKr#Xu=H10)8tOF*2vt-dLyPltp6nSIGwb}3)5OD>40avCI9aGNe^=8IyDRkp2Q}tUDP@8l+PvbcthH& zH~k}beg4g&6kfY=h4#J4edh!2AP>=Z+P&+$taDSR@VbvHxo{=(pFF4SPyb%LpV_+; z(}0zBSs>KQ_+|LxLD>d5Q>F0AjVn2Er8kabw-yz4&ChpecTSNX*v*C3RB}LE=qCC( z7d}dMgLNc4K=jUaf$WBK0-tkR{9yK5XL=O&9dx7q2S}k8JU^kmi`NLrn6)wxDs4zJ z0ioXP8ss%g(hh=v~ti9ulOwQ!>oP$>4~V- zS*s5`+kVcJ4pVqb-XT*J?WlQIw-X+VkD#3GpSAAUUkc|m04MJ@R9znPbt?U5cuBuM zy{La(=-oT%&i#Dc6;ATYSFNFIHGEPnNBM;LBa!Y)jQjHM8okIW(yzHDqIAQ%t1JHz*8>#u~Jga+t+zwQ8*~y1vmCLnQL2wVL80baAm} zu!bAs4Hlj$eDMlCD`d0B$(iqI>8WaCSey>;>&HuHEl@ZFDdT{w>cV0}qRsfeWd-OS zX;2Zf$rKk8rALOeaj`}dX>Iol{6ZD+fwU0znLj3yGz_um^u~B?oUsD?YKc`Sr8im( zVX?+O20Z+w2<~IF8e>D~>x!(@w7kC9Tc(%R*eKR#A++;?+q9Z63 zmXW0TA%F39S7OatDjlJ97zP|xZL}`Rpr{sZF-OOu5Me=8d2UyHi`E*_M;C20nRNV# z*s$0NP$PuYK$c@r8pdd?qYk#rI?6NRb=OwMizZ#D!KCKef>&jSn=P=e)17*w&SZ|T z%bK0)c2|*MayPx1yoNa*6}sKMjvB<6W6Wr&oN}s9fSrtCcA_C9%1$_QNpeS%F}jyF z(tx6mj&M?27lxOL=;7zwsK#hm+DhJFj@Ft(MX5VlnJld3_H|S@TGz)I0S5qklp#9K zP3`3gqROKR+s?Ik>jG6})MIN{q(L8NGFXz6VdGbM-LTlig3WfWMZ`nF0FrgXnoM48 zdN|ksZ4+YT01~cBh_T=SVybiMC2j=NOcS@iGZKYo<_+-ZaHnE~@8bJ?v5qXl+0D_; z0Gdaul|G@XMN8;hyNr5A7HSX;(P2&sOLKx(MriTzDtrVytQRTmD3IaMn_d#i6rmQ; zNj%*J4bg~d4H$xeG=*RfJatT z+8~=$sCA^)@+1y+``!jSkJIO`sFP<+CovWntnwe+VJ*W)TVvKDIcMFxzxs zC7lHN$V}QA7KpT+GZ`Q27PgMarkw4{N9v^XlB~b8NfZ5_N4lv!uAMlg6J`=CCFfXf z0wl<^3zxJsMCUEEBM&uCP}B%U>Oqv9 z7{t45bVOfYq3P<-gl>Lp3WJIFr^?SdK)e~8%}HT!9T^*q7!onImEv?mRHy;Hs@{#W zD$kW8b>$R&6iu?ZZ&g&Cz^VSAFpa*ans?F60`F~`}^<;G()JBrDAuZa@GD$jGW@)lW1eR zV#mp^8Fc~vHl_I`li3^{;dEn^F~%Akh*70t2Yz!>OYm#>WK>umCjr)gDiz}$xb3yZ z+nJoRCobCGXO1yMJKv~WwZGFE7j3p`?XSSl0saoA=GT(gfnNfn8^5>Z#C9{>#&&M! zIDcOT>0lE=KRlAc9LXkz{rX5bHoHNJ?ffHU*p&yQ*!}WQj*=@yk|+66oe3(bKR!|; zdp}j>c%l!lEpRi4v7LXUHk^;uEt?ef%Og3W=Gr8Q-yX?gErU%G`{j`wrQxxOQZ9HT zO*C#cVdA$(vP6C5Mc_ew<&27F+x_xL8_2Qc3g;gw!>%8J6uVy@%29HqNMgS~QkLi$ zc?D5_cqTK2|3ssv#-Lxt$CQ3}Y)$ z6ve-d zm;;=|`y4R)`W6u#=LU_6vzi=WJDs!!U8vCkw#n8)GU1V!Mof}l^$wEm@)n*q)~>k7 z8W+HtFIpH14y2@9CZaUr$>A=t$rFh&_cd53;>E11ObG9DoKdyfI_Rc`N-i>|lpAWA z0~^-}*QJ^eMp<#=qEeZ}r+TiM7G1Y=Pv6!V#4eJatBHaEG?Y z(O3?!M>%n564fLe0FQ+8T%a#u%?EFi)f8Nz|6?*Rw>%Xa=usk${gavxN5muXHe-mk zRu%7v{LkWDxGV4OJ#|*4kz`U-#gkn2NNetcYAmA4c0@c9Z&z?^#3;h6B$quBY#(~5 zI@@3Un_~HH86Mt25I?~*9=)^M{S=_nHUYcIV@R_5g{ew0AsP}&n;ftu#9|PqV(Wx8 zAv#tUuT^*HVCzj)F>4i4vj#TYRPqh>sSYuijP9xHqvM-wu(O3e?}EL=PTH`P#LfXN zd#$t9sCPFh>Lh{onP_R72hD0$;xoD-hA1=bIAEJ)^aeu=b}sb9O0iaN3=gN@Y#X`B z5{KnzY$}K{V6TR@cbvhJAokuwVPBF3Gl!9wvbE@}#;{&a>0L|CFxqa-%6H2@Il8h~-hHf~^R1+!UGP#Fvb?$y%&o_YHq>J7i#=dg6DBQe`HaF>5wv<;tS)IX zeX#u&SIri!IfhzecmBjJDpYV}RF!i?xMs;!bZ14GTiDd*Qj%+E?6VGj`=4lTu$aZ% zlDStv#flaED*06o!)8QUb2nkV)j*rLuz6974S#G$sxdB#w!dN{6ecN9x8q`9EQ24e>GV{AlvA`Q$=AIkSpsvC->hH3rabr6 z*OjG64HpJPp701laB>b{Uk;FD{}U3h{?5t^E8OlCv-M1nVLQhHzL;>+9b7h z8mS{Rh8x}P)l4eb6|<_KEA*o(f6|N!^;P-80OI;bNk$9Jx&a7NPC0*a+Y)TelG0B= zg4$`=>Cn@PMx&P@+Fc88y9YvS=V+Ibj{$@lw(bi8Z0B4F)@X$Ev>5dfhQ0=qiE!G! 
zz&b6v%g8?8*Fb=q+F&m#2@f}+Dgz+}+8nk!#hwTIPR*u125p?hXn&b^ue7a#tmrOO za>E4`9OSSzCb_1;UZS0PkY@LbRr+KjBs+uJ*_oG$Vs&Pi%G6xhCu}vuVg=r!OZc0% zx7|;fR(O$Jv9ZLLtxB+-SqzKZ?f2UOvu6MWUxe?42)3e87j3 zLML~5m2*Ko(IO-fW>@K`UGAAo$jxwwVn zmhdND#*`Cr#;u%O%Zz%5sg5wPi8B@@RMhf8164cmP?Gx!K@Us?K?plSZQAJOKEjB? z)^sq?0ZEE+b4;%YKIP)31meQY;KRRQqXC@zWNZ?_V><_D@(~4!BRtd`4?b~*mdFo} z>m`#OzL7_fBz-;2z{-yqf~pw%q_vxjF&wPyh$$*;_De2zt|p!!!PRBxlKfyd98WW8 z<6Dcj!$8TLP!tmK+oyT=s4&c-s>`|5{>>DeGAE?RF4GbUd-?QVk@doC0l z4aAbEKT6NaA7FEug@sJn1g%|%mZV8krd;WnL3jQtXF{T)2Nh{EQFiIM@hGQRAZ4pF zges>fAJMI`-O^-x+lg404H{p|(7RN2rY_~H#1YJ?wjtx6sY;lG$N<>vWPcx}97j z#bHT)|E+GzK9@ED>;*o_w3}+CNDbwNs0z3}tcGlD_pUqV4v2bU(e=Fd? zgk@YrBt-EO7Q-gjq}iJmoc2GFX7ZgKo?6An*TKn8&~A&hC^zNAKL80He!|*=o(N%?3*S<0&@_1 z;P0v8x)p>_$A%H5K}xzGBFG+FhdddimVht|yAd-#2o=~n1JE!>>M$pTC)nw6V*}d2 zFoT^mgs!|^;IVl;Dvzh|Z6~}TS7gOuy%-;Nb|p>WOv2zGm1B6_S`=19`Wdjot;~)( zq-RZt3N@P?F0#20H$Lg=cCuPh&G?K8ML@~QTcXgL1 zsfjY=MdF-H5?Gb*pt7yx-IRmkELiY!>3B#xxm(nB%a)*)TqV|MaMyP`-;A`JFRJy$ zO)eZVu4pW;0aBAj%XvJ}#FiiD1Z@!HCchSA%V82hwxO`^=v zN!y8VG%sfC8^LQ|(6%J4b77Hj(Y*|M$E)TjOnl&JZR+Rfg)(?zSTsMpp+GBz7R=$I zGL4LdBnQNbMaEcaRxlmz2*ri6g#(~}zsRjBQxXgDI6?)Pv6F4qlV(j=#96KYm}7io zb7G%=XTkQUC(?OgILqbNJ&|ZvlFnl7Ku=`DLmhm}ISJ=`J~E{wE}ukx667PLI@GWv zavd-xnFZpizbi@Wu{X{b&Gtn&gY4x!&X>`^Vqj+TR7F(ZNG#jCtk?8bBJ5^U0|J$q z2UD?8CT*-Pf<+;Jzve~Svn4Trv(%5YE)&4srj$f)=R}?EfO2NL5fz zMb5!B?`Uy$?eDksM3*P8WN{&LXE^Y4Xc#-x- z(c2nbU8gtD*mMy4uu`?6WVWX7HLoCxK#q!J}<%dT?4V`-GZB+}@DN79%L zZKBvOCMVfF;7+gLqY|;^1asgiSqnIdfT)S#7mh&8`Ojy+Uv)leZ2ILit4|HMzmMi)P3s zEJ@>p5*Eecf<+q&XQFpZVm>4Rn@+9Rzhg4Qnxo12?cdSFD=Y?uItzPMnzD?*mP`1{ z(HvJ4_*J&J;Qp0qcCwKLyI;%@EZox@Vo^1I^9*BbUG&sZ)P#; z*{6?e%eJay$)gWfIVTT4Km5VVuX{z(u31d^oD#`PfFKBxAPIud{t1hR9__YhUsaAZ zMf%bQnRKeL!$liu&zwJBu*W{EJTcpRH@x?Fc;yV}x_SQF`q^e<`;kC)g(;okB>W&) zELY;Gu_C9Kt@pImsI^G`Rl`|G_=Z3Krmbd?4=Red_KVd)8}*+Rov?_#B1e+g<4lfe zUd0OSJj2lL_W7XJvY(;+IZc(IR0LID!EP#VTYgIw+eFrU!hv}C1M+3P0-T&nK0=yp zb8psPv)nIFcCD#trTMRKTU;emhzCI0*UT46D5;#v9XbgrsRvP+0PFgvNf||%Pu-HJ z?1O5lq0p?}&4shBuAve>Zoa>yWq#be%RTirq`w1c^u+y#Mcg8+Cnpc<%g!`c{idok z=rZ5My8hzEs%_ciKD}x$mQRavjF1Kcfh_eC#dzE6h;`)a;5nrkLP8ClF zg3M>O?G*EGxTneH^kKh!yc{pj>-s~oqHDG2#L9pz0a2e>TGk-3Sf6JbMN~P#TcsfW z;na~e`j@Tu;MI5hh|`m8iFY~L=6j=vDknu$Dbf=(SojB<$9iWqRnCYs!X~Lz@{sAM$OVbl~i5o|!7l6SX0xJ*0G4HDOl= z{Bo-qW12s z?&kdk9P`8>*-0HZ^+&_D&`pQ%E%$mbE`N~B^j0H0a=?{#!=Zp+dOWK2li?@U07<57&!(}W8=ws!vRdnmt6fxk@w?WzoKuw-%_d@aA)uGmsC?GpO3 zF7b>Egk?tXmI)dFRiiYtM4KGOWR^u+JfZosD#3Jsq!SD4J05MZ(9`BS7g7~#^xiTf zbd(80EuD&W_)zL7te%F6b#N4ATEaw8`oz_eEgMDcisd;ov;j6DZD46|AB>H!L)gQS zjz%!h`ABAiWY;)%>5&#N1lw}&8s>jlOQ6yoJ}UQUb+(7egEGwNyN7{X^2BVC9`5%1 zF8^g(Tzknj;GaURbam^GKTMz{ZzjVh`e8hswpg^Wf!6d0&z5x2Vuwx!w` zM|}N}kp-SC#$?BpX1_gIWSy>@mGy#+?#`gd%LOR+YS5@li6=rNYGRU+^xle1bR~3C zidV2Z+!pnj>YiZ>ULC+@_u@J{w}ZY~KERbpi>uOpSFC$V;nx9!lrIXhL^PI={6XZx z?heJp+wLURFOja(DUTf9lHJm(Ll1kR3EVF17J*su)Vj)hJiM|&wbD6DOZXaKv8WuE z7uoAvc6E_0<7{h4$m+x)#cckVm2m0GLH5FxMp&mSs!IB-_pg%r#Ms6uNP{JHR!Xar zWXmpLzrA}|{rVdhi`ncmA25y`ryZKVc?n8Sfw54IG!f!`9bZkQRJ3AW_He%e!rnpQ&?{FQs1wsq9MDoYflNk}OT2Efo1lvqm;>_z1^79ShE$NHY#HDD$)*tF{oNdxpxi3a+2p1A(r) z*@1#)#VkYdE}tEs772&%929X8Zqj4^I$Ep%DObPJ@m25~`6{_2UHCY;2sE#>8PNf4 z?l5oRbK$3N;k!FdoE+QE(Wmd`BXj|QI~kPsTXTeb4MTLE*vZi>B{?~4VM)%9-_=mI zN8#^RBm}YX7QUqheP1!PSH@UI{o$hpL|T)VOqam=!~aDA?k97O-`!*mxL}+MOes+n zdnQ5qG1`U2oi693Psu-ye9jib{R2!X`f=n*k@``p^ARLs&J#b1I2*Vk-Yo5-XwzUB zv8JT=Ben4!M6#QSlfwp;4r7sf77}u&)9B)LR&doxb`}z>Yn zob(hzxv;;s`j_E`h%5$EP#^nKZF-#6X4@D`JT=3iFP!3VsdwACXKvY`Lb*S&bnFqI zKbQqv9!1mm9Bgoc;SqE|L3&{;n?Q?XAMrxhE54M2%)+NM*AyS~eo=hL2Uq&}^0zdz 
zEPoFp_QJ<$XevHsov9(9*Fb76XV@~1k0MU24$j1jV#}`|Mjsl*NA86J()S-JBDGS@ zqS(kO%a0V~Q^M6k!i+c^fLIRIQxm>{X^#kaWDEcJ5 zAVy)p_yF=W5!H03FJox4kq%T@OzX_UEi zhI%EQ`~RmYvu%Bx4>+GN??q>v@*deWZAh(l9~}$ynWcy#%$I-TjHf-@&C1Lvo#>Fw zj+5LVqpA&vs1t1JJBymV`3|f!39mXhFw5!HMDND44RzQbm}&jiMRR(F#64UM8vvV z+xzda8<`{Ymc~})1T)^e0Lf4{cpaftD!}?3gAKLj7XI@(ywR)8(~ywyGVaTGct9>%6ubwT&9h@=~c;8WrrHDopiA zq#CLjajK_=#T!gm&g*X|?$4V)Kk$ll2(w?kX_=WN4o=unnS)=To7*D}QS1S!KWCR_ zcQ?3AD&Apc;<<8k|6F4dJ?Fmck6{g6qn-sN-jmni7DYZ=ST~6Nt(# zkSIl1dd#Y8WN4t>b*DH3_nYe};}SUXD0JAV6g}AHDLZ}N+_#q868F3y)F?3K0YMBitoJ=-MZF2g7^Y)| zYj8nB!#UweCek+NH0W5$4&nk_%kiF>&HegKeL%lx9@bHNV`rufT4j=Mh;<44?2X*_ zpAG6WnN7QZ@r2#Bn*cXH;0^)Q*E0q(gkBE2x5vYt(EVn&zUllI<$hLWoImK=q$lzV{fH4FAO9D9;`L@m)GVw8wMz- z{-C9js*lsAmqxKM4;W(Dwdc-2YVB@!izE_(>=++qcN8#35TkfzB+eYkc=4+A+gPMn^&p6P%J0@q&>L91yf4kle?v@{!`LuneUBWKVQ%;tCX zr_Uz83YhZni>0)LNK@5az)97)h-7pBKzD!G_`O^zqhR8-=}7md*H5-eL`#~YB!!rL z_~g+xZRGLV5=i)ME9ey?lIB&$<2N9W-xxmoo(>X1U{VBS%APl(EAdp7ar4uk{_!Vq z=d{L+=j!(Iz)zzFc{VWTQK@#lz7#bwfMeK9UPs#J?&)x}^$P8yVB%kcVlv;NvIWsp za8w@dkLyP-&O)P5|2oJ84CPvA6l%?51zwhgMxh=z_BsVc5dg-PaJ20wGw1Jj6W=&= zcn_Pd-G57i;$~xn$z)fNosAJT9`!7_DQ%{MAh{;IRJGCY)lMn(tWwgt)i+p~sz=Zj zi0FxZ(T(U0RC~Fm6_`zi2BqEl?Ql8cDyCU}mJ2jlOpAzdW;g-9nOsyQ|1hLe#;O$6 zvNr84PcrJOx)^kC42fIiz#<)XyFdlT^c7=kiqJJu#5z}sS+`Cx)74VUCb1MV^)JP& zW2czJGetGTq^L%!ENUx&6xBSFMQyrDQLR+6sLjkNDwQ-veU?vg*B4Ses2(XH=rJi_ zQsT0du!c)1lVX@sqNbUWqN1IWk}{vtvISI1v|5GLUP`Q%Y>jSu6~=|7LZmUXrx_%a z8EuX(OOQ&}6a0iKtGlDh5;XdEPw9P6ln$`Cs2ryxwM?i}xy> zE~gC}Cu3TPnD!@rgi149nld{?(`;h4r9y8iD$9YGP~z&h-+#3eQ+E5>6MJf%E6byC z*V3N!{gLlu-{jJL>4k1`!@T*McaT!Y*<9d2pmHQw8;x~bx-_xO;dWtl^# z6&lOE(S~1Ksg3h(wg?*sU0j2VOYGuOWE^~9ZP9xoF3IY`uGF}sF06&dnYOUF8LuxYifZOzSke}1@3XY(uG=?f{!XHU89lsMUEXrgV6hebxp^HWOG z?t2m)3j->KZI8b@iZY)~Pbp48R&R{Hy&~hYgcKRWTD$ikH{WH>4qOOnBe!>pi+N`D zuwmaE{i~_T{;=~Dq%?DZ*&ZSmp7F$f-Zl{1aJd9fL6;aayYG&tGhqTen0=TOsWDYL zeptR@#lnTqJwHc|O3deGb3+t|>uAvx{t4L?OD53fhE1%< z2KdIfKN(le46Q&|+g`DiY`aA5R-DH&Irf54bLaRD_2btd%A>cpa}Y6ePdT2PhzSvWn$ z%DS^rmog*Nl?f8K-YVibx##ElG&srxX~e2$m5#ehE{2(lyz&E&%VW0FAbKhLtXRh~ zB;O@Ip3Rx)njy`;y%+Q2gZ1gGC=!wuSEa`nB<&>RxZYKt9}b7}{(z&A#CgMJ-VwfT z&R7IsE>wNv-Zw9hl=;u=(^t+~7%(Yikl|KN2{*4*`CW|a$@z>jzpcKMobZ|bjd@Ce zp!Q>*Wkw^ofE-%0RiqTWp0l9%oG2a>YUAT+Ol9IYhhEtPGVa z3A|Z--0;R_(;avNsw}+5sYdDPIXCXFf%EL$PVZ-XAoZ&$1Nr25iA5WERC=(i~Zxjs(ycts@wdT@wBXT$2y1ohniC4d=_ln##-puya;#cx! zQ|h%G4dCbO?Yk&z=7ztoL@ap2j&RMYp`4`ZC7QM9*u0|Non&xs^=?RI8?Yc?3A1EU znpx=X%JjO$POMSrEHCBE(uTx6kzcSK))%DLAAblpa?n7|S2c@zSt4(G4ve@RkV>U(n+DQz>0O`C*m-lh^K7@DPX5x2CZw8suABE(^IqzL2WT&)xwz{&k@xO&y#|$>8fRBE!iUGpJRTdDPmqd-dXGf3QV7HF???mpnjg_75+nRa@!n zZs^F%mn$XUFTYbsowJ2qOmn=`>`Ir=6OSmake){+j=%Oaab3}6Z0DI+JG987Ah3Hp zn!NN5^X0e;FPoS<^oG1-xxkslL{o2@&fgQJFoT^69+FcHN)9*$DY0*Rd)n+x8?+y0 zC0ZeJZMEUC#v|4h+*`w+{nLh1Ro)TmPlI_jQ0Gxd>f2moB?6wsV1!*CX&x|&i8bhgdbLDs>(k%=Z1+srW<0+Aw10X2a{B3Td_;A5+8noMD|-s6?4Ig! 
zzj=}AGe3!Y4IV{Nr=S=XcY4cldo%ZV(48L-4b=<(2(q!(FJd1q7d9x=h@38pWQsgexN^iVrWpa;Z@T z57hF**X=+TpXCMXERCkO!_{_o^%4OOMcYiLMwe=MIUm^Pf7pI+b}`!zow9)`kJ>v) zxMUL*4|t@rJ9|tkY~*nY^YhO?|MJ_a-@`ibw7Zy?WwCS~_Q)Q_<#uIRun%ZZ zQf35QnJ_dy@UXJ^q<&fJxw|Ze^ag`K=BHbxCoL?w^VuM%$_YhPN+N0cpey(TD3vcm zsQA!wOt+Bmi~{f!)cRS1D?dx@=-xg3q>J(?-k?eOL8o6;sW+BxPnd52q2I)WKg<^* zy|Jy0-7%=J%?Y6W=9_IJH0rjH8>$vkQ?oR%6@-ANh=ZZOn#ijKf;_4d{@$pbvk3S+I#h#< z+s22)d$nrMmM^s$z$v1^t4DckdO%}XH1{A6dme9*nP~_~o_Z33~XRpJgIs%&_$kAW@khUR5Ij?}uquz|#^yRiG z=e-CB*G)e)ut{CNw+F^U2>Soh_cV6<#$9lL^s7m}SLJX@h{*w9DUwFwbQ}`*yjJ86 zS#288xox8^b=Fi+gdRKIyh4hYgyLq~aMm9;3`;b;SyAW+-IU@hpsU*4;aa}Q=;GRU zt4T7wSva&YYTO3r>c>ZVF{A3WB1)^82EWG;G`LolG&E&gFEiAtSzO4Ye$rdRTrVk@ z3|-|f-&L31WP7cIqKzB%Df0ag_a>=p6gFk~p>pb8&?v9YI#e!3LT}%W=4u?wDPm(< zShQw|Hgh+m`{Cqg>|mOL&=3wX8*o(#B_Yxl5;YAp=6>_EF~>b((iCCF?Q1BuslQ}L ztCQBw(j66S_PDIoVIdh>`gZuFTMTx=resZ_#J6-F$ zJF{W6vBhq`%s%gU5f~3p>)qjT_U-h-VKN&-Y#i0!@M(DW_4a_#+C)q!?iH`G$2!%n zFf#G2IQG8O-|mqRnPM${R=I*M}?}lCZAza2Tqt21yk4=NZEN#_qMs)TxSKR!-!QqtujGR zjJ7ux6J2r=z+7oQYu+ca*r4-g81qCC`CeJZn_bj=6V@#8W3p4}+X-{2hYykb!sVyut=6O-VuP`Np#FB? zvNE1rHy7J-&0>_<-=4HhDo(qA3?V%7nj_mzue9sjx3-JwffWy;gdN_S({_Jv53C)Z zE~nx&?xHZEu*1ya6nRSsn(fCXCl`0>$Cv!|5k8jmhJ7vBtP+)=C)ZgzX~M z0}b6fWum+k(Kc((am5P@%W0_|!;C>@57!>cwG!8Uur24^nV!tV%v-oyBs6GFnT$sf z0`>-Zr5pGuv}_w1n97V`D-+c1R2LR9T{SG`0Z$C8&3kg9qry+t0CvjyiT>I*`N|p^ zz$_-2?kN+{XCO|gfNc+7O~5CX?3UJGT*~Aq0))?NoyM<477Pfp39kk!NnM@?H#mEq zG-^<@klT?4P?4pM=f~DuT-|M228{(mdRf3m2vlY|)fVr(6hQN+swjETK)q6zvwiJG z^{Sj;tWt!uXJ%=G$^TeI>=y_bpLXi8xt#3%8(V-YWMKVO@b!ToO!=|+NrOeNciioC zy~ciqTA%>`MNlUi3P(kpxQzBPl`+g+NYSfAb7h2mg_(U6!b zry8(;WkLz2zFyxy?l*Qmew0Z%mW&v`W7*Ouq0C4DWdefIcN19_EiT!i-9!sG`qR1l z!!<_9F|MX%^#pU)$rFoBYwGQ0v-#~WzXBaX6xr95B4#UfDg{WpG28A>r94@&g@9>n z=}AQ{pJ2$NR6g|1&GX^Q*5^QcN-?}_*Lz8HqeaV%pe_>-*}ffLR<0pPRJcjvNZq`1 zhMIXq6T4uo-+Hwg6-k-Ek|K#a4cL4n06LG-I={n$b_-{N3gB5R_2O3PPfUVVto;Jf z*87TbYnBG60^j@qdSiP zH@1z5@%`Nsx30fnAje8?{dF>nGiwpQLTzP9i z5Rh5nKw85JHG&jJVYL}g~3D{-R9DI->}FBD&=QE9Og7k z1OJ{6yf?T)U~tTnR(D@cQ#K#PZgiRT7t0ieHL`p=u03LTW8~(ay6qa{hOANPm=8*0 zSjtZtG5xdK56SH2{ZoCelxMgvn8lWg?5yg1_xG4aV`R5WNh@)T@tU^(h+-Ip)&fvkvY z$P?c^dB1*csd6b(L}U<*IKcx*gCSAe0F7bWIijnyXUbN)n_aQ27`b)Njo+@JeiFzD6*JFtR4r$?MK@X0$TR*pKKN1+x2GR-QI0iej(Ae R&{C8^BV2;~kN>Ct{uiTo{J8)C diff --git a/docs/vocs/bunfig.toml b/docs/vocs/bunfig.toml new file mode 100644 index 00000000000..a38b9b61752 --- /dev/null +++ b/docs/vocs/bunfig.toml @@ -0,0 +1,4 @@ +telemetry = false + +# ensures runtime is always bun regardless of shebang +run.bun = true diff --git a/docs/vocs/docs/components/SdkShowcase.tsx b/docs/vocs/docs/components/SdkShowcase.tsx index 5f878206a84..442d6676f4f 100644 --- a/docs/vocs/docs/components/SdkShowcase.tsx +++ b/docs/vocs/docs/components/SdkShowcase.tsx @@ -1,5 +1,3 @@ -import React from 'react' - interface SdkProject { name: string description: string @@ -43,16 +41,16 @@ const projects: SdkProject[] = [ export function SdkShowcase() { return (
-        {projects.map((project, index) => (
+        {projects.map((project) => (
{/* LoC Badge */}
{project.loc} LoC
-
+
         {/* Content */}
@@ -63,11 +61,11 @@ export function SdkShowcase() { {project.company}

-
+

{project.description}

-
+
         {/* GitHub Link */}
     )
-}
\ No newline at end of file
+}
diff --git a/docs/vocs/docs/components/TrustedBy.tsx b/docs/vocs/docs/components/TrustedBy.tsx
index ef50527f8ea..41b78e8787a 100644
--- a/docs/vocs/docs/components/TrustedBy.tsx
+++ b/docs/vocs/docs/components/TrustedBy.tsx
@@ -1,5 +1,3 @@
-import React from 'react'
-
 interface TrustedCompany {
   name: string
   logoUrl: string
@@ -27,9 +25,9 @@ const companies: TrustedCompany[] = [
 export function TrustedBy() {
   return (
-        {companies.map((company, index) => (
+        {companies.map((company) => (
{/* Company Logo */} @@ -46,4 +44,4 @@ export function TrustedBy() { ))}
) -} \ No newline at end of file +} diff --git a/docs/vocs/links-report.json b/docs/vocs/links-report.json deleted file mode 100644 index 830568362a2..00000000000 --- a/docs/vocs/links-report.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "timestamp": "2025-06-23T11:20:27.303Z", - "totalFiles": 106, - "totalLinks": 150, - "brokenLinks": [ - { - "file": "docs/pages/index.mdx", - "link": "/introduction/benchmarks", - "line": 110, - "reason": "Absolute path not found: /introduction/benchmarks" - } - ], - "summary": { - "brokenCount": 1, - "validCount": 149 - } -} \ No newline at end of file diff --git a/docs/vocs/package.json b/docs/vocs/package.json index f8d43111c51..035fc13b699 100644 --- a/docs/vocs/package.json +++ b/docs/vocs/package.json @@ -13,14 +13,14 @@ "inject-cargo-docs": "bun scripts/inject-cargo-docs.ts" }, "dependencies": { - "react": "latest", - "react-dom": "latest", - "vocs": "latest" + "react": "^19.1.0", + "react-dom": "^19.1.0", + "vocs": "^1.0.13" }, "devDependencies": { - "@types/node": "latest", - "@types/react": "latest", + "@types/node": "^24.0.14", + "@types/react": "^19.1.8", "glob": "^11.0.3", - "typescript": "latest" + "typescript": "^5.8.3" } } \ No newline at end of file diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts index 56f304a8233..cee55bc2d9f 100644 --- a/docs/vocs/vocs.config.ts +++ b/docs/vocs/vocs.config.ts @@ -13,7 +13,7 @@ export default defineConfig({ topNav: [ { text: 'Run', link: '/run/ethereum' }, { text: 'SDK', link: '/sdk' }, - { + { element: React.createElement('a', { href: '/docs', target: '_self' }, 'Rustdocs') }, { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' }, @@ -68,6 +68,6 @@ export default defineConfig({ } }, editLink: { - pattern: "https://github.com/paradigmxyz/reth/edit/main/book/vocs/docs/pages/:path", + pattern: "https://github.com/paradigmxyz/reth/edit/main/docs/vocs/docs/pages/:path", } }) From 3c9ff6e157c40ed42980a14f4055131b71424caa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?luory=20=E2=9C=9E?= Date: Fri, 18 Jul 2025 11:56:59 +0200 Subject: [PATCH 206/305] fix: change hyperlink to reth_codec (#17437) --- docs/design/database.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/design/database.md b/docs/design/database.md index 1ce75d3dc25..0afcbabfacc 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -19,7 +19,7 @@ - [Scale Encoding](https://github.com/paritytech/parity-scale-codec) - [Postcard Encoding](https://github.com/jamesmunns/postcard) - Passthrough (called `no_codec` in the codebase) -- We made implementation of these traits easy via a derive macro called [`reth_codec`](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/codecs/derive/src/lib.rs#L15) that delegates to one of Compact (default), Scale, Postcard or Passthrough encoding. This is [derived on every struct we need](https://github.com/search?q=repo%3Aparadigmxyz%2Freth%20%22%23%5Breth_codec%5D%22&type=code), and lets us experiment with different encoding formats without having to modify the entire codebase each time. +- We made implementation of these traits easy via a derive macro called [`reth_codec`](https://github.com/paradigmxyz/reth/blob/main/crates/storage/codecs/derive/src/lib.rs) that delegates to one of Compact (default), Scale, Postcard or Passthrough encoding. 
This is [derived on every struct we need](https://github.com/search?q=repo%3Aparadigmxyz%2Freth%20%22%23%5Breth_codec%5D%22&type=code), and lets us experiment with different encoding formats without having to modify the entire codebase each time. ### Table layout From ca116aa7b7eb44f359643982a444918a8ebc147f Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Jul 2025 11:57:07 +0200 Subject: [PATCH 207/305] docs: add code example to extend_rpc_modules method (#17446) Co-authored-by: Claude Co-authored-by: Jennifer --- crates/node/builder/src/builder/mod.rs | 33 ++++++++++++++++++++++++++ crates/node/builder/src/rpc.rs | 2 ++ examples/node-custom-rpc/src/main.rs | 3 +++ 3 files changed, 38 insertions(+) diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 0779196b89d..923cb1e5327 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -547,6 +547,39 @@ where } /// Sets the hook that is run to configure the rpc modules. + /// + /// This hook can obtain the node's components (txpool, provider, etc.) and can modify the + /// modules that the RPC server installs. + /// + /// # Examples + /// + /// ```rust,ignore + /// use jsonrpsee::{core::RpcResult, proc_macros::rpc}; + /// + /// #[derive(Clone)] + /// struct CustomApi { pool: Pool } + /// + /// #[rpc(server, namespace = "custom")] + /// impl CustomApi { + /// #[method(name = "hello")] + /// async fn hello(&self) -> RpcResult { + /// Ok("World".to_string()) + /// } + /// } + /// + /// let node = NodeBuilder::new(config) + /// .node(EthereumNode::default()) + /// .extend_rpc_modules(|ctx| { + /// // Access node components, so they can used by the CustomApi + /// let pool = ctx.pool().clone(); + /// + /// // Add custom RPC namespace + /// ctx.modules.merge_configured(CustomApi { pool }.into_rpc())?; + /// + /// Ok(()) + /// }) + /// .build()?; + /// ``` pub fn extend_rpc_modules(self, hook: F) -> Self where F: FnOnce(RpcContext<'_, NodeAdapter, AO::EthApi>) -> eyre::Result<()> diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 6ab2395cd5e..17ed50a286d 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -283,6 +283,8 @@ where } /// Returns a reference to the configured node. + /// + /// This gives access to the node's components. 
pub const fn node(&self) -> &Node { &self.node } diff --git a/examples/node-custom-rpc/src/main.rs b/examples/node-custom-rpc/src/main.rs index 9aba7c9922a..8504949d9d9 100644 --- a/examples/node-custom-rpc/src/main.rs +++ b/examples/node-custom-rpc/src/main.rs @@ -32,7 +32,9 @@ fn main() { Cli::::parse() .run(|builder, args| async move { let handle = builder + // configure default ethereum node .node(EthereumNode::default()) + // extend the rpc modules with our custom `TxpoolExt` endpoints .extend_rpc_modules(move |ctx| { if !args.enable_ext { return Ok(()) @@ -50,6 +52,7 @@ fn main() { Ok(()) }) + // launch the node with custom rpc .launch() .await?; From 1b6f72321ac01bedddd5f0a58dbf6f6469dbec9f Mon Sep 17 00:00:00 2001 From: Rez Date: Fri, 18 Jul 2025 20:21:51 +1000 Subject: [PATCH 208/305] feat: enable CLI support for custom block headers (#17441) --- crates/cli/commands/src/init_state/mod.rs | 17 +++++++++++------ .../cli/commands/src/init_state/without_evm.rs | 10 ++++++---- crates/ethereum/cli/src/interface.rs | 9 ++++----- 3 files changed, 21 insertions(+), 15 deletions(-) diff --git a/crates/cli/commands/src/init_state/mod.rs b/crates/cli/commands/src/init_state/mod.rs index 76e7791e1d4..7a80997b976 100644 --- a/crates/cli/commands/src/init_state/mod.rs +++ b/crates/cli/commands/src/init_state/mod.rs @@ -1,14 +1,14 @@ //! Command that initializes the node from a genesis file. use crate::common::{AccessRights, CliNodeTypes, Environment, EnvironmentArgs}; -use alloy_consensus::Header; +use alloy_consensus::{BlockHeader as AlloyBlockHeader, Header}; use alloy_primitives::{B256, U256}; use clap::Parser; use reth_chainspec::{EthChainSpec, EthereumHardforks}; use reth_cli::chainspec::ChainSpecParser; use reth_db_common::init::init_from_state_dump; use reth_node_api::NodePrimitives; -use reth_primitives_traits::SealedHeader; +use reth_primitives_traits::{BlockHeader, SealedHeader}; use reth_provider::{ BlockNumReader, DatabaseProviderFactory, StaticFileProviderFactory, StaticFileWriter, }; @@ -72,7 +72,7 @@ impl> InitStateC where N: CliNodeTypes< ChainSpec = C::ChainSpec, - Primitives: NodePrimitives, + Primitives: NodePrimitives>, >, { info!(target: "reth::cli", "Reth init-state starting"); @@ -85,7 +85,9 @@ impl> InitStateC if self.without_evm { // ensure header, total difficulty and header hash are provided let header = self.header.ok_or_else(|| eyre::eyre!("Header file must be provided"))?; - let header = without_evm::read_header_from_file(header)?; + let header = without_evm::read_header_from_file::< + ::BlockHeader, + >(header)?; let header_hash = self.header_hash.ok_or_else(|| eyre::eyre!("Header hash must be provided"))?; @@ -103,7 +105,10 @@ impl> InitStateC &provider_rw, SealedHeader::new(header, header_hash), total_difficulty, - |number| Header { number, ..Default::default() }, + |number| { + let header = Header { number, ..Default::default() }; + <::BlockHeader>::from(header) + }, )?; // SAFETY: it's safe to commit static files, since in the event of a crash, they @@ -112,7 +117,7 @@ impl> InitStateC // Necessary to commit, so the header is accessible to provider_rw and // init_state_dump static_file_provider.commit()?; - } else if last_block_number > 0 && last_block_number < header.number { + } else if last_block_number > 0 && last_block_number < header.number() { return Err(eyre::eyre!( "Data directory should be empty when calling init-state with --without-evm-history." 
)); diff --git a/crates/cli/commands/src/init_state/without_evm.rs b/crates/cli/commands/src/init_state/without_evm.rs index c839aaf268e..3a85b175eb4 100644 --- a/crates/cli/commands/src/init_state/without_evm.rs +++ b/crates/cli/commands/src/init_state/without_evm.rs @@ -1,4 +1,4 @@ -use alloy_consensus::{BlockHeader, Header}; +use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, B256, U256}; use alloy_rlp::Decodable; use reth_codecs::Compact; @@ -12,14 +12,16 @@ use reth_stages::{StageCheckpoint, StageId}; use reth_static_file_types::StaticFileSegment; use std::{fs::File, io::Read, path::PathBuf}; use tracing::info; - /// Reads the header RLP from a file and returns the Header. -pub(crate) fn read_header_from_file(path: PathBuf) -> Result { +pub(crate) fn read_header_from_file(path: PathBuf) -> Result +where + H: Decodable, +{ let mut file = File::open(path)?; let mut buf = Vec::new(); file.read_to_end(&mut buf)?; - let header = Header::decode(&mut &buf[..])?; + let header = H::decode(&mut &buf[..])?; Ok(header) } diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index f1bace672bd..e62dad13d09 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -1,6 +1,7 @@ //! CLI definition and entrypoint to executable use crate::chainspec::EthereumChainSpecParser; +use alloy_consensus::Header; use clap::{Parser, Subcommand}; use reth_chainspec::{ChainSpec, EthChainSpec, Hardforks}; use reth_cli::chainspec::ChainSpecParser; @@ -13,7 +14,7 @@ use reth_cli_commands::{ }; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; -use reth_node_api::NodePrimitives; +use reth_node_api::{NodePrimitives, NodeTypes}; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ args::LogArgs, @@ -181,11 +182,9 @@ impl Cli { ) -> eyre::Result<()>, ) -> eyre::Result<()> where - N: CliNodeTypes< - Primitives: NodePrimitives, - ChainSpec: Hardforks, - >, + N: CliNodeTypes, C: ChainSpecParser, + <::Primitives as NodePrimitives>::BlockHeader: From
, { // Add network name if available to the logs dir if let Some(chain_spec) = self.command.chain_spec() { From 0f449f2b391e8db0fbbbae3f977dc2f456ee85ba Mon Sep 17 00:00:00 2001 From: Femi Bankole Date: Fri, 18 Jul 2025 12:54:36 +0100 Subject: [PATCH 209/305] feat: add Middleware generic to AuthServerConfig (#17373) Co-authored-by: Matthias Seitz --- crates/rpc/rpc-builder/src/auth.rs | 65 ++++++++++++++++++++++++++---- 1 file changed, 58 insertions(+), 7 deletions(-) diff --git a/crates/rpc/rpc-builder/src/auth.rs b/crates/rpc/rpc-builder/src/auth.rs index b1a4f4166bd..777081a7e6f 100644 --- a/crates/rpc/rpc-builder/src/auth.rs +++ b/crates/rpc/rpc-builder/src/auth.rs @@ -1,9 +1,13 @@ -use crate::error::{RpcError, ServerKind}; +use crate::{ + error::{RpcError, ServerKind}, + middleware::RethRpcMiddleware, +}; use http::header::AUTHORIZATION; use jsonrpsee::{ core::{client::SubscriptionClientT, RegisterMethodError}, http_client::HeaderMap, server::{AlreadyStoppedError, RpcModule}, + ws_client::RpcServiceBuilder, Methods, }; use reth_rpc_api::servers::*; @@ -21,7 +25,7 @@ pub use reth_ipc::server::Builder as IpcServerBuilder; /// Server configuration for the auth server. #[derive(Debug)] -pub struct AuthServerConfig { +pub struct AuthServerConfig { /// Where the server should listen. pub(crate) socket_addr: SocketAddr, /// The secret for the auth layer of the server. @@ -32,6 +36,8 @@ pub struct AuthServerConfig { pub(crate) ipc_server_config: Option>, /// IPC endpoint pub(crate) ipc_endpoint: Option, + /// Configurable RPC middleware + pub(crate) rpc_middleware: RpcMiddleware, } // === impl AuthServerConfig === @@ -41,24 +47,51 @@ impl AuthServerConfig { pub const fn builder(secret: JwtSecret) -> AuthServerConfigBuilder { AuthServerConfigBuilder::new(secret) } - +} +impl AuthServerConfig { /// Returns the address the server will listen on. pub const fn address(&self) -> SocketAddr { self.socket_addr } + /// Configures the rpc middleware. + pub fn with_rpc_middleware(self, rpc_middleware: T) -> AuthServerConfig { + let Self { socket_addr, secret, server_config, ipc_server_config, ipc_endpoint, .. } = self; + AuthServerConfig { + socket_addr, + secret, + server_config, + ipc_server_config, + ipc_endpoint, + rpc_middleware, + } + } + /// Convenience function to start a server in one step. - pub async fn start(self, module: AuthRpcModule) -> Result { - let Self { socket_addr, secret, server_config, ipc_server_config, ipc_endpoint } = self; + pub async fn start(self, module: AuthRpcModule) -> Result + where + RpcMiddleware: RethRpcMiddleware, + { + let Self { + socket_addr, + secret, + server_config, + ipc_server_config, + ipc_endpoint, + rpc_middleware, + } = self; // Create auth middleware. let middleware = tower::ServiceBuilder::new().layer(AuthLayer::new(JwtAuthValidator::new(secret))); + let rpc_middleware = RpcServiceBuilder::default().layer(rpc_middleware); + // By default, both http and ws are enabled. let server = ServerBuilder::new() .set_config(server_config.build()) .set_http_middleware(middleware) + .set_rpc_middleware(rpc_middleware) .build(socket_addr) .await .map_err(|err| RpcError::server_error(err, ServerKind::Auth(socket_addr)))?; @@ -86,12 +119,13 @@ impl AuthServerConfig { /// Builder type for configuring an `AuthServerConfig`. 
#[derive(Debug)] -pub struct AuthServerConfigBuilder { +pub struct AuthServerConfigBuilder { socket_addr: Option, secret: JwtSecret, server_config: Option, ipc_server_config: Option>, ipc_endpoint: Option, + rpc_middleware: RpcMiddleware, } // === impl AuthServerConfigBuilder === @@ -105,6 +139,22 @@ impl AuthServerConfigBuilder { server_config: None, ipc_server_config: None, ipc_endpoint: None, + rpc_middleware: Identity::new(), + } + } +} + +impl AuthServerConfigBuilder { + /// Configures the rpc middleware. + pub fn with_rpc_middleware(self, rpc_middleware: T) -> AuthServerConfigBuilder { + let Self { socket_addr, secret, server_config, ipc_server_config, ipc_endpoint, .. } = self; + AuthServerConfigBuilder { + socket_addr, + secret, + server_config, + ipc_server_config, + ipc_endpoint, + rpc_middleware, } } @@ -150,7 +200,7 @@ impl AuthServerConfigBuilder { } /// Build the `AuthServerConfig`. - pub fn build(self) -> AuthServerConfig { + pub fn build(self) -> AuthServerConfig { AuthServerConfig { socket_addr: self.socket_addr.unwrap_or_else(|| { SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), constants::DEFAULT_AUTH_PORT) @@ -182,6 +232,7 @@ impl AuthServerConfigBuilder { .set_id_provider(EthSubscriptionIdProvider::default()) }), ipc_endpoint: self.ipc_endpoint, + rpc_middleware: self.rpc_middleware, } } } From 0aef0c35c880114a6ccc57cee85dc18af20ba229 Mon Sep 17 00:00:00 2001 From: cakevm Date: Fri, 18 Jul 2025 14:20:25 +0200 Subject: [PATCH 210/305] feat(alloy-provider): implement `receipt_by_hash` method (#17456) --- Cargo.lock | 20 +++--- Cargo.toml | 10 +-- crates/alloy-provider/src/lib.rs | 24 ++++++- crates/rpc/rpc-convert/src/lib.rs | 2 + crates/rpc/rpc-convert/src/receipt.rs | 99 +++++++++++++++++++++++++++ 5 files changed, 137 insertions(+), 18 deletions(-) create mode 100644 crates/rpc/rpc-convert/src/receipt.rs diff --git a/Cargo.lock b/Cargo.lock index 0691b3aeba3..502a9cc21e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5975,9 +5975,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "op-alloy-consensus" -version = "0.18.9" +version = "0.18.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8719d9b783b29cfa1cf8d591b894805786b9ab4940adc700a57fd0d5b721cf5" +checksum = "18986c5cf19a790b8b9e8c856a950b48ed6dd6a0259d0efd5f5c9bebbba1fc3a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6001,9 +6001,9 @@ checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc" [[package]] name = "op-alloy-network" -version = "0.18.9" +version = "0.18.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "839a7a1826dc1d38fdf9c6d30d1f4ed8182c63816c97054e5815206f1ebf08c7" +checksum = "ac69810db9294e1de90b2cc6688b213399d8a5c96b283220caddd98a65dcbc39" dependencies = [ "alloy-consensus", "alloy-network", @@ -6017,9 +6017,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.18.9" +version = "0.18.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9d3de5348e2b34366413412f1f1534dc6b10d2cf6e8e1d97c451749c0c81c0" +checksum = "490c08acf608a3fd039728dc5b77a2ff903793db223509f4d94e43c22717a8f7" dependencies = [ "alloy-primitives", "jsonrpsee", @@ -6027,9 +6027,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.18.9" +version = "0.18.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9640f9e78751e13963762a4a44c846e9ec7974b130c29a51706f40503fe49152" +checksum = 
"f7dd487b283473591919ba95829f7a8d27d511488948d2ee6b24b283dd83008f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6046,9 +6046,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.18.9" +version = "0.18.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a4559d84f079b3fdfd01e4ee0bb118025e92105fbb89736f5d77ab3ca261698" +checksum = "814d2b82a6d0b973afc78e797a74818165f257041b9173016dccbe3647f8b1da" dependencies = [ "alloy-consensus", "alloy-eips", diff --git a/Cargo.toml b/Cargo.toml index 480bcd54c51..b0c126c8bfc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -511,11 +511,11 @@ alloy-transport-ws = { version = "1.0.22", default-features = false } # op alloy-op-evm = { version = "0.14", default-features = false } alloy-op-hardforks = "0.2.2" -op-alloy-rpc-types = { version = "0.18.7", default-features = false } -op-alloy-rpc-types-engine = { version = "0.18.7", default-features = false } -op-alloy-network = { version = "0.18.7", default-features = false } -op-alloy-consensus = { version = "0.18.7", default-features = false } -op-alloy-rpc-jsonrpsee = { version = "0.18.7", default-features = false } +op-alloy-rpc-types = { version = "0.18.11", default-features = false } +op-alloy-rpc-types-engine = { version = "0.18.11", default-features = false } +op-alloy-network = { version = "0.18.11", default-features = false } +op-alloy-consensus = { version = "0.18.11", default-features = false } +op-alloy-rpc-jsonrpsee = { version = "0.18.11", default-features = false } op-alloy-flz = { version = "0.13.1", default-features = false } # misc diff --git a/crates/alloy-provider/src/lib.rs b/crates/alloy-provider/src/lib.rs index 477327aa23c..c5df823d725 100644 --- a/crates/alloy-provider/src/lib.rs +++ b/crates/alloy-provider/src/lib.rs @@ -44,7 +44,7 @@ use reth_provider::{ TransactionVariant, TransactionsProvider, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; -use reth_rpc_convert::{TryFromBlockResponse, TryFromTransactionResponse}; +use reth_rpc_convert::{TryFromBlockResponse, TryFromReceiptResponse, TryFromTransactionResponse}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_api::{ BlockBodyIndicesProvider, BlockReaderIdExt, BlockSource, DBProvider, NodePrimitivesProvider, @@ -379,6 +379,7 @@ where Node: NodeTypes, BlockTy: TryFromBlockResponse, TxTy: TryFromTransactionResponse, + ReceiptTy: TryFromReceiptResponse, { type Block = BlockTy; @@ -459,6 +460,7 @@ where Node: NodeTypes, BlockTy: TryFromBlockResponse, TxTy: TryFromTransactionResponse, + ReceiptTy: TryFromReceiptResponse, { fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { @@ -484,6 +486,7 @@ where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, + ReceiptTy: TryFromReceiptResponse, { type Receipt = ReceiptTy; @@ -491,8 +494,22 @@ where Err(ProviderError::UnsupportedProvider) } - fn receipt_by_hash(&self, _hash: TxHash) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { + let receipt_response = self.block_on_async(async { + self.provider.get_transaction_receipt(hash).await.map_err(ProviderError::other) + })?; + + let Some(receipt_response) = receipt_response else { + // If the receipt was not found, return None + return Ok(None); + }; + + // Convert the network receipt response to primitive receipt + let receipt = + as TryFromReceiptResponse>::from_receipt_response(receipt_response) + .map_err(ProviderError::other)?; + + Ok(Some(receipt)) } fn 
receipts_by_block( @@ -522,6 +539,7 @@ where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, + ReceiptTy: TryFromReceiptResponse, { } diff --git a/crates/rpc/rpc-convert/src/lib.rs b/crates/rpc/rpc-convert/src/lib.rs index 04821ff4d77..5ea281c4ef8 100644 --- a/crates/rpc/rpc-convert/src/lib.rs +++ b/crates/rpc/rpc-convert/src/lib.rs @@ -12,11 +12,13 @@ pub mod block; mod fees; +pub mod receipt; mod rpc; pub mod transaction; pub use block::TryFromBlockResponse; pub use fees::{CallFees, CallFeesError}; +pub use receipt::TryFromReceiptResponse; pub use rpc::*; pub use transaction::{ EthTxEnvError, IntoRpcTx, RpcConvert, RpcConverter, TransactionConversionError, diff --git a/crates/rpc/rpc-convert/src/receipt.rs b/crates/rpc/rpc-convert/src/receipt.rs new file mode 100644 index 00000000000..5f37c1cad5e --- /dev/null +++ b/crates/rpc/rpc-convert/src/receipt.rs @@ -0,0 +1,99 @@ +//! Conversion traits for receipt responses to primitive receipt types. + +use alloy_network::Network; +use std::convert::Infallible; + +/// Trait for converting network receipt responses to primitive receipt types. +pub trait TryFromReceiptResponse { + /// The error type returned if the conversion fails. + type Error: core::error::Error + Send + Sync + Unpin; + + /// Converts a network receipt response to a primitive receipt type. + /// + /// # Returns + /// + /// Returns `Ok(Self)` on successful conversion, or `Err(Self::Error)` if the conversion fails. + fn from_receipt_response(receipt_response: N::ReceiptResponse) -> Result + where + Self: Sized; +} + +impl TryFromReceiptResponse for reth_ethereum_primitives::Receipt { + type Error = Infallible; + + fn from_receipt_response( + receipt_response: alloy_rpc_types_eth::TransactionReceipt, + ) -> Result { + Ok(receipt_response.into_inner().into()) + } +} + +#[cfg(feature = "op")] +impl TryFromReceiptResponse for reth_optimism_primitives::OpReceipt { + type Error = Infallible; + + fn from_receipt_response( + receipt_response: op_alloy_rpc_types::OpTransactionReceipt, + ) -> Result { + Ok(receipt_response.inner.inner.map_logs(Into::into).into()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::ReceiptEnvelope; + use alloy_network::Ethereum; + use reth_ethereum_primitives::Receipt; + + #[test] + fn test_try_from_receipt_response() { + let rpc_receipt = alloy_rpc_types_eth::TransactionReceipt { + inner: ReceiptEnvelope::Eip1559(Default::default()), + transaction_hash: Default::default(), + transaction_index: None, + block_hash: None, + block_number: None, + gas_used: 0, + effective_gas_price: 0, + blob_gas_used: None, + blob_gas_price: None, + from: Default::default(), + to: None, + contract_address: None, + }; + let result = + >::from_receipt_response(rpc_receipt); + assert!(result.is_ok()); + } + + #[cfg(feature = "op")] + #[test] + fn test_try_from_receipt_response_optimism() { + use op_alloy_consensus::OpReceiptEnvelope; + use op_alloy_network::Optimism; + use op_alloy_rpc_types::OpTransactionReceipt; + use reth_optimism_primitives::OpReceipt; + + let op_receipt = OpTransactionReceipt { + inner: alloy_rpc_types_eth::TransactionReceipt { + inner: OpReceiptEnvelope::Eip1559(Default::default()), + transaction_hash: Default::default(), + transaction_index: None, + block_hash: None, + block_number: None, + gas_used: 0, + effective_gas_price: 0, + blob_gas_used: None, + blob_gas_price: None, + from: Default::default(), + to: None, + contract_address: None, + }, + l1_block_info: Default::default(), + }; + let result = + 
>::from_receipt_response(op_receipt); + assert!(result.is_ok()); + } +} From 8fb0fbba7375eb3e66494b8d6dfe7986bd11c3a6 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Fri, 18 Jul 2025 16:27:18 +0200 Subject: [PATCH 211/305] chore: fix reth-engine-tree dev-dependencies import (#17487) --- crates/engine/tree/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 550895798dd..2609466b28e 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -72,7 +72,7 @@ reth-tracing = { workspace = true, optional = true } [dev-dependencies] # reth -reth-evm-ethereum.workspace = true +reth-evm-ethereum = { workspace = true, features = ["test-utils"] } reth-chain-state = { workspace = true, features = ["test-utils"] } reth-chainspec.workspace = true reth-db-common.workspace = true From 537ffeacaca4fad4ebc26031382b350cc0f581d6 Mon Sep 17 00:00:00 2001 From: ongyimeng <73429081+ongyimeng@users.noreply.github.com> Date: Fri, 18 Jul 2025 22:44:28 +0800 Subject: [PATCH 212/305] feat: continue opchainspec support (#17422) Co-authored-by: rose2221 Co-authored-by: Arsenii Kulikov --- Cargo.lock | 1 + crates/optimism/chainspec/Cargo.toml | 3 + crates/optimism/chainspec/src/basefee.rs | 29 ++++++++ crates/optimism/chainspec/src/lib.rs | 12 +++- crates/optimism/consensus/Cargo.toml | 5 +- crates/optimism/consensus/src/lib.rs | 32 ++------- .../optimism/consensus/src/validation/mod.rs | 67 ++++++------------- crates/optimism/evm/src/lib.rs | 6 +- 8 files changed, 79 insertions(+), 76 deletions(-) create mode 100644 crates/optimism/chainspec/src/basefee.rs diff --git a/Cargo.lock b/Cargo.lock index 502a9cc21e1..adb4d5e471c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9130,6 +9130,7 @@ dependencies = [ "alloy-primitives", "derive_more", "miniz_oxide", + "op-alloy-consensus", "op-alloy-rpc-types", "paste", "reth-chainspec", diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index 80de3edcb70..e35b5b77c7e 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -44,6 +44,7 @@ miniz_oxide = { workspace = true, features = ["with-alloc"], optional = true } derive_more.workspace = true paste = { workspace = true, optional = true } thiserror = { workspace = true, optional = true } +op-alloy-consensus.workspace = true [dev-dependencies] reth-chainspec = { workspace = true, features = ["test-utils"] } @@ -71,6 +72,7 @@ std = [ "serde?/std", "miniz_oxide?/std", "thiserror?/std", + "op-alloy-consensus/std", ] serde = [ "alloy-chains/serde", @@ -84,4 +86,5 @@ serde = [ "reth-optimism-forks/serde", "reth-optimism-primitives/serde", "reth-primitives-traits/serde", + "op-alloy-consensus/serde", ] diff --git a/crates/optimism/chainspec/src/basefee.rs b/crates/optimism/chainspec/src/basefee.rs new file mode 100644 index 00000000000..b28c0c478d0 --- /dev/null +++ b/crates/optimism/chainspec/src/basefee.rs @@ -0,0 +1,29 @@ +//! Base fee related utilities for Optimism chains. + +use alloy_consensus::BlockHeader; +use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError}; +use reth_chainspec::{BaseFeeParams, EthChainSpec}; +use reth_optimism_forks::OpHardforks; + +/// Extracts the Holocene 1599 parameters from the encoded extra data from the parent header. +/// +/// Caution: Caller must ensure that holocene is active in the parent header. 
+/// +/// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) +pub fn decode_holocene_base_fee( + chain_spec: impl EthChainSpec + OpHardforks, + parent: &H, + timestamp: u64, +) -> Result +where + H: BlockHeader, +{ + let (elasticity, denominator) = decode_holocene_extra_data(parent.extra_data())?; + let base_fee_params = if elasticity == 0 && denominator == 0 { + chain_spec.base_fee_params_at_timestamp(timestamp) + } else { + BaseFeeParams::new(denominator as u128, elasticity as u128) + }; + + Ok(parent.next_block_base_fee(base_fee_params).unwrap_or_default()) +} diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index ba3f317d198..3a7e69fd3a3 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -34,6 +34,7 @@ extern crate alloc; mod base; mod base_sepolia; +mod basefee; pub mod constants; mod dev; @@ -47,6 +48,7 @@ pub use superchain::*; pub use base::BASE_MAINNET; pub use base_sepolia::BASE_SEPOLIA; +pub use basefee::*; pub use dev::OP_DEV; pub use op::OP_MAINNET; pub use op_sepolia::OP_SEPOLIA; @@ -56,7 +58,7 @@ pub use reth_optimism_forks::*; use alloc::{boxed::Box, vec, vec::Vec}; use alloy_chains::Chain; -use alloy_consensus::{proofs::storage_root_unhashed, Header}; +use alloy_consensus::{proofs::storage_root_unhashed, BlockHeader, Header}; use alloy_eips::eip7840::BlobParams; use alloy_genesis::Genesis; use alloy_hardforks::Hardfork; @@ -286,6 +288,14 @@ impl EthChainSpec for OpChainSpec { fn final_paris_total_difficulty(&self) -> Option { self.inner.final_paris_total_difficulty() } + + fn next_block_base_fee(&self, parent: &Header, target_timestamp: u64) -> Option { + if self.is_holocene_active_at_timestamp(parent.timestamp()) { + decode_holocene_base_fee(self, parent, parent.timestamp()).ok() + } else { + self.inner.next_block_base_fee(parent, target_timestamp) + } + } } impl Hardforks for OpChainSpec { diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index 92e1642b5ba..2276f911cd8 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -32,11 +32,11 @@ alloy-primitives.workspace = true alloy-consensus.workspace = true alloy-trie.workspace = true revm.workspace = true -op-alloy-consensus.workspace = true # misc tracing.workspace = true thiserror.workspace = true +reth-optimism-chainspec.workspace = true [dev-dependencies] reth-provider = { workspace = true, features = ["test-utils"] } @@ -49,6 +49,7 @@ reth-db-api = { workspace = true, features = ["op"] } alloy-chains.workspace = true alloy-primitives.workspace = true + op-alloy-consensus.workspace = true [features] @@ -69,10 +70,10 @@ std = [ "alloy-primitives/std", "alloy-consensus/std", "alloy-trie/std", - "op-alloy-consensus/std", "reth-revm/std", "revm/std", "tracing/std", "thiserror/std", "reth-execution-types/std", + "op-alloy-consensus/std", ] diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 3e4201dc73b..5e256593ef0 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -34,9 +34,7 @@ mod proof; pub use proof::calculate_receipt_root_no_memo_optimism; pub mod validation; -pub use validation::{ - canyon, decode_holocene_base_fee, isthmus, next_block_base_fee, validate_block_post_execution, -}; +pub use validation::{canyon, isthmus, validate_block_post_execution}; pub mod error; pub use 
error::OpConsensusError; @@ -178,29 +176,11 @@ where validate_against_parent_timestamp(header.header(), parent.header())?; } - // EIP1559 base fee validation - // - // > if Holocene is active in parent_header.timestamp, then the parameters from - // > parent_header.extraData are used. - if self.chain_spec.is_holocene_active_at_timestamp(parent.timestamp()) { - let header_base_fee = - header.base_fee_per_gas().ok_or(ConsensusError::BaseFeeMissing)?; - let expected_base_fee = - decode_holocene_base_fee(&self.chain_spec, parent.header(), header.timestamp()) - .map_err(|_| ConsensusError::BaseFeeMissing)?; - if expected_base_fee != header_base_fee { - return Err(ConsensusError::BaseFeeDiff(GotExpected { - expected: expected_base_fee, - got: header_base_fee, - })) - } - } else { - validate_against_parent_eip1559_base_fee( - header.header(), - parent.header(), - &self.chain_spec, - )?; - } + validate_against_parent_eip1559_base_fee( + header.header(), + parent.header(), + &self.chain_spec, + )?; // ensure that the blob gas fields for this block if let Some(blob_params) = self.chain_spec.blob_params_at_timestamp(header.timestamp()) { diff --git a/crates/optimism/consensus/src/validation/mod.rs b/crates/optimism/consensus/src/validation/mod.rs index a025ae8931c..0846572a3d9 100644 --- a/crates/optimism/consensus/src/validation/mod.rs +++ b/crates/optimism/consensus/src/validation/mod.rs @@ -3,14 +3,15 @@ pub mod canyon; pub mod isthmus; +// Re-export the decode_holocene_base_fee function for compatibility +pub use reth_optimism_chainspec::decode_holocene_base_fee; + use crate::proof::calculate_receipt_root_optimism; use alloc::vec::Vec; use alloy_consensus::{BlockHeader, TxReceipt, EMPTY_OMMER_ROOT_HASH}; use alloy_eips::Encodable2718; use alloy_primitives::{Bloom, Bytes, B256}; use alloy_trie::EMPTY_ROOT_HASH; -use op_alloy_consensus::{decode_holocene_extra_data, EIP1559ParamError}; -use reth_chainspec::{BaseFeeParams, EthChainSpec}; use reth_consensus::ConsensusError; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::DepositReceipt; @@ -171,51 +172,13 @@ fn compare_receipts_root_and_logs_bloom( Ok(()) } -/// Extracts the Holocene 1599 parameters from the encoded extra data from the parent header. -/// -/// Caution: Caller must ensure that holocene is active in the parent header. -/// -/// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) -pub fn decode_holocene_base_fee( - chain_spec: impl EthChainSpec + OpHardforks, - parent: impl BlockHeader, - timestamp: u64, -) -> Result { - let (elasticity, denominator) = decode_holocene_extra_data(parent.extra_data())?; - let base_fee_params = if elasticity == 0 && denominator == 0 { - chain_spec.base_fee_params_at_timestamp(timestamp) - } else { - BaseFeeParams::new(denominator as u128, elasticity as u128) - }; - - Ok(parent.next_block_base_fee(base_fee_params).unwrap_or_default()) -} - -/// Read from parent to determine the base fee for the next block -/// -/// See also [Base fee computation](https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/holocene/exec-engine.md#base-fee-computation) -pub fn next_block_base_fee( - chain_spec: impl EthChainSpec
+ OpHardforks, - parent: &H, - timestamp: u64, -) -> Result { - // If we are in the Holocene, we need to use the base fee params - // from the parent block's extra data. - // Else, use the base fee params (default values) from chainspec - if chain_spec.is_holocene_active_at_timestamp(parent.timestamp()) { - Ok(decode_holocene_base_fee(chain_spec, parent, timestamp)?) - } else { - Ok(chain_spec.next_block_base_fee(parent, timestamp).unwrap_or_default()) - } -} - #[cfg(test)] mod tests { use super::*; use alloy_consensus::Header; use alloy_primitives::{b256, hex, Bytes, U256}; use op_alloy_consensus::OpTxEnvelope; - use reth_chainspec::{ChainSpec, ForkCondition, Hardfork}; + use reth_chainspec::{BaseFeeParams, ChainSpec, EthChainSpec, ForkCondition, Hardfork}; use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA}; use reth_optimism_forks::{OpHardfork, BASE_SEPOLIA_HARDFORKS}; use std::sync::Arc; @@ -255,7 +218,8 @@ mod tests { gas_limit: 144000000, ..Default::default() }; - let base_fee = next_block_base_fee(&op_chain_spec, &parent, 0); + let base_fee = + reth_optimism_chainspec::OpChainSpec::next_block_base_fee(&op_chain_spec, &parent, 0); assert_eq!( base_fee.unwrap(), op_chain_spec.next_block_base_fee(&parent, 0).unwrap_or_default() @@ -273,7 +237,11 @@ mod tests { extra_data: Bytes::from_static(&[0, 0, 0, 0, 0, 0, 0, 0, 0]), ..Default::default() }; - let base_fee = next_block_base_fee(&op_chain_spec, &parent, 1800000005); + let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee( + &op_chain_spec, + &parent, + 1800000005, + ); assert_eq!( base_fee.unwrap(), op_chain_spec.next_block_base_fee(&parent, 0).unwrap_or_default() @@ -291,7 +259,11 @@ mod tests { ..Default::default() }; - let base_fee = next_block_base_fee(holocene_chainspec(), &parent, 1800000005); + let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee( + &holocene_chainspec(), + &parent, + 1800000005, + ); assert_eq!( base_fee.unwrap(), parent @@ -312,7 +284,12 @@ mod tests { ..Default::default() }; - let base_fee = next_block_base_fee(&*BASE_SEPOLIA, &parent, 1735315546).unwrap(); + let base_fee = reth_optimism_chainspec::OpChainSpec::next_block_base_fee( + &*BASE_SEPOLIA, + &parent, + 1735315546, + ) + .unwrap(); assert_eq!(base_fee, 507); } diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index e493d6d9c52..db42bf929dc 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -22,7 +22,6 @@ use op_revm::{OpSpecId, OpTransaction}; use reth_chainspec::EthChainSpec; use reth_evm::{ConfigureEvm, EvmEnv}; use reth_optimism_chainspec::OpChainSpec; -use reth_optimism_consensus::next_block_base_fee; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{DepositReceipt, OpPrimitives}; use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader, SignedTransaction}; @@ -187,7 +186,10 @@ where prevrandao: Some(attributes.prev_randao), gas_limit: attributes.gas_limit, // calculate basefee based on parent block's gas usage - basefee: next_block_base_fee(self.chain_spec(), parent, attributes.timestamp)?, + basefee: self + .chain_spec() + .next_block_base_fee(parent, attributes.timestamp) + .unwrap_or_default(), // calculate excess gas based on parent block's blob gas usage blob_excess_gas_and_price, }; From 623920c63d1b386efdce2b65c746093990adf115 Mon Sep 17 00:00:00 2001 From: ongyimeng <73429081+ongyimeng@users.noreply.github.com> Date: Sat, 19 Jul 2025 00:06:37 +0800 Subject: [PATCH 213/305] fix: set correct timestamp 
when calculating basefee (#17493) Co-authored-by: rose2221 Co-authored-by: Arsenii Kulikov --- crates/optimism/chainspec/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 3a7e69fd3a3..a2b91249351 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -291,7 +291,7 @@ impl EthChainSpec for OpChainSpec { fn next_block_base_fee(&self, parent: &Header, target_timestamp: u64) -> Option { if self.is_holocene_active_at_timestamp(parent.timestamp()) { - decode_holocene_base_fee(self, parent, parent.timestamp()).ok() + decode_holocene_base_fee(self, parent, target_timestamp).ok() } else { self.inner.next_block_base_fee(parent, target_timestamp) } From 2ced40914162c8e2e30ffa7b010c4042be3b6030 Mon Sep 17 00:00:00 2001 From: cakevm Date: Fri, 18 Jul 2025 18:37:10 +0200 Subject: [PATCH 214/305] feat(alloy-provider): implement methods for BlockReaderIdExt (#17491) --- crates/alloy-provider/src/lib.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/crates/alloy-provider/src/lib.rs b/crates/alloy-provider/src/lib.rs index c5df823d725..c2d2b5d15da 100644 --- a/crates/alloy-provider/src/lib.rs +++ b/crates/alloy-provider/src/lib.rs @@ -464,20 +464,26 @@ where { fn block_by_id(&self, id: BlockId) -> ProviderResult> { match id { - BlockId::Number(number_or_tag) => self.block_by_number_or_tag(number_or_tag), BlockId::Hash(hash) => self.block_by_hash(hash.block_hash), + BlockId::Number(number_or_tag) => self.block_by_number_or_tag(number_or_tag), } } fn sealed_header_by_id( &self, - _id: BlockId, + id: BlockId, ) -> ProviderResult>> { - Err(ProviderError::UnsupportedProvider) + match id { + BlockId::Hash(hash) => self.sealed_header_by_hash(hash.block_hash), + BlockId::Number(number_or_tag) => self.sealed_header_by_number_or_tag(number_or_tag), + } } - fn header_by_id(&self, _id: BlockId) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn header_by_id(&self, id: BlockId) -> ProviderResult> { + match id { + BlockId::Hash(hash) => self.header_by_hash_or_number(hash.block_hash.into()), + BlockId::Number(number_or_tag) => self.header_by_number_or_tag(number_or_tag), + } } } From 81b93ac58ba210967516e6f5f5f9054ce38b22d5 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Jul 2025 20:02:51 +0200 Subject: [PATCH 215/305] chore: downgrade threadpool init error (#17483) --- crates/node/builder/src/launch/common.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 2de4bbd7de6..49381462fa9 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -230,7 +230,7 @@ impl LaunchContext { .thread_name(|i| format!("reth-rayon-{i}")) .build_global() { - error!(%err, "Failed to build global thread pool") + warn!(%err, "Failed to build global thread pool") } } } From b0aed0dded2e482f924f92e5671993b215d164d6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 18 Jul 2025 20:12:43 +0200 Subject: [PATCH 216/305] fix: force set basefee to 0 if gasprice is 0 (#17496) --- crates/rpc/rpc-eth-api/src/helpers/call.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 707aa052543..269ce4f5a17 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ 
b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -771,6 +771,11 @@ pub trait Call: let request_gas = request.as_ref().gas_limit(); let mut tx_env = self.create_txn_env(&evm_env, request, &mut *db)?; + // lower the basefee to 0 to avoid breaking EVM invariants (basefee < gasprice): + if tx_env.gas_price() == 0 { + evm_env.block_env.basefee = 0; + } + if request_gas.is_none() { // No gas limit was provided in the request, so we need to cap the transaction gas limit if tx_env.gas_price() > 0 { From f0572fc9d303c5aad14a41dc42c9f0251afed5f8 Mon Sep 17 00:00:00 2001 From: Dan Cline <6798349+Rjected@users.noreply.github.com> Date: Sat, 19 Jul 2025 02:44:39 -0400 Subject: [PATCH 217/305] perf(tree): add metric for payload conversion + validation latency (#17499) --- crates/engine/tree/src/tree/metrics.rs | 11 +++++++++++ crates/engine/tree/src/tree/mod.rs | 6 ++++++ 2 files changed, 17 insertions(+) diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index f78756f72e9..d3478b6c3ff 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -71,6 +71,10 @@ pub(crate) struct BlockValidationMetrics { pub(crate) state_root_duration: Gauge, /// Trie input computation duration pub(crate) trie_input_duration: Histogram, + /// Payload conversion and validation latency + pub(crate) payload_validation_duration: Gauge, + /// Histogram of payload validation latency + pub(crate) payload_validation_histogram: Histogram, } impl BlockValidationMetrics { @@ -81,6 +85,13 @@ impl BlockValidationMetrics { self.state_root_duration.set(elapsed_as_secs); self.state_root_histogram.record(elapsed_as_secs); } + + /// Records a new payload validation time, updating both the histogram and the payload + /// validation gauge + pub(crate) fn record_payload_validation(&self, elapsed_as_secs: f64) { + self.payload_validation_duration.set(elapsed_as_secs); + self.payload_validation_histogram.record(elapsed_as_secs); + } } /// Metrics for the blockchain tree block buffer diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index b09828bd93d..a029df3b3e4 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -526,6 +526,8 @@ where trace!(target: "engine::tree", "invoked new payload"); self.metrics.engine.new_payload_messages.increment(1); + let validation_start = Instant::now(); + // Ensures that the given payload does not violate any consensus rules that concern the // block's layout, like: // - missing or invalid base fee @@ -573,6 +575,10 @@ where } }; + self.metrics + .block_validation + .record_payload_validation(validation_start.elapsed().as_secs_f64()); + let num_hash = block.num_hash(); let engine_event = BeaconConsensusEngineEvent::BlockReceived(num_hash); self.emit_event(EngineApiEvent::BeaconConsensus(engine_event)); From c1a33a2e6efa7c14365141b02ca3be39651c6c7f Mon Sep 17 00:00:00 2001 From: NeoByteX <160131789+NeoByteXx@users.noreply.github.com> Date: Sat, 19 Jul 2025 08:52:59 +0200 Subject: [PATCH 218/305] docs: fix outdated file paths in database.md links (#17486) --- docs/design/database.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/design/database.md b/docs/design/database.md index 0afcbabfacc..a560b7a14e6 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -2,13 +2,13 @@ ## Abstractions -- We created a [Database trait 
abstraction](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/mod.rs) using Rust Stable GATs which frees us from being bound to a single database implementation. We currently use MDBX, but are exploring [redb](https://github.com/cberner/redb) as an alternative. +- We created a [Database trait abstraction](https://github.com/paradigmxyz/reth/blob/main/crates/cli/commands/src/db/mod.rs) using Rust Stable GATs which frees us from being bound to a single database implementation. We currently use MDBX, but are exploring [redb](https://github.com/cberner/redb) as an alternative. - We then iterated on [`Transaction`](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/stages/src/db.rs#L14-L19) as a non-leaky abstraction with helpers for strictly-typed and unit-tested higher-level database abstractions. ## Codecs - We want Reth's serialized format to be able to trade off read/write speed for size, depending on who the user is. -- To achieve that, we created the [Encode/Decode/Compress/Decompress traits](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/table.rs#L9-L36) to make the (de)serialization of database `Table::Key` and `Table::Values` generic. +- To achieve that, we created the [Encode/Decode/Compress/Decompress traits](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/table.rs) to make the (de)serialization of database `Table::Key` and `Table::Values` generic. - This allows for [out-of-the-box benchmarking](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/db/benches/encoding_iai.rs#L5) (using [Criterion](https://github.com/bheisler/criterion.rs)) - It also enables [out-of-the-box fuzzing](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/codecs/fuzz/mod.rs) using [trailofbits/test-fuzz](https://github.com/trailofbits/test-fuzz). - We implemented that trait for the following encoding formats: From 627658bda06b1987bfa737b64dd1862b19a63722 Mon Sep 17 00:00:00 2001 From: viktorking7 <140458814+viktorking7@users.noreply.github.com> Date: Sat, 19 Jul 2025 08:58:50 +0200 Subject: [PATCH 219/305] fix: correct documentation for block_mut method in SealedBlock (#17489) --- crates/primitives-traits/src/block/sealed.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/primitives-traits/src/block/sealed.rs b/crates/primitives-traits/src/block/sealed.rs index dd0bc0b6652..9e160728192 100644 --- a/crates/primitives-traits/src/block/sealed.rs +++ b/crates/primitives-traits/src/block/sealed.rs @@ -349,7 +349,7 @@ impl SealedBlock { self.header.set_hash(hash) } - /// Returns a mutable reference to the header. + /// Returns a mutable reference to the body. 
pub const fn body_mut(&mut self) -> &mut B::Body { &mut self.body } From 03ceac7e79d4e8151c5e12f636c48da2525dff02 Mon Sep 17 00:00:00 2001 From: anim001k <140460766+anim001k@users.noreply.github.com> Date: Sat, 19 Jul 2025 13:08:34 +0200 Subject: [PATCH 220/305] fix: refactor trace log key and comment formatting (#17459) --- crates/net/network/src/peers.rs | 2 +- crates/net/network/src/state.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index c0694023ceb..d851a461ccc 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -956,7 +956,7 @@ impl PeersManager { if peer.addr != new_addr { peer.addr = new_addr; - trace!(target: "net::peers", ?peer_id, addre=?peer.addr, "Updated resolved trusted peer address"); + trace!(target: "net::peers", ?peer_id, addr=?peer.addr, "Updated resolved trusted peer address"); } } } diff --git a/crates/net/network/src/state.rs b/crates/net/network/src/state.rs index be01312bff0..57d1a73198e 100644 --- a/crates/net/network/src/state.rs +++ b/crates/net/network/src/state.rs @@ -497,7 +497,7 @@ impl NetworkState { self.on_peer_action(action); } - // We need to poll again tn case we have received any responses because they may have + // We need to poll again in case we have received any responses because they may have // triggered follow-up requests. if self.queued_messages.is_empty() { return Poll::Pending From 1175f6c178aea698020293bdce1ebd93624d19f8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 20 Jul 2025 09:14:55 +0000 Subject: [PATCH 221/305] chore(deps): weekly `cargo update` (#17506) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> --- Cargo.lock | 87 +++++++++++++++++++++++++++--------------------------- 1 file changed, 43 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index adb4d5e471c..6d783a3f05f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,9 +97,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5674914c2cfdb866c21cb0c09d82374ee39a1395cf512e7515f4c014083b3fff" +checksum = "4195a29a4b87137b2bb02105e746102873bc03561805cf45c0e510c961f160e6" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -107,7 +107,7 @@ dependencies = [ "num_enum", "proptest", "serde", - "strum 0.27.1", + "strum 0.27.2", ] [[package]] @@ -640,7 +640,7 @@ dependencies = [ "jsonwebtoken", "rand 0.8.5", "serde", - "strum 0.27.1", + "strum 0.27.2", ] [[package]] @@ -1922,9 +1922,9 @@ dependencies = [ [[package]] name = "bytemuck_derive" -version = "1.9.3" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1" +checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4" dependencies = [ "proc-macro2", "quote", @@ -2613,9 +2613,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.2.0" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373b7c5dbd637569a2cca66e8d66b8c446a1e7bf064ea321d265d7b3dfe7c97e" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if", "cpufeatures", @@ -3712,9 +3712,9 @@ dependencies = [ 
[[package]] name = "fiat-crypto" -version = "0.3.0" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64cd1e32ddd350061ae6edb1b082d7c54915b5c672c389143b9a63403a109f24" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "filetime" @@ -4386,7 +4386,7 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", - "webpki-roots 1.0.1", + "webpki-roots 1.0.2", ] [[package]] @@ -4790,9 +4790,9 @@ dependencies = [ [[package]] name = "instability" -version = "0.3.7" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf9fed6d91cfb734e7476a06bde8300a1b94e217e1b523b6f0cd1a01998c71d" +checksum = "435d80800b936787d62688c927b6490e887c7ef5ff9ce922c6c6050fca75eb9a" dependencies = [ "darling", "indoc", @@ -5278,9 +5278,9 @@ dependencies = [ [[package]] name = "libredox" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1580801010e535496706ba011c15f8532df6b42297d2e471fec38ceadd8c0638" +checksum = "4488594b9328dee448adb906d8b126d9b7deb7cf5c22161ee591610bb1be83c0" dependencies = [ "bitflags 2.9.1", "libc", @@ -7135,7 +7135,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.1", + "webpki-roots 1.0.2", ] [[package]] @@ -7584,7 +7584,7 @@ dependencies = [ "rustc-hash 2.1.1", "serde", "serde_json", - "strum 0.27.1", + "strum 0.27.2", "sysinfo", "tempfile", "thiserror 2.0.12", @@ -8934,7 +8934,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "shellexpand", - "strum 0.27.1", + "strum 0.27.2", "thiserror 2.0.12", "tokio", "toml", @@ -9666,7 +9666,7 @@ dependencies = [ "revm-database", "revm-database-interface", "revm-state", - "strum 0.27.1", + "strum 0.27.2", "tempfile", "tokio", "tracing", @@ -9742,8 +9742,8 @@ dependencies = [ "reth-ress-protocol", "reth-storage-errors", "reth-tracing", - "strum 0.27.1", - "strum_macros 0.27.1", + "strum 0.27.2", + "strum_macros 0.27.2", "tokio", "tokio-stream", "tracing", @@ -10166,7 +10166,7 @@ dependencies = [ "reth-errors", "reth-network-api", "serde", - "strum 0.27.1", + "strum 0.27.2", ] [[package]] @@ -10334,7 +10334,7 @@ dependencies = [ "derive_more", "reth-nippy-jar", "serde", - "strum 0.27.1", + "strum 0.27.2", ] [[package]] @@ -11125,15 +11125,15 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" dependencies = [ "bitflags 2.9.1", "errno", "libc", "linux-raw-sys 0.9.4", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -11455,9 +11455,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" dependencies = [ "indexmap 2.10.0", "itoa", @@ -11831,11 +11831,11 @@ dependencies = [ [[package]] name = "strum" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" dependencies = [ - "strum_macros 
0.27.1", + "strum_macros 0.27.2", ] [[package]] @@ -11853,14 +11853,13 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77a8c5abcaf0f9ce05d62342b7d298c346515365c36b673df4ebe3ced01fde8" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ "heck", "proc-macro2", "quote", - "rustversion", "syn 2.0.104", ] @@ -11981,7 +11980,7 @@ dependencies = [ "fastrand 2.3.0", "getrandom 0.3.3", "once_cell", - "rustix 1.0.7", + "rustix 1.0.8", "windows-sys 0.59.0", ] @@ -12624,9 +12623,9 @@ dependencies = [ [[package]] name = "tracy-client-sys" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9612d9503675b07b244922ea6f6f3cdd88c43add1b3498084613fc88cdf69d" +checksum = "319c70195101a93f56db4c74733e272d720768e13471f400c78406a326b172b0" dependencies = [ "cc", "windows-targets 0.52.6", @@ -13117,14 +13116,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" dependencies = [ - "webpki-root-certs 1.0.1", + "webpki-root-certs 1.0.2", ] [[package]] name = "webpki-root-certs" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86138b15b2b7d561bc4469e77027b8dd005a43dc502e9031d1f5afc8ce1f280e" +checksum = "4e4ffd8df1c57e87c325000a3d6ef93db75279dc3a231125aac571650f22b12a" dependencies = [ "rustls-pki-types", ] @@ -13135,14 +13134,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.1", + "webpki-roots 1.0.2", ] [[package]] name = "webpki-roots" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8782dd5a41a24eed3a4f40b606249b3e236ca61adf1f25ea4d45c73de122b502" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" dependencies = [ "rustls-pki-types", ] @@ -13780,7 +13779,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af3a19837351dc82ba89f8a125e22a3c475f05aba604acc023d62b2739ae2909" dependencies = [ "libc", - "rustix 1.0.7", + "rustix 1.0.8", ] [[package]] From 8f38b42e3f2e3ee46f0de21b1b9797927073bf73 Mon Sep 17 00:00:00 2001 From: cakevm Date: Sun, 20 Jul 2025 13:04:48 +0200 Subject: [PATCH 222/305] feat(alloy-provider): implement `receipts_by_block` and other methods (#17507) --- crates/alloy-provider/src/lib.rs | 184 +++++++++++++++++++++---------- 1 file changed, 128 insertions(+), 56 deletions(-) diff --git a/crates/alloy-provider/src/lib.rs b/crates/alloy-provider/src/lib.rs index c2d2b5d15da..2726765912f 100644 --- a/crates/alloy-provider/src/lib.rs +++ b/crates/alloy-provider/src/lib.rs @@ -21,7 +21,7 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use alloy_consensus::BlockHeader; -use alloy_eips::BlockHashOrNumber; +use alloy_eips::{BlockHashOrNumber, BlockNumberOrTag}; use alloy_network::{primitives::HeaderResponse, BlockResponse}; use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, TxHash, TxNumber, B256, U256}; use alloy_provider::{ext::DebugApi, network::Network, Provider}; @@ -33,7 +33,9 @@ use reth_db_api::{ models::StoredBlockBodyIndices, }; use reth_errors::{ProviderError, ProviderResult}; -use 
reth_node_types::{Block, BlockTy, HeaderTy, NodeTypes, PrimitivesTy, ReceiptTy, TxTy}; +use reth_node_types::{ + Block, BlockBody, BlockTy, HeaderTy, NodeTypes, PrimitivesTy, ReceiptTy, TxTy, +}; use reth_primitives::{Account, Bytecode, RecoveredBlock, SealedHeader, TransactionMeta}; use reth_provider::{ AccountReader, BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BytecodeReader, @@ -53,12 +55,12 @@ use reth_storage_api::{ use reth_trie::{updates::TrieUpdates, AccountProof, HashedPostState, MultiProof, TrieInput}; use std::{ collections::BTreeMap, - future::Future, + future::{Future, IntoFuture}, ops::{RangeBounds, RangeInclusive}, sync::Arc, }; use tokio::{runtime::Handle, sync::broadcast}; -use tracing::trace; +use tracing::{trace, warn}; /// Configuration for `AlloyRethProvider` #[derive(Debug, Clone, Default)] @@ -163,6 +165,7 @@ where block_id, self.chain_spec.clone(), ) + .with_compute_state_root(self.config.compute_state_root) } /// Helper function to get state provider by block number @@ -209,8 +212,16 @@ where Node: NodeTypes, { fn chain_info(&self) -> Result { - // For RPC provider, we can't get full chain info - Err(ProviderError::UnsupportedProvider) + self.block_on_async(async { + let block = self + .provider + .get_block(BlockId::Number(BlockNumberOrTag::Latest)) + .await + .map_err(ProviderError::other)? + .ok_or(ProviderError::HeaderNotFound(0.into()))?; + + Ok(ChainInfo { best_hash: block.header().hash(), best_number: block.header().number() }) + }) } fn best_block_number(&self) -> Result { @@ -309,12 +320,16 @@ where Ok(Some(sealed_header.into_header())) } - fn header_td(&self, _hash: &BlockHash) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn header_td(&self, hash: &BlockHash) -> ProviderResult> { + let header = self.header(hash).map_err(ProviderError::other)?; + + Ok(header.map(|b| b.difficulty())) } - fn header_td_by_number(&self, _number: BlockNumber) -> ProviderResult> { - Err(ProviderError::UnsupportedProvider) + fn header_td_by_number(&self, number: BlockNumber) -> ProviderResult> { + let header = self.header_by_number(number).map_err(ProviderError::other)?; + + Ok(header.map(|b| b.difficulty())) } fn headers_range( @@ -520,9 +535,33 @@ where fn receipts_by_block( &self, - _block: BlockHashOrNumber, + block: BlockHashOrNumber, ) -> ProviderResult>> { - Err(ProviderError::UnsupportedProvider) + self.block_on_async(async { + let receipts_response = self + .provider + .get_block_receipts(block.into()) + .await + .map_err(ProviderError::other)?; + + let Some(receipts) = receipts_response else { + // If the receipts were not found, return None + return Ok(None); + }; + + // Convert the network receipts response to primitive receipts + let receipts = receipts + .into_iter() + .map(|receipt_response| { + as TryFromReceiptResponse>::from_receipt_response( + receipt_response, + ) + .map_err(ProviderError::other) + }) + .collect::, _>>()?; + + Ok(Some(receipts)) + }) } fn receipts_by_tx_range( @@ -554,6 +593,7 @@ where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, + BlockTy: TryFromBlockResponse, TxTy: TryFromTransactionResponse, { type Transaction = TxTy; @@ -605,9 +645,22 @@ where fn transactions_by_block( &self, - _block: BlockHashOrNumber, + block: BlockHashOrNumber, ) -> ProviderResult>> { - Err(ProviderError::UnsupportedProvider) + let block_response = self.block_on_async(async { + self.provider.get_block(block.into()).full().await.map_err(ProviderError::other) + })?; + + let Some(block_response) = block_response else { 
+ // If the block was not found, return None + return Ok(None); + }; + + // Convert the network block response to primitive block + let block = as TryFromBlockResponse>::from_block_response(block_response) + .map_err(ProviderError::other)?; + + Ok(Some(block.into_body().into_transactions())) } fn transactions_by_block_range( @@ -643,13 +696,7 @@ where Node: NodeTypes, { fn latest(&self) -> Result { - trace!(target: "alloy-provider", "Getting latest state provider"); - - let block_number = self.block_on_async(async { - self.provider.get_block_number().await.map_err(ProviderError::other) - })?; - - self.state_by_block_number(block_number) + Ok(Box::new(self.create_state_provider(self.best_block_number()?.into()))) } fn state_by_block_id(&self, block_id: BlockId) -> Result { @@ -822,6 +869,8 @@ where network: std::marker::PhantomData, /// Cached chain spec (shared with parent provider) chain_spec: Option>, + /// Whether to enable state root calculation + compute_state_root: bool, } impl std::fmt::Debug @@ -848,6 +897,7 @@ impl AlloyRethStateProvider { node_types: std::marker::PhantomData, network: std::marker::PhantomData, chain_spec: None, + compute_state_root: false, } } @@ -863,6 +913,7 @@ impl AlloyRethStateProvider { node_types: std::marker::PhantomData, network: std::marker::PhantomData, chain_spec: Some(chain_spec), + compute_state_root: false, } } @@ -882,9 +933,19 @@ impl AlloyRethStateProvider { node_types: self.node_types, network: self.network, chain_spec: self.chain_spec.clone(), + compute_state_root: self.compute_state_root, } } + /// Helper function to enable state root calculation + /// + /// If enabled, the node will compute the state root and updates. + /// When disabled, it will return zero for state root and no updates. + pub const fn with_compute_state_root(mut self, is_enable: bool) -> Self { + self.compute_state_root = is_enable; + self + } + /// Get account information from RPC fn get_account(&self, address: Address) -> Result, ProviderError> where @@ -935,18 +996,13 @@ where storage_key: StorageKey, ) -> Result, ProviderError> { self.block_on_async(async { - let value = self - .provider - .get_storage_at(address, storage_key.into()) - .block_id(self.block_id) - .await - .map_err(ProviderError::other)?; - - if value.is_zero() { - Ok(None) - } else { - Ok(Some(value)) - } + Ok(Some( + self.provider + .get_storage_at(address, storage_key.into()) + .block_id(self.block_id) + .await + .map_err(ProviderError::other)?, + )) }) } @@ -1018,36 +1074,41 @@ where N: Network, Node: NodeTypes, { - fn state_root(&self, _state: HashedPostState) -> Result { - // Return the state root from the block - self.block_on_async(async { - let block = self - .provider - .get_block(self.block_id) - .await - .map_err(ProviderError::other)? 
- .ok_or(ProviderError::HeaderNotFound(0.into()))?; - - Ok(block.header().state_root()) - }) + fn state_root(&self, hashed_state: HashedPostState) -> Result { + self.state_root_from_nodes(TrieInput::from_state(hashed_state)) } fn state_root_from_nodes(&self, _input: TrieInput) -> Result { - Err(ProviderError::UnsupportedProvider) + warn!("state_root_from_nodes is not implemented and will return zero"); + Ok(B256::ZERO) } fn state_root_with_updates( &self, - _state: HashedPostState, + hashed_state: HashedPostState, ) -> Result<(B256, TrieUpdates), ProviderError> { - Err(ProviderError::UnsupportedProvider) + if !self.compute_state_root { + return Ok((B256::ZERO, TrieUpdates::default())); + } + + self.block_on_async(async { + self.provider + .raw_request::<(HashedPostState, BlockId), (B256, TrieUpdates)>( + "debug_stateRootWithUpdates".into(), + (hashed_state, self.block_id), + ) + .into_future() + .await + .map_err(ProviderError::other) + }) } fn state_root_from_nodes_with_updates( &self, _input: TrieInput, ) -> Result<(B256, TrieUpdates), ProviderError> { - Err(ProviderError::UnsupportedProvider) + warn!("state_root_from_nodes_with_updates is not implemented and will return zero"); + Ok((B256::ZERO, TrieUpdates::default())) } } @@ -1606,7 +1667,7 @@ where Self: Clone + 'static, { fn latest(&self) -> Result { - Ok(Box::new(self.clone()) as StateProviderBox) + Ok(Box::new(self.with_block_id(self.best_block_number()?.into()))) } fn state_by_block_id(&self, block_id: BlockId) -> Result { @@ -1823,17 +1884,28 @@ where }) } - fn code_by_hash(&mut self, _code_hash: B256) -> Result { - // Cannot fetch bytecode by hash via RPC - Ok(revm::bytecode::Bytecode::default()) + fn code_by_hash(&mut self, code_hash: B256) -> Result { + self.block_on_async(async { + // The method `debug_codeByHash` is currently only available on a Reth node + let code = self + .provider + .debug_code_by_hash(code_hash, None) + .await + .map_err(Self::Error::other)?; + + let Some(code) = code else { + // If the code was not found, return + return Ok(revm::bytecode::Bytecode::new()); + }; + + Ok(revm::bytecode::Bytecode::new_raw(code)) + }) } fn storage(&mut self, address: Address, index: U256) -> Result { - let index = B256::from(index); - self.block_on_async(async { self.provider - .get_storage_at(address, index.into()) + .get_storage_at(address, index) .block_id(self.block_id) .await .map_err(ProviderError::other) From 2c62cd8b46b4869cce6772acda8179816041e49b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Jul 2025 11:14:46 +0200 Subject: [PATCH 223/305] ci: dont expect callenv to fail (#17516) --- .github/assets/hive/expected_failures.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index f155a3478c6..da8cb1606d3 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -6,7 +6,6 @@ rpc-compat: - debug_getRawReceipts/get-block-n (reth) - debug_getRawTransaction/get-invalid-hash (reth) - - eth_call/call-callenv (reth) - eth_getStorageAt/get-storage-invalid-key-too-large (reth) - eth_getStorageAt/get-storage-invalid-key (reth) - eth_getTransactionReceipt/get-access-list (reth) From bec451026df4c811231fd421a80e2d3f16482caa Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Jul 2025 11:18:01 +0200 Subject: [PATCH 224/305] chore: migrate from codespell to typos (#17501) --- .codespellrc | 3 -- .github/workflows/lint.yml | 8 ++--- Makefile | 12 +++---- 
crates/rpc/rpc-eth-api/src/helpers/trace.rs | 4 +-- typos.toml | 39 +++++++++++++++++++++ 5 files changed, 50 insertions(+), 16 deletions(-) delete mode 100644 .codespellrc create mode 100644 typos.toml diff --git a/.codespellrc b/.codespellrc deleted file mode 100644 index 771985af191..00000000000 --- a/.codespellrc +++ /dev/null @@ -1,3 +0,0 @@ -[codespell] -skip = .git,target,./crates/storage/libmdbx-rs/mdbx-sys/libmdbx,Cargo.toml,Cargo.lock -ignore-words-list = crate,ser,ratatui diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 2d30f5b69b5..dd9bce693f4 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -188,14 +188,12 @@ jobs: - name: Check docs changes run: git diff --exit-code - codespell: + typos: runs-on: ubuntu-latest timeout-minutes: 30 steps: - uses: actions/checkout@v4 - - uses: codespell-project/actions-codespell@v2 - with: - skip: "*.json" + - uses: crate-ci/typos@v1 check-toml: runs-on: ubuntu-latest @@ -278,7 +276,7 @@ jobs: - fmt - udeps - book - - codespell + - typos - grafana - no-test-deps - features diff --git a/Makefile b/Makefile index fdfd0b6ee3b..5a631c4402f 100644 --- a/Makefile +++ b/Makefile @@ -415,12 +415,12 @@ clippy-op-dev: --locked \ --all-features -lint-codespell: ensure-codespell - codespell --skip "*.json" --skip "./testing/ef-tests/ethereum-tests" +lint-typos: ensure-typos + typos -ensure-codespell: - @if ! command -v codespell &> /dev/null; then \ - echo "codespell not found. Please install it by running the command `pip install codespell` or refer to the following link for more information: https://github.com/codespell-project/codespell" \ +ensure-typos: + @if ! command -v typos &> /dev/null; then \ + echo "typos not found. Please install it by running the command `cargo install typos-cli` or refer to the following link for more information: https://github.com/crate-ci/typos" \ exit 1; \ fi @@ -446,7 +446,7 @@ ensure-dprint: lint: make fmt && \ make clippy && \ - make lint-codespell && \ + make lint-typos && \ make lint-toml clippy-fix: diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 31085bdc08f..5b84ead6275 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -364,7 +364,7 @@ pub trait Trace: /// /// This /// 1. fetches all transactions of the block - /// 2. configures the EVM evn + /// 2. configures the EVM env /// 3. loops over all transactions and executes them /// 4. calls the callback with the transaction info, the execution result, the changed state /// _after_ the transaction [`StateProviderDatabase`] and the database that points to the @@ -400,7 +400,7 @@ pub trait Trace: /// /// This /// 1. fetches all transactions of the block - /// 2. configures the EVM evn + /// 2. configures the EVM env /// 3. loops over all transactions and executes them /// 4. 
calls the callback with the transaction info, the execution result, the changed state /// _after_ the transaction `EvmState` and the database that points to the state right diff --git a/typos.toml b/typos.toml new file mode 100644 index 00000000000..25f54392661 --- /dev/null +++ b/typos.toml @@ -0,0 +1,39 @@ +[files] +extend-exclude = [ + ".git", + "target", + "crates/storage/libmdbx-rs/mdbx-sys/libmdbx", + "Cargo.toml", + "Cargo.lock", + "testing/ef-tests", +] + +[default] +extend-ignore-re = [ + # Hex strings of various lengths + "(?i)0x[0-9a-f]{8}", # 8 hex chars + "(?i)0x[0-9a-f]{40}", # 40 hex chars + "(?i)0x[0-9a-f]{64}", # 64 hex chars + "(?i)[0-9a-f]{8}", # 8 hex chars without 0x + "(?i)[0-9a-f]{40}", # 40 hex chars without 0x + "(?i)[0-9a-f]{64}", # 64 hex chars without 0x + # Ordinals in identifiers + "[0-9]+nd", + "[0-9]+th", + "[0-9]+st", + "[0-9]+rd", +] + +[default.extend-words] +# These are valid identifiers/terms that should be allowed +crate = "crate" +ser = "ser" +ratatui = "ratatui" +seeked = "seeked" # Past tense of seek, used in trie iterator +Seeked = "Seeked" # Type name in trie iterator +Whe = "Whe" # Part of base64 encoded signature +hel = "hel" # Part of hostname bootnode-hetzner-hel +ONL = "ONL" # Part of base64 encoded ENR +Iy = "Iy" # Part of base64 encoded ENR +flate = "flate" # zlib-flate is a valid tool name +Pn = "Pn" # Part of UPnP (Universal Plug and Play) From 54855e1798df315a28714b736326b40bd9cf5725 Mon Sep 17 00:00:00 2001 From: Micke <155267459+reallesee@users.noreply.github.com> Date: Mon, 21 Jul 2025 11:17:38 +0200 Subject: [PATCH 225/305] docs: fix Sepolia URL description (#17495) --- crates/era/tests/it/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/era/tests/it/main.rs b/crates/era/tests/it/main.rs index fa939819189..86bfb3b3ac5 100644 --- a/crates/era/tests/it/main.rs +++ b/crates/era/tests/it/main.rs @@ -49,7 +49,7 @@ const ERA1_MAINNET_FILES_NAMES: [&str; 6] = [ /// Sepolia network name const SEPOLIA: &str = "sepolia"; -/// Default sepolia mainnet url +/// Default sepolia url /// for downloading sepolia `.era1` files const SEPOLIA_URL: &str = "https://era.ithaca.xyz/sepolia-era1/"; From c78f7e4501f0defc428c35c6b3fd879c76fd43d6 Mon Sep 17 00:00:00 2001 From: cakevm Date: Mon, 21 Jul 2025 11:19:04 +0200 Subject: [PATCH 226/305] feat(alloy-provider): compatibility for non-reth nodes (#17511) --- Cargo.lock | 1 + crates/alloy-provider/Cargo.toml | 1 + crates/alloy-provider/README.md | 12 ++- crates/alloy-provider/src/lib.rs | 147 ++++++++++++++++++++++++------- 4 files changed, 127 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6d783a3f05f..1bc7acb439d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7202,6 +7202,7 @@ dependencies = [ "alloy-provider", "alloy-rpc-types", "alloy-rpc-types-engine", + "parking_lot", "reth-chainspec", "reth-db-api", "reth-errors", diff --git a/crates/alloy-provider/Cargo.toml b/crates/alloy-provider/Cargo.toml index 14e9031666d..9e112b487b5 100644 --- a/crates/alloy-provider/Cargo.toml +++ b/crates/alloy-provider/Cargo.toml @@ -40,6 +40,7 @@ tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } # other tracing.workspace = true +parking_lot.workspace = true # revm revm.workspace = true diff --git a/crates/alloy-provider/README.md b/crates/alloy-provider/README.md index 37a75f1b328..0c02dbdf32a 100644 --- a/crates/alloy-provider/README.md +++ b/crates/alloy-provider/README.md @@ -40,12 +40,22 @@ use reth_alloy_provider::{AlloyRethProvider, 
AlloyRethProviderConfig}; use reth_ethereum_node::EthereumNode; let config = AlloyRethProviderConfig { - compute_state_root: true, // Enable state root computation + compute_state_root: true, // Enable state root computation + reth_rpc_support: true, // Use Reth-specific RPC methods (default: true) }; let db_provider = AlloyRethProvider::new_with_config(provider, EthereumNode, config); ``` +## Configuration Options + +- `compute_state_root`: When enabled, computes state root and trie updates (requires Reth-specific RPC methods) +- `reth_rpc_support`: When enabled (default), uses Reth-specific RPC methods for better performance: + - `eth_getAccountInfo`: Fetches account balance, nonce, and code in a single call + - `debug_codeByHash`: Retrieves bytecode by hash without needing the address + + When disabled, falls back to standard RPC methods and caches bytecode locally for compatibility with non-Reth nodes. + ## Technical Details The provider uses `alloy_network::AnyNetwork` for network operations, providing compatibility with various Ethereum-based networks while maintaining the expected block structure with headers. diff --git a/crates/alloy-provider/src/lib.rs b/crates/alloy-provider/src/lib.rs index 2726765912f..39d23efeff1 100644 --- a/crates/alloy-provider/src/lib.rs +++ b/crates/alloy-provider/src/lib.rs @@ -20,13 +20,16 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use alloy_consensus::BlockHeader; +use alloy_consensus::{constants::KECCAK_EMPTY, BlockHeader}; use alloy_eips::{BlockHashOrNumber, BlockNumberOrTag}; use alloy_network::{primitives::HeaderResponse, BlockResponse}; -use alloy_primitives::{Address, BlockHash, BlockNumber, StorageKey, TxHash, TxNumber, B256, U256}; +use alloy_primitives::{ + map::HashMap, Address, BlockHash, BlockNumber, StorageKey, TxHash, TxNumber, B256, U256, +}; use alloy_provider::{ext::DebugApi, network::Network, Provider}; -use alloy_rpc_types::BlockId; +use alloy_rpc_types::{AccountInfo, BlockId}; use alloy_rpc_types_engine::ForkchoiceState; +use parking_lot::RwLock; use reth_chainspec::{ChainInfo, ChainSpecProvider}; use reth_db_api::{ mock::{DatabaseMock, TxMock}, @@ -63,10 +66,22 @@ use tokio::{runtime::Handle, sync::broadcast}; use tracing::{trace, warn}; /// Configuration for `AlloyRethProvider` -#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone)] pub struct AlloyRethProviderConfig { /// Whether to compute state root when creating execution outcomes pub compute_state_root: bool, + /// Whether to use Reth-specific RPC methods for better performance + /// + /// If enabled, the node will use Reth's RPC methods (`debug_codeByHash` and + /// `eth_getAccountInfo`) to speed up account information retrieval. When disabled, it will + /// use multiple standard RPC calls to get account information. 
+ pub reth_rpc_support: bool, +} + +impl Default for AlloyRethProviderConfig { + fn default() -> Self { + Self { compute_state_root: false, reth_rpc_support: true } + } } impl AlloyRethProviderConfig { @@ -75,6 +90,12 @@ impl AlloyRethProviderConfig { self.compute_state_root = compute; self } + + /// Sets whether to use Reth-specific RPC methods for better performance + pub const fn with_reth_rpc_support(mut self, support: bool) -> Self { + self.reth_rpc_support = support; + self + } } /// A provider implementation that uses Alloy RPC to fetch state data @@ -136,6 +157,18 @@ impl AlloyRethProvider { } } + /// Use a custom chain spec for the provider + pub fn with_chain_spec(self, chain_spec: Arc) -> Self { + Self { + provider: self.provider, + node_types: std::marker::PhantomData, + network: std::marker::PhantomData, + canon_state_notification: self.canon_state_notification, + config: self.config, + chain_spec, + } + } + /// Helper function to execute async operations in a blocking context fn block_on_async(&self, fut: F) -> T where @@ -166,6 +199,7 @@ where self.chain_spec.clone(), ) .with_compute_state_root(self.config.compute_state_root) + .with_reth_rpc_support(self.config.reth_rpc_support) } /// Helper function to get state provider by block number @@ -854,7 +888,6 @@ where } /// State provider implementation that fetches state via RPC -#[derive(Clone)] pub struct AlloyRethStateProvider where Node: NodeTypes, @@ -871,6 +904,12 @@ where chain_spec: Option>, /// Whether to enable state root calculation compute_state_root: bool, + /// Cached bytecode for accounts + /// + /// Since the state provider is short-lived, we don't worry about memory leaks. + code_store: RwLock>, + /// Whether to use Reth-specific RPC methods for better performance + reth_rpc_support: bool, } impl std::fmt::Debug @@ -886,7 +925,7 @@ impl std::fmt::Debug impl AlloyRethStateProvider { /// Creates a new state provider for the given block - pub const fn new( + pub fn new( provider: P, block_id: BlockId, _primitives: std::marker::PhantomData, @@ -898,11 +937,13 @@ impl AlloyRethStateProvider { network: std::marker::PhantomData, chain_spec: None, compute_state_root: false, + code_store: RwLock::new(HashMap::default()), + reth_rpc_support: true, } } /// Creates a new state provider with a cached chain spec - pub const fn with_chain_spec( + pub fn with_chain_spec( provider: P, block_id: BlockId, chain_spec: Arc, @@ -914,6 +955,8 @@ impl AlloyRethStateProvider { network: std::marker::PhantomData, chain_spec: Some(chain_spec), compute_state_root: false, + code_store: RwLock::new(HashMap::default()), + reth_rpc_support: true, } } @@ -934,6 +977,8 @@ impl AlloyRethStateProvider { network: self.network, chain_spec: self.chain_spec.clone(), compute_state_root: self.compute_state_root, + code_store: RwLock::new(HashMap::default()), + reth_rpc_support: self.reth_rpc_support, } } @@ -946,41 +991,73 @@ impl AlloyRethStateProvider { self } + /// Sets whether to use Reth-specific RPC methods for better performance + /// + /// If enabled, the node will use Reth's RPC methods (`debug_codeByHash` and + /// `eth_getAccountInfo`) to speed up account information retrieval. When disabled, it will + /// use multiple standard RPC calls to get account information. 
+ pub const fn with_reth_rpc_support(mut self, is_enable: bool) -> Self { + self.reth_rpc_support = is_enable; + self + } + /// Get account information from RPC fn get_account(&self, address: Address) -> Result, ProviderError> where P: Provider + Clone + 'static, N: Network, { - self.block_on_async(async { - // Get account info in a single RPC call - let account_info = self - .provider - .get_account_info(address) - .block_id(self.block_id) - .await - .map_err(ProviderError::other)?; + let account_info = self.block_on_async(async { + // Get account info in a single RPC call using `eth_getAccountInfo` + if self.reth_rpc_support { + return self + .provider + .get_account_info(address) + .block_id(self.block_id) + .await + .map_err(ProviderError::other); + } + // Get account info in multiple RPC calls + let nonce = self.provider.get_transaction_count(address).block_id(self.block_id); + let balance = self.provider.get_balance(address).block_id(self.block_id); + let code = self.provider.get_code_at(address).block_id(self.block_id); - // Only return account if it exists (has balance, nonce, or code) - if account_info.balance.is_zero() && - account_info.nonce == 0 && - account_info.code.is_empty() - { - Ok(None) - } else { - let bytecode = if account_info.code.is_empty() { - None - } else { - Some(Bytecode::new_raw(account_info.code)) - }; + let (nonce, balance, code) = tokio::join!(nonce, balance, code,); - Ok(Some(Account { - balance: account_info.balance, - nonce: account_info.nonce, - bytecode_hash: bytecode.as_ref().map(|b| b.hash_slow()), - })) + let account_info = AccountInfo { + balance: balance.map_err(ProviderError::other)?, + nonce: nonce.map_err(ProviderError::other)?, + code: code.map_err(ProviderError::other)?, + }; + + let code_hash = account_info.code_hash(); + if code_hash != KECCAK_EMPTY { + // Insert code into the cache + self.code_store + .write() + .insert(code_hash, Bytecode::new_raw(account_info.code.clone())); } - }) + + Ok(account_info) + })?; + + // Only return account if it exists (has balance, nonce, or code) + if account_info.balance.is_zero() && account_info.nonce == 0 && account_info.code.is_empty() + { + Ok(None) + } else { + let bytecode = if account_info.code.is_empty() { + None + } else { + Some(Bytecode::new_raw(account_info.code)) + }; + + Ok(Some(Account { + balance: account_info.balance, + nonce: account_info.nonce, + bytecode_hash: bytecode.as_ref().map(|b| b.hash_slow()), + })) + } } } @@ -1039,6 +1116,10 @@ where Node: NodeTypes, { fn bytecode_by_hash(&self, code_hash: &B256) -> Result, ProviderError> { + if !self.reth_rpc_support { + return Ok(self.code_store.read().get(code_hash).cloned()); + } + self.block_on_async(async { // The method `debug_codeByHash` is currently only available on a Reth node let code = self From c1ff79c074073a24f59f9822336d34370997b056 Mon Sep 17 00:00:00 2001 From: maradini77 <140460067+maradini77@users.noreply.github.com> Date: Mon, 21 Jul 2025 11:28:32 +0200 Subject: [PATCH 227/305] fix: Refine Transaction Abstraction Link (#17502) --- docs/design/database.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/design/database.md b/docs/design/database.md index a560b7a14e6..fdc6251c0ca 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -3,7 +3,7 @@ ## Abstractions - We created a [Database trait abstraction](https://github.com/paradigmxyz/reth/blob/main/crates/cli/commands/src/db/mod.rs) using Rust Stable GATs which frees us from being bound to a single database implementation. 
We currently use MDBX, but are exploring [redb](https://github.com/cberner/redb) as an alternative. -- We then iterated on [`Transaction`](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/stages/src/db.rs#L14-L19) as a non-leaky abstraction with helpers for strictly-typed and unit-tested higher-level database abstractions. +- We then iterated on [`Transaction`](https://github.com/paradigmxyz/reth/blob/main/crates/storage/errors/src/db.rs) as a non-leaky abstraction with helpers for strictly-typed and unit-tested higher-level database abstractions. ## Codecs From a49fef80c122803856fcd870349f254ee45b88f6 Mon Sep 17 00:00:00 2001 From: anim001k <140460766+anim001k@users.noreply.github.com> Date: Mon, 21 Jul 2025 11:30:24 +0200 Subject: [PATCH 228/305] fix: temporary file leak in atomic_write_file (#17505) --- crates/fs-util/src/lib.rs | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index 922bec6bf67..d3195ad27fe 100644 --- a/crates/fs-util/src/lib.rs +++ b/crates/fs-util/src/lib.rs @@ -323,10 +323,21 @@ where let mut file = File::create(&tmp_path).map_err(|err| FsPathError::create_file(err, &tmp_path))?; - write_fn(&mut file).map_err(|err| FsPathError::Write { - source: Error::other(err.into()), - path: tmp_path.clone(), - })?; + // Execute the write function and handle errors properly + // If write_fn fails, we need to clean up the temporary file before returning + match write_fn(&mut file) { + Ok(()) => { + // Success - continue with the atomic operation + } + Err(err) => { + // Clean up the temporary file before returning the error + let _ = fs::remove_file(&tmp_path); + return Err(FsPathError::Write { + source: Error::other(err.into()), + path: tmp_path.clone(), + }); + } + } // fsync() file file.sync_all().map_err(|err| FsPathError::fsync(err, &tmp_path))?; From 52a627bf4dc4372983b6cfa3a2966d3e49bb522d Mon Sep 17 00:00:00 2001 From: Fallengirl <155266340+Fallengirl@users.noreply.github.com> Date: Mon, 21 Jul 2025 11:36:32 +0200 Subject: [PATCH 229/305] docs: fix error in RawCapabilityMessage comment (#17411) --- crates/net/eth-wire/src/capability.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/net/eth-wire/src/capability.rs b/crates/net/eth-wire/src/capability.rs index 97e15dbe1f9..613ec87a4be 100644 --- a/crates/net/eth-wire/src/capability.rs +++ b/crates/net/eth-wire/src/capability.rs @@ -534,7 +534,7 @@ mod tests { let mut encoded = Vec::new(); msg.encode(&mut encoded); - // Decode the bytes back into RawCapbailitMessage + // Decode the bytes back into RawCapabilityMessage let decoded = RawCapabilityMessage::decode(&mut &encoded[..]).unwrap(); // Verify that the decoded message matches the original From 5b01ca773807987cc3e7ddb51d41f677be9f69b4 Mon Sep 17 00:00:00 2001 From: AJStonewee Date: Mon, 21 Jul 2025 05:38:26 -0400 Subject: [PATCH 230/305] docs: normalize dynamic CLI defaults in help generation (#17509) --- docs/cli/help.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/cli/help.rs b/docs/cli/help.rs index c6e73318e08..e6813a483a5 100755 --- a/docs/cli/help.rs +++ b/docs/cli/help.rs @@ -274,6 +274,16 @@ fn preprocess_help(s: &str) -> Cow<'_, str> { r"(rpc.max-tracing-requests \n.*\n.*\n.*\n.*\n.*)\[default: \d+\]", r"$1[default: ]", ), + // Handle engine.max-proof-task-concurrency dynamic default + ( + r"(engine\.max-proof-task-concurrency.*)\[default: \d+\]", + r"$1[default: ]", + ), + // Handle 
engine.reserved-cpu-cores dynamic default + ( + r"(engine\.reserved-cpu-cores.*)\[default: \d+\]", + r"$1[default: ]", + ), ]; patterns .iter() From 4639f94535508d5055c4510f59b7bbfc9f3f7ce7 Mon Sep 17 00:00:00 2001 From: Avory Date: Mon, 21 Jul 2025 12:44:27 +0300 Subject: [PATCH 231/305] docs(trace): document trace format and response structure (#17517) --- docs/vocs/docs/pages/jsonrpc/trace.mdx | 184 ++++++++++++++++++++++--- 1 file changed, 164 insertions(+), 20 deletions(-) diff --git a/docs/vocs/docs/pages/jsonrpc/trace.mdx b/docs/vocs/docs/pages/jsonrpc/trace.mdx index 464832db70e..d1ddd3ca55c 100644 --- a/docs/vocs/docs/pages/jsonrpc/trace.mdx +++ b/docs/vocs/docs/pages/jsonrpc/trace.mdx @@ -4,8 +4,6 @@ description: Trace API for inspecting Ethereum state and transactions. # `trace` Namespace -{/* TODO: We should probably document the format of the traces themselves, OE does not do that */} - The `trace` API provides several methods to inspect the Ethereum state, including Parity-style traces. A similar module exists (with other debug functions) with Geth-style traces ([`debug`](/jsonrpc/debug)). @@ -17,6 +15,128 @@ There are two types of methods in this API: - **Ad-hoc tracing APIs** for performing diagnostics on calls or transactions (historical or hypothetical). - **Transaction-trace filtering APIs** for getting full externality traces on any transaction executed by reth. +## Trace Format Specification + +The trace API returns different types of trace data depending on the requested trace types. Understanding these formats is crucial for interpreting the results. + +### TraceResults + +The `TraceResults` object is returned by ad-hoc tracing methods (`trace_call`, `trace_callMany`, `trace_rawTransaction`, `trace_replayTransaction`, `trace_replayBlockTransactions`). 
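Which of its fields are populated is controlled by the trace types passed in the request. As a hedged illustration (the addresses are placeholders reused from the examples below), a `trace_call` request asking for all three trace types could look like:

```json
{
  "id": 1,
  "jsonrpc": "2.0",
  "method": "trace_call",
  "params": [
    {
      "from": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b",
      "to": "0x6295ee1b4f6dd65047762f924ecd367c17eabf8f",
      "value": "0x0"
    },
    ["trace", "stateDiff", "vmTrace"],
    "latest"
  ]
}
```

Any trace type that was not requested comes back as `null` in the resulting `TraceResults` object.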
It contains the following fields: + +| Field | Type | Description | +|-------|------|-------------| +| `output` | `string` | The return value of the traced call, encoded as hex | +| `stateDiff` | `object \| null` | State changes caused by the transaction (only if `stateDiff` trace type requested) | +| `trace` | `array \| null` | Array of transaction traces (only if `trace` trace type requested) | +| `vmTrace` | `object \| null` | Virtual machine execution trace (only if `vmTrace` trace type requested) | + +### LocalizedTransactionTrace + +Individual transaction traces in `trace_block`, `trace_filter`, `trace_get`, and `trace_transaction` methods return `LocalizedTransactionTrace` objects: + +| Field | Type | Description | +|-------|------|-------------| +| `action` | `object` | The action performed by this trace | +| `result` | `object \| null` | The result of the trace execution | +| `error` | `string \| null` | Error message if the trace failed | +| `blockHash` | `string \| null` | Hash of the block containing this trace | +| `blockNumber` | `number \| null` | Number of the block containing this trace | +| `transactionHash` | `string \| null` | Hash of the transaction containing this trace | +| `transactionPosition` | `number \| null` | Position of the transaction in the block | +| `subtraces` | `number` | Number of child traces | +| `traceAddress` | `array` | Position of this trace in the call tree | +| `type` | `string` | Type of action: `"call"`, `"create"`, `"suicide"`, or `"reward"` | + +### Action Types + +#### Call Action (`type: "call"`) + +| Field | Type | Description | +|-------|------|-------------| +| `callType` | `string` | Type of call: `"call"`, `"callcode"`, `"delegatecall"`, or `"staticcall"` | +| `from` | `string` | Address of the caller | +| `to` | `string` | Address of the callee | +| `gas` | `string` | Gas provided for the call | +| `input` | `string` | Input data for the call | +| `value` | `string` | Value transferred in the call | + +#### Create Action (`type: "create"`) + +| Field | Type | Description | +|-------|------|-------------| +| `from` | `string` | Address of the creator | +| `gas` | `string` | Gas provided for contract creation | +| `init` | `string` | Contract initialization code | +| `value` | `string` | Value sent to the new contract | + +#### Suicide Action (`type: "suicide"`) + +| Field | Type | Description | +|-------|------|-------------| +| `address` | `string` | Address of the contract being destroyed | +| `refundAddress` | `string` | Address receiving the remaining balance | +| `balance` | `string` | Balance transferred to refund address | + +#### Reward Action (`type: "reward"`) + +| Field | Type | Description | +|-------|------|-------------| +| `author` | `string` | Address receiving the reward | +| `value` | `string` | Amount of the reward | +| `rewardType` | `string` | Type of reward: `"block"` or `"uncle"` | + +### Result Format + +When a trace executes successfully, the `result` field contains: + +| Field | Type | Description | +|-------|------|-------------| +| `gasUsed` | `string` | Amount of gas consumed by this trace | +| `output` | `string` | Return data from the trace execution | +| `address` | `string` | Created contract address (for create actions only) | +| `code` | `string` | Deployed contract code (for create actions only) | + +### State Diff Format + +When `stateDiff` trace type is requested, the `stateDiff` field contains an object mapping addresses to their state changes: + +```json +{ + "0x123...": { + "balance": { + "*": 
{ + "from": "0x0", + "to": "0x1000" + } + }, + "nonce": { + "*": { + "from": "0x0", + "to": "0x1" + } + }, + "code": { + "*": { + "from": "0x", + "to": "0x608060405234801561001057600080fd5b50..." + } + }, + "storage": { + "0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563": { + "*": { + "from": "0x0", + "to": "0x1" + } + } + } + } +} +``` + +### VM Trace Format + +When `vmTrace` trace type is requested, the `vmTrace` field contains detailed virtual machine execution information including opcodes, stack, memory, and storage changes at each step. The exact format depends on the specific VM tracer implementation. + ## Ad-hoc tracing APIs Ad-hoc tracing APIs allow you to perform diagnostics on calls or transactions (historical or hypothetical), including: @@ -71,7 +191,14 @@ The third and optional parameter is a block number, block hash, or a block tag ( "output": "0x", "stateDiff": null, "trace": [{ - "action": { ... }, + "action": { + "callType": "call", + "from": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000000", + "gas": "0x76c0", + "input": "0x", + "value": "0x0" + }, "result": { "gasUsed": "0x0", "output": "0x" @@ -170,9 +297,16 @@ Traces a call to `eth_sendRawTransaction` without making the call, returning the "jsonrpc": "2.0", "result": { "output": "0x", - "stateDiff": null, - "trace": [{ - "action": { ... }, + "stateDiff": null, + "trace": [{ + "action": { + "callType": "call", + "from": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "to": "0x6295ee1b4f6dd65047762f924ecd367c17eabf8f", + "gas": "0x186a0", + "input": "0x", + "value": "0x0" + }, "result": { "gasUsed": "0x0", "output": "0x" @@ -181,7 +315,7 @@ Traces a call to `eth_sendRawTransaction` without making the call, returning the "traceAddress": [], "type": "call" }], - "vmTrace": null + "vmTrace": null } } ``` @@ -206,7 +340,14 @@ Replays all transactions in a block returning the requested traces for each tran "output": "0x", "stateDiff": null, "trace": [{ - "action": { ... }, + "action": { + "callType": "call", + "from": "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "to": "0x6295ee1b4f6dd65047762f924ecd367c17eabf8f", + "gas": "0x186a0", + "input": "0x", + "value": "0x0" + }, "result": { "gasUsed": "0x0", "output": "0x" @@ -215,10 +356,9 @@ Replays all transactions in a block returning the requested traces for each tran "traceAddress": [], "type": "call" }], - "transactionHash": "0x...", + "transactionHash": "0x4e70b5d8d5dc43e0e61e4a8f1e6e4e6e4e6e4e6e4e6e4e6e4e6e4e6e4e6e4e6e4", "vmTrace": null - }, - { ... } + } ] } ``` @@ -242,10 +382,17 @@ Replays a transaction, returning the traces. "output": "0x", "stateDiff": null, "trace": [{ - "action": { ... }, + "action": { + "callType": "call", + "from": "0x1c39ba39e4735cb65978d4db400ddd70a72dc750", + "to": "0x2bd2326c993dfaef84f696526064ff22eba5b362", + "gas": "0x13e99", + "input": "0x16c72721", + "value": "0x0" + }, "result": { - "gasUsed": "0x0", - "output": "0x" + "gasUsed": "0x183", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001" }, "subtraces": 0, "traceAddress": [], @@ -292,8 +439,7 @@ Returns traces created at given block. "transactionHash": "0x07da28d752aba3b9dd7060005e554719c6205c8a3aea358599fc9b245c52f1f6", "transactionPosition": 0, "type": "call" - }, - ... + } ] } ``` @@ -345,8 +491,7 @@ All properties are optional. "transactionHash": "0x3321a7708b1083130bd78da0d62ead9f6683033231617c9d268e2c7e3fa6c104", "transactionPosition": 3, "type": "call" - }, - ... 
+ } ] } ``` @@ -430,8 +575,7 @@ Returns all traces of given transaction "transactionHash": "0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3", "transactionPosition": 2, "type": "call" - }, - ... + } ] } ``` From 0b1f25e56e375ad7b90183b778dfa54f693a42b5 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 21 Jul 2025 13:40:45 +0300 Subject: [PATCH 232/305] fix: `logIndex` in `getBlockReceipts` (#17519) --- crates/rpc/rpc-eth-api/src/helpers/block.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index ac70a4705b4..560002b8a1c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -150,7 +150,7 @@ pub trait EthBlocks: }; gas_used = receipt.cumulative_gas_used(); - next_log_index = receipt.logs().len(); + next_log_index += receipt.logs().len(); input }) From ac2974867f763ed27c549238c7deaf8533c72de6 Mon Sep 17 00:00:00 2001 From: Rez Date: Mon, 21 Jul 2025 20:55:47 +1000 Subject: [PATCH 233/305] feat: make payload validation functions generic over block header type (#17520) --- crates/payload/validator/src/cancun.rs | 13 +++++++------ crates/payload/validator/src/prague.rs | 8 ++++---- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/crates/payload/validator/src/cancun.rs b/crates/payload/validator/src/cancun.rs index 5a4deb139fd..cea8aca5144 100644 --- a/crates/payload/validator/src/cancun.rs +++ b/crates/payload/validator/src/cancun.rs @@ -11,14 +11,15 @@ use reth_primitives_traits::{AlloyBlockHeader, Block, SealedBlock}; /// - doesn't contain EIP-4844 transactions unless Cancun is active /// - checks blob versioned hashes in block and sidecar match #[inline] -pub fn ensure_well_formed_fields( +pub fn ensure_well_formed_fields( block: &SealedBlock, cancun_sidecar_fields: Option<&CancunPayloadFields>, is_cancun_active: bool, ) -> Result<(), PayloadError> where T: Transaction + Typed2718, - B: Block>, + H: AlloyBlockHeader, + B: Block
>, { ensure_well_formed_header_and_sidecar_fields(block, cancun_sidecar_fields, is_cancun_active)?; ensure_well_formed_transactions_field_with_sidecar( @@ -72,8 +73,8 @@ pub fn ensure_well_formed_header_and_sidecar_fields( /// - doesn't contain EIP-4844 transactions unless Cancun is active /// - checks blob versioned hashes in block and sidecar match #[inline] -pub fn ensure_well_formed_transactions_field_with_sidecar( - block_body: &BlockBody, +pub fn ensure_well_formed_transactions_field_with_sidecar( + block_body: &BlockBody, cancun_sidecar_fields: Option<&CancunPayloadFields>, is_cancun_active: bool, ) -> Result<(), PayloadError> { @@ -89,8 +90,8 @@ pub fn ensure_well_formed_transactions_field_with_sidecar( - block_body: &BlockBody, +pub fn ensure_matching_blob_versioned_hashes( + block_body: &BlockBody, cancun_sidecar_fields: Option<&CancunPayloadFields>, ) -> Result<(), PayloadError> { let num_blob_versioned_hashes = block_body.blob_versioned_hashes_iter().count(); diff --git a/crates/payload/validator/src/prague.rs b/crates/payload/validator/src/prague.rs index d663469a826..9dff206d74f 100644 --- a/crates/payload/validator/src/prague.rs +++ b/crates/payload/validator/src/prague.rs @@ -10,8 +10,8 @@ use alloy_rpc_types_engine::{PayloadError, PraguePayloadFields}; /// - Prague fields are not present unless Prague is active /// - does not contain EIP-7702 transactions if Prague is not active #[inline] -pub fn ensure_well_formed_fields( - block_body: &BlockBody, +pub fn ensure_well_formed_fields( + block_body: &BlockBody, prague_fields: Option<&PraguePayloadFields>, is_prague_active: bool, ) -> Result<(), PayloadError> { @@ -36,8 +36,8 @@ pub const fn ensure_well_formed_sidecar_fields( /// Checks that transactions field doesn't contain EIP-7702 transactions if Prague is not /// active. 
#[inline] -pub fn ensure_well_formed_transactions_field( - block_body: &BlockBody, +pub fn ensure_well_formed_transactions_field( + block_body: &BlockBody, is_prague_active: bool, ) -> Result<(), PayloadError> { if !is_prague_active && block_body.has_eip7702_transactions() { From 84387f7c97236281e17e2719b325b7809d520260 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Jul 2025 14:48:27 +0200 Subject: [PATCH 234/305] chore: sanity secp256k1+rayon activations (#17527) --- crates/ethereum/node/Cargo.toml | 3 ++- crates/optimism/node/Cargo.toml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index b9cedc660a4..7da04f6cae7 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -16,7 +16,8 @@ reth-ethereum-engine-primitives.workspace = true reth-ethereum-payload-builder.workspace = true reth-ethereum-consensus.workspace = true reth-ethereum-primitives.workspace = true -reth-primitives-traits.workspace = true +## ensure secp256k1 recovery with rayon support is activated +reth-primitives-traits = { workspace = true, features = ["secp256k1", "rayon"] } reth-node-builder.workspace = true reth-tracing.workspace = true reth-provider.workspace = true diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index ec4b9a127b2..94807bf3737 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -13,7 +13,8 @@ workspace = true [dependencies] # reth reth-chainspec.workspace = true -reth-primitives-traits.workspace = true +## ensure secp256k1 recovery with rayon support is activated +reth-primitives-traits = { workspace = true, features = ["secp256k1", "rayon"] } reth-payload-builder.workspace = true reth-consensus.workspace = true reth-node-api.workspace = true From 5bc8589162b6e23b07919d82a57eee14353f2862 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Jul 2025 14:50:04 +0200 Subject: [PATCH 235/305] chore: extend exex ethapi example (#17481) --- crates/rpc/rpc-builder/src/lib.rs | 11 +---- examples/exex-hello-world/src/main.rs | 70 +++++++++++++++++++++------ 2 files changed, 58 insertions(+), 23 deletions(-) diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 3d5dc17ba8b..6824feecee6 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -793,10 +793,7 @@ where /// # Panics /// /// If called outside of the tokio runtime. See also [`Self::eth_api`] - pub fn trace_api(&self) -> TraceApi - where - EthApi: TraceExt, - { + pub fn trace_api(&self) -> TraceApi { TraceApi::new(self.eth_api().clone(), self.blocking_pool_guard.clone(), self.eth_config) } @@ -818,11 +815,7 @@ where /// # Panics /// /// If called outside of the tokio runtime. 
See also [`Self::eth_api`]
-    pub fn debug_api(&self) -> DebugApi
-    where
-        EthApi: EthApiSpec + EthTransactions + TraceExt,
-        EvmConfig::Primitives: NodePrimitives>,
-    {
+    pub fn debug_api(&self) -> DebugApi {
         DebugApi::new(
             self.eth_api().clone(),
             self.blocking_pool_guard.clone(),
diff --git a/examples/exex-hello-world/src/main.rs b/examples/exex-hello-world/src/main.rs
index 9b9710ac6af..4253d8185e4 100644
--- a/examples/exex-hello-world/src/main.rs
+++ b/examples/exex-hello-world/src/main.rs
@@ -9,19 +9,27 @@ use clap::Parser;
 use futures::TryStreamExt;
 use reth_ethereum::{
+    chainspec::EthereumHardforks,
     exex::{ExExContext, ExExEvent, ExExNotification},
-    node::{api::FullNodeComponents, EthereumNode},
+    node::{
+        api::{FullNodeComponents, NodeTypes},
+        builder::rpc::RpcHandle,
+        EthereumNode,
+    },
+    rpc::api::eth::helpers::FullEthApi,
 };
-use reth_op::rpc::api::eth::helpers::FullEthApi;
 use reth_tracing::tracing::info;
 use tokio::sync::oneshot;
 
+/// Additional CLI arguments
 #[derive(Parser)]
 struct ExExArgs {
+    /// whether to launch an op-reth node
     #[arg(long)]
     optimism: bool,
 }
 
+/// A basic subscription loop over new blocks.
 async fn my_exex(mut ctx: ExExContext) -> eyre::Result<()> {
     while let Some(notification) = ctx.notifications.try_next().await? {
         match &notification {
@@ -44,22 +52,44 @@ async fn my_exex(mut ctx: ExExContext) -> eyre::
     Ok(())
 }
 
-/// This is an example of how to access the `EthApi` inside an ExEx. It receives the `EthApi` once
-/// the node is launched fully.
-async fn ethapi_exex(
+/// This is an example of how to access the [`RpcHandle`] inside an ExEx. It receives the
+/// [`RpcHandle`] once the node is launched fully.
+///
+/// This function supports both the Opstack Eth API and the ethereum Eth API.
+///
+/// The received handle gives access to the `EthApi`, which has full access to all eth api
+/// functionality [`FullEthApi`]. It also gives access to additional eth related rpc method
+/// handlers, such as eth filter.
+async fn ethapi_exex(
     mut ctx: ExExContext,
-    ethapi_rx: oneshot::Receiver,
+    rpc_handle: oneshot::Receiver>,
 ) -> eyre::Result<()>
 where
-    Node: FullNodeComponents,
+    Node: FullNodeComponents>,
+    EthApi: FullEthApi,
 {
     // Wait for the ethapi to be sent from the main function
-    let _ethapi = ethapi_rx.await?;
-    info!("Received ethapi inside exex");
+    let rpc_handle = rpc_handle.await?;
+    info!("Received rpc handle inside exex");
+
+    // obtain the ethapi from the rpc handle
+    let ethapi = rpc_handle.eth_api();
+
+    // EthFilter type that provides all eth_getLogs related logic
+    let _eth_filter = rpc_handle.eth_handlers().filter.clone();
+    // EthPubSub type that provides all eth_subscribe logic
+    let _eth_pubsub = rpc_handle.eth_handlers().pubsub.clone();
+    // The TraceApi type that provides all the trace_ handlers
+    let _trace_api = rpc_handle.trace_api();
+    // The DebugApi type that provides all the debug_ handlers
+    let _debug_api = rpc_handle.debug_api();
 
     while let Some(notification) = ctx.notifications.try_next().await?
{ if let Some(committed_chain) = notification.committed_chain() { ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; + + // can use the eth api to interact with the node + let _rpc_block = ethapi.rpc_block(committed_chain.tip().hash().into(), true).await?; } } @@ -71,30 +101,42 @@ fn main() -> eyre::Result<()> { if args.optimism { reth_op::cli::Cli::parse_args().run(|builder, _| { + let (rpc_handle_tx, rpc_handle_rx) = oneshot::channel(); Box::pin(async move { let handle = builder .node(reth_op::node::OpNode::default()) .install_exex("my-exex", async move |ctx| Ok(my_exex(ctx))) + .install_exex("ethapi-exex", async move |ctx| { + Ok(ethapi_exex(ctx, rpc_handle_rx)) + }) .launch() .await?; + // Retrieve the rpc handle from the node and send it to the exex + rpc_handle_tx + .send(handle.node.add_ons_handle.clone()) + .expect("Failed to send ethapi to ExEx"); + handle.wait_for_node_exit().await }) }) } else { reth_ethereum::cli::Cli::parse_args().run(|builder, _| { Box::pin(async move { - let (ethapi_tx, ethapi_rx) = oneshot::channel(); + let (rpc_handle_tx, rpc_handle_rx) = oneshot::channel(); let handle = builder .node(EthereumNode::default()) .install_exex("my-exex", async move |ctx| Ok(my_exex(ctx))) - .install_exex("ethapi-exex", async move |ctx| Ok(ethapi_exex(ctx, ethapi_rx))) + .install_exex("ethapi-exex", async move |ctx| { + Ok(ethapi_exex(ctx, rpc_handle_rx)) + }) .launch() .await?; - // Retrieve the ethapi from the node and send it to the exex - let ethapi = handle.node.add_ons_handle.eth_api(); - ethapi_tx.send(ethapi.clone()).expect("Failed to send ethapi to ExEx"); + // Retrieve the rpc handle from the node and send it to the exex + rpc_handle_tx + .send(handle.node.add_ons_handle.clone()) + .expect("Failed to send ethapi to ExEx"); handle.wait_for_node_exit().await }) From 42f791924a30b4434f1ae7103762a474ff7ec22a Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Jul 2025 14:34:33 +0200 Subject: [PATCH 236/305] fix: ensure required revm features are activated (#17526) --- Cargo.lock | 2 ++ bin/reth/Cargo.toml | 1 + crates/ethereum/cli/Cargo.toml | 1 + crates/ethereum/node/Cargo.toml | 7 +++++++ crates/optimism/node/Cargo.toml | 6 ++++++ crates/optimism/node/src/lib.rs | 3 +++ 6 files changed, 20 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 1bc7acb439d..4c6c2e2df07 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9279,6 +9279,7 @@ dependencies = [ "futures", "op-alloy-consensus", "op-alloy-rpc-types-engine", + "op-revm", "reth-chainspec", "reth-consensus", "reth-db", @@ -9313,6 +9314,7 @@ dependencies = [ "reth-transaction-pool", "reth-trie-common", "reth-trie-db", + "revm", "serde", "serde_json", "tokio", diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index ab78bc9cb12..a590f25810b 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -75,6 +75,7 @@ asm-keccak = [ "reth-node-core/asm-keccak", "reth-primitives/asm-keccak", "reth-ethereum-cli/asm-keccak", + "reth-node-ethereum/asm-keccak", ] jemalloc = [ diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index 77cca65d016..a0a2a13fb64 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -46,6 +46,7 @@ dev = ["reth-cli-commands/arbitrary"] asm-keccak = [ "reth-node-core/asm-keccak", + "reth-node-ethereum/asm-keccak", ] jemalloc = [ diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 7da04f6cae7..c62ce1b8fe1 100644 --- a/crates/ethereum/node/Cargo.toml +++ 
b/crates/ethereum/node/Cargo.toml
@@ -45,7 +45,9 @@ alloy-eips.workspace = true
 alloy-network.workspace = true
 alloy-rpc-types-eth.workspace = true
 alloy-rpc-types-engine.workspace = true
+
 # revm with required ethereum features
+# Note: this must be kept to ensure all features are properly enabled/forwarded
 revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] }
 
 # misc
@@ -80,6 +82,11 @@ rand.workspace = true
 
 [features]
 default = []
+asm-keccak = [
+    "alloy-primitives/asm-keccak",
+    "reth-node-core/asm-keccak",
+    "revm/asm-keccak",
+]
 js-tracer = ["reth-node-builder/js-tracer"]
 test-utils = [
     "reth-node-builder/test-utils",
diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml
index 94807bf3737..539828f265e 100644
--- a/crates/optimism/node/Cargo.toml
+++ b/crates/optimism/node/Cargo.toml
@@ -44,6 +44,11 @@ reth-optimism-consensus = { workspace = true, features = ["std"] }
 reth-optimism-forks.workspace = true
 reth-optimism-primitives = { workspace = true, features = ["serde", "serde-bincode-compat", "reth-codec"] }
 
+# revm with required optimism features
+# Note: this must be kept to ensure all features are properly enabled/forwarded
+revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] }
+op-revm.workspace = true
+
 # ethereum
 alloy-primitives.workspace = true
 op-alloy-consensus.workspace = true
@@ -87,6 +92,7 @@ asm-keccak = [
     "alloy-primitives/asm-keccak",
     "reth-optimism-node/asm-keccak",
     "reth-node-core/asm-keccak",
+    "revm/asm-keccak",
 ]
 js-tracer = ["reth-node-builder/js-tracer"]
 test-utils = [
diff --git a/crates/optimism/node/src/lib.rs b/crates/optimism/node/src/lib.rs
index 4ef8a706785..e62f5b1b439 100644
--- a/crates/optimism/node/src/lib.rs
+++ b/crates/optimism/node/src/lib.rs
@@ -42,3 +42,6 @@ pub use reth_optimism_payload_builder::{
 pub use reth_optimism_evm::*;
 pub use reth_optimism_storage::OpStorage;
+
+use op_revm as _;
+use revm as _;

From 8f26b95643c151ec7fd6498264ada0974c7db3f1 Mon Sep 17 00:00:00 2001
From: Matthias Seitz 
Date: Mon, 21 Jul 2025 15:30:13 +0200
Subject: [PATCH 237/305] chore: bump alloy-evm 015 (#17528)

---
 Cargo.lock                            |  8 ++++----
 Cargo.toml                            |  4 ++--
 crates/rpc/rpc/src/otterscan.rs       |  4 ++--
 crates/rpc/rpc/src/trace.rs           | 16 +++++++++-------
 examples/custom-node/src/evm/alloy.rs | 20 ++++----------------
 5 files changed, 21 insertions(+), 31 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 4c6c2e2df07..6f4d5702c01 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -259,9 +259,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-evm"
-version = "0.14.0"
+version = "0.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef2d6e0448bfd057a4438226b3d2fd547a0530fa4226217dfb1682d09f108bd4"
+checksum = "28de0dd1bbb0634ef7c3715e8e60176b77b82f8b6b15b2e35fe64cf6640f6550"
 dependencies = [
 "alloy-consensus",
 "alloy-eips",
@@ -373,9 +373,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-op-evm"
-version = "0.14.0"
+version = "0.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98354b9c3d50de701a63693d5b6a37e468a93b970b2224f934dd745c727ef998"
+checksum = "0afe768962308a08b42fddef8a4296324f140b5a8dd0d4360038229885ce9434"
 dependencies = [
 "alloy-consensus",
 "alloy-eips",
diff --git a/Cargo.toml b/Cargo.toml
index b0c126c8bfc..464f9212ac1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -471,7 +471,7 @@ revm-inspectors = "0.26.5"
 alloy-chains = { version = "0.2.5", default-features = false }
 alloy-dyn-abi = "1.2.0"
 alloy-eip2124 = { version = "0.2.0",
default-features = false } -alloy-evm = { version = "0.14", default-features = false } +alloy-evm = { version = "0.15", default-features = false } alloy-primitives = { version = "1.2.0", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.2.0" @@ -509,7 +509,7 @@ alloy-transport-ipc = { version = "1.0.22", default-features = false } alloy-transport-ws = { version = "1.0.22", default-features = false } # op -alloy-op-evm = { version = "0.14", default-features = false } +alloy-op-evm = { version = "0.15", default-features = false } alloy-op-hardforks = "0.2.2" op-alloy-rpc-types = { version = "0.18.11", default-features = false } op-alloy-rpc-types-engine = { version = "0.18.11", default-features = false } diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index bafbf0730bd..92698e6eca2 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -340,9 +340,9 @@ where num.into(), None, TracingInspectorConfig::default_parity(), - |tx_info, ctx| { + |tx_info, mut ctx| { Ok(ctx - .inspector + .take_inspector() .into_parity_builder() .into_localized_transaction_traces(tx_info)) }, diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 787b7dfd1bd..1ae984bc9a7 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -402,9 +402,9 @@ where Some(block.clone()), None, TracingInspectorConfig::default_parity(), - move |tx_info, ctx| { + move |tx_info, mut ctx| { let mut traces = ctx - .inspector + .take_inspector() .into_parity_builder() .into_localized_transaction_traces(tx_info); traces.retain(|trace| matcher.matches(&trace.trace)); @@ -471,9 +471,11 @@ where block_id, None, TracingInspectorConfig::default_parity(), - |tx_info, ctx| { - let traces = - ctx.inspector.into_parity_builder().into_localized_transaction_traces(tx_info); + |tx_info, mut ctx| { + let traces = ctx + .take_inspector() + .into_parity_builder() + .into_localized_transaction_traces(tx_info); Ok(traces) }, ); @@ -508,9 +510,9 @@ where block_id, None, TracingInspectorConfig::from_parity_config(&trace_types), - move |tx_info, ctx| { + move |tx_info, mut ctx| { let mut full_trace = ctx - .inspector + .take_inspector() .into_parity_builder() .into_trace_results(&ctx.result, &trace_types); diff --git a/examples/custom-node/src/evm/alloy.rs b/examples/custom-node/src/evm/alloy.rs index 67a9f90fdfa..6071a2c6dd8 100644 --- a/examples/custom-node/src/evm/alloy.rs +++ b/examples/custom-node/src/evm/alloy.rs @@ -70,10 +70,6 @@ where self.inner.transact_system_call(caller, contract, data) } - fn db_mut(&mut self) -> &mut Self::DB { - self.inner.db_mut() - } - fn finish(self) -> (Self::DB, EvmEnv) { self.inner.finish() } @@ -82,20 +78,12 @@ where self.inner.set_inspector_enabled(enabled) } - fn precompiles(&self) -> &Self::Precompiles { - self.inner.precompiles() - } - - fn precompiles_mut(&mut self) -> &mut Self::Precompiles { - self.inner.precompiles_mut() - } - - fn inspector(&self) -> &Self::Inspector { - self.inner.inspector() + fn components(&self) -> (&Self::DB, &Self::Inspector, &Self::Precompiles) { + self.inner.components() } - fn inspector_mut(&mut self) -> &mut Self::Inspector { - self.inner.inspector_mut() + fn components_mut(&mut self) -> (&mut Self::DB, &mut Self::Inspector, &mut Self::Precompiles) { + self.inner.components_mut() } } From 818e01773accb34ef807136fec343179678b40b5 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov 
Date: Mon, 21 Jul 2025 16:46:48 +0300 Subject: [PATCH 238/305] feat: `HeaderConverter` (#17490) Co-authored-by: Matthias Seitz --- Cargo.lock | 3 - crates/ethereum/node/Cargo.toml | 2 - crates/ethereum/node/src/node.rs | 28 +- crates/node/builder/src/rpc.rs | 4 +- crates/optimism/rpc/Cargo.toml | 1 - crates/optimism/rpc/src/eth/block.rs | 30 +- crates/optimism/rpc/src/eth/call.rs | 46 +-- crates/optimism/rpc/src/eth/mod.rs | 167 ++++------- crates/optimism/rpc/src/eth/pending_block.rs | 52 +--- crates/optimism/rpc/src/eth/receipt.rs | 25 +- crates/optimism/rpc/src/eth/transaction.rs | 28 +- .../primitives-traits/src/block/recovered.rs | 63 +++-- crates/rpc/rpc-builder/src/lib.rs | 70 ++--- crates/rpc/rpc-convert/src/rpc.rs | 12 +- crates/rpc/rpc-convert/src/transaction.rs | 104 ++++++- crates/rpc/rpc-eth-api/Cargo.toml | 1 - crates/rpc/rpc-eth-api/src/helpers/block.rs | 57 ++-- crates/rpc/rpc-eth-api/src/helpers/call.rs | 19 +- .../rpc-eth-api/src/helpers/pending_block.rs | 32 +-- crates/rpc/rpc-eth-api/src/helpers/receipt.rs | 12 +- crates/rpc/rpc-eth-api/src/helpers/state.rs | 10 +- crates/rpc/rpc-eth-api/src/helpers/trace.rs | 16 +- .../rpc-eth-api/src/helpers/transaction.rs | 4 +- crates/rpc/rpc-eth-api/src/node.rs | 119 ++++++-- crates/rpc/rpc-eth-types/src/cache/mod.rs | 40 +-- crates/rpc/rpc-eth-types/src/fee_history.rs | 2 +- crates/rpc/rpc-eth-types/src/gas_oracle.rs | 10 +- crates/rpc/rpc-eth-types/src/pending_block.rs | 8 +- crates/rpc/rpc-eth-types/src/receipt.rs | 2 +- crates/rpc/rpc-eth-types/src/simulate.rs | 25 +- crates/rpc/rpc/src/debug.rs | 30 +- crates/rpc/rpc/src/eth/builder.rs | 147 ++++------ crates/rpc/rpc/src/eth/core.rs | 263 +++++++----------- crates/rpc/rpc/src/eth/filter.rs | 23 +- crates/rpc/rpc/src/eth/helpers/block.rs | 45 +-- crates/rpc/rpc/src/eth/helpers/call.rs | 62 ++--- crates/rpc/rpc/src/eth/helpers/fees.rs | 39 +-- .../rpc/rpc/src/eth/helpers/pending_block.rs | 49 +--- crates/rpc/rpc/src/eth/helpers/receipt.rs | 31 +-- crates/rpc/rpc/src/eth/helpers/signer.rs | 21 +- crates/rpc/rpc/src/eth/helpers/spec.rs | 21 +- crates/rpc/rpc/src/eth/helpers/state.rs | 96 ++----- crates/rpc/rpc/src/eth/helpers/trace.rs | 28 +- crates/rpc/rpc/src/eth/helpers/transaction.rs | 70 +---- crates/rpc/rpc/src/eth/helpers/types.rs | 6 +- 45 files changed, 753 insertions(+), 1170 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6f4d5702c01..ba6ea52a162 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8965,7 +8965,6 @@ dependencies = [ "futures", "rand 0.9.1", "reth-chainspec", - "reth-consensus", "reth-db", "reth-e2e-test-utils", "reth-engine-local", @@ -9416,7 +9415,6 @@ dependencies = [ "reth-chainspec", "reth-evm", "reth-metrics", - "reth-network-api", "reth-node-api", "reth-node-builder", "reth-optimism-chainspec", @@ -10080,7 +10078,6 @@ dependencies = [ "reth-evm", "reth-network-api", "reth-node-api", - "reth-payload-builder", "reth-primitives-traits", "reth-revm", "reth-rpc-convert", diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index c62ce1b8fe1..2605efbf6bd 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -25,7 +25,6 @@ reth-transaction-pool.workspace = true reth-network.workspace = true reth-evm.workspace = true reth-evm-ethereum.workspace = true -reth-consensus.workspace = true reth-rpc.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true @@ -91,7 +90,6 @@ js-tracer = ["reth-node-builder/js-tracer"] test-utils = [ "reth-node-builder/test-utils", 
"reth-chainspec/test-utils", - "reth-consensus/test-utils", "reth-network/test-utils", "reth-ethereum-primitives/test-utils", "reth-revm/test-utils", diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 804253f45f8..2860053cf25 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -6,7 +6,6 @@ use alloy_eips::{eip7840::BlobParams, merge::EPOCH_SLOTS}; use alloy_network::Ethereum; use alloy_rpc_types_engine::ExecutionData; use reth_chainspec::{ChainSpec, EthChainSpec, EthereumHardforks, Hardforks}; -use reth_consensus::{ConsensusError, FullConsensus}; use reth_engine_local::LocalPayloadAttributesBuilder; use reth_engine_primitives::EngineTypes; use reth_ethereum_consensus::EthBeaconConsensus; @@ -157,21 +156,16 @@ where type EthApi = EthApiFor; async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result { - let api = reth_rpc::EthApiBuilder::new( - ctx.components.provider().clone(), - ctx.components.pool().clone(), - ctx.components.network().clone(), - ctx.components.evm_config().clone(), - ) - .eth_cache(ctx.cache) - .task_spawner(ctx.components.task_executor().clone()) - .gas_cap(ctx.config.rpc_gas_cap.into()) - .max_simulate_blocks(ctx.config.rpc_max_simulate_blocks) - .eth_proof_window(ctx.config.eth_proof_window) - .fee_history_cache_config(ctx.config.fee_history_cache) - .proof_permits(ctx.config.proof_permits) - .gas_oracle_config(ctx.config.gas_oracle) - .build(); + let api = reth_rpc::EthApiBuilder::new_with_components(ctx.components.clone()) + .eth_cache(ctx.cache) + .task_spawner(ctx.components.task_executor().clone()) + .gas_cap(ctx.config.rpc_gas_cap.into()) + .max_simulate_blocks(ctx.config.rpc_max_simulate_blocks) + .eth_proof_window(ctx.config.eth_proof_window) + .fee_history_cache_config(ctx.config.fee_history_cache) + .proof_permits(ctx.config.proof_permits) + .gas_oracle_config(ctx.config.gas_oracle) + .build(); Ok(api) } } @@ -516,7 +510,7 @@ where Types: NodeTypes, >, { - type Consensus = Arc>; + type Consensus = Arc::ChainSpec>>; async fn build_consensus(self, ctx: &BuilderContext) -> eyre::Result { Ok(Arc::new(EthBeaconConsensus::new(ctx.chain_spec()))) diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 17ed50a286d..6b5561ef987 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -11,7 +11,7 @@ use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; use reth_node_api::{ AddOnsContext, BlockTy, EngineTypes, EngineValidator, FullNodeComponents, FullNodeTypes, - NodeAddOns, NodeTypes, PayloadTypes, ReceiptTy, + NodeAddOns, NodeTypes, PayloadTypes, PrimitivesTy, }; use reth_node_core::{ node_config::NodeConfig, @@ -953,7 +953,7 @@ pub struct EthApiCtx<'a, N: FullNodeTypes> { /// Eth API configuration pub config: EthConfig, /// Cache for eth state - pub cache: EthStateCache, ReceiptTy>, + pub cache: EthStateCache>, } /// A `EthApi` that knows how to build `eth` namespace API from [`FullNodeComponents`]. 
diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 34343670819..51d0037c7e8 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -24,7 +24,6 @@ reth-transaction-pool.workspace = true reth-rpc.workspace = true reth-rpc-api.workspace = true reth-node-api.workspace = true -reth-network-api.workspace = true reth-node-builder.workspace = true reth-chainspec.workspace = true reth-rpc-engine-api.workspace = true diff --git a/crates/optimism/rpc/src/eth/block.rs b/crates/optimism/rpc/src/eth/block.rs index 85ed4494cf1..0efd9aea988 100644 --- a/crates/optimism/rpc/src/eth/block.rs +++ b/crates/optimism/rpc/src/eth/block.rs @@ -1,35 +1,23 @@ //! Loads and formats OP block RPC response. -use reth_chainspec::ChainSpecProvider; -use reth_optimism_forks::OpHardforks; +use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; use reth_rpc_eth_api::{ - helpers::{EthBlocks, LoadBlock, LoadPendingBlock, SpawnBlocking}, - RpcConvert, + helpers::{EthBlocks, LoadBlock}, + FromEvmError, RpcConvert, }; -use reth_storage_api::{HeaderProvider, ProviderTx}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; - -use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError}; impl EthBlocks for OpEthApi where - Self: LoadBlock< - Error = OpEthApiError, - RpcConvert: RpcConvert, - >, - N: OpNodeCore + HeaderProvider>, - Rpc: RpcConvert, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, { } impl LoadBlock for OpEthApi where - Self: LoadPendingBlock< - Pool: TransactionPool< - Transaction: PoolTransaction>, - >, - > + SpawnBlocking, - N: OpNodeCore, - Rpc: RpcConvert, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, { } diff --git a/crates/optimism/rpc/src/eth/call.rs b/crates/optimism/rpc/src/eth/call.rs index 0e644a54667..e929ef7ca75 100644 --- a/crates/optimism/rpc/src/eth/call.rs +++ b/crates/optimism/rpc/src/eth/call.rs @@ -1,51 +1,31 @@ -use super::OpNodeCore; -use crate::{OpEthApi, OpEthApiError}; -use op_revm::OpTransaction; -use reth_evm::{execute::BlockExecutorFactory, ConfigureEvm, EvmFactory, TxEnvFor}; -use reth_node_api::NodePrimitives; +use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; +use reth_evm::TxEnvFor; use reth_rpc_eth_api::{ - helpers::{estimate::EstimateCall, Call, EthCall, LoadBlock, LoadState, SpawnBlocking}, - FromEvmError, FullEthApiTypes, RpcConvert, + helpers::{estimate::EstimateCall, Call, EthCall}, + FromEvmError, RpcConvert, }; -use reth_storage_api::{errors::ProviderError, ProviderHeader, ProviderTx}; -use revm::context::TxEnv; impl EthCall for OpEthApi where - Self: EstimateCall + LoadBlock + FullEthApiTypes, - N: OpNodeCore, - Rpc: RpcConvert, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert>, { } impl EstimateCall for OpEthApi where - Self: Call>, - N: OpNodeCore, - Rpc: RpcConvert, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert>, { } impl Call for OpEthApi where - Self: LoadState< - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - BlockExecutorFactory: BlockExecutorFactory< - EvmFactory: EvmFactory>, - >, - >, - RpcConvert: RpcConvert, Network = Self::NetworkTypes>, - Error: FromEvmError - + From<::Error> - + From, - > + SpawnBlocking, - Self::Error: From, - N: OpNodeCore, - Rpc: RpcConvert, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert>, { #[inline] fn call_gas_limit(&self) -> u64 { diff --git a/crates/optimism/rpc/src/eth/mod.rs 
b/crates/optimism/rpc/src/eth/mod.rs index dfbffaa7c41..461a36a1894 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -16,44 +16,28 @@ use alloy_primitives::U256; use eyre::WrapErr; use op_alloy_network::Optimism; pub use receipt::{OpReceiptBuilder, OpReceiptFieldsBuilder}; -use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_evm::ConfigureEvm; -use reth_network_api::NetworkInfo; -use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodePrimitives}; +use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_rpc::eth::{core::EthApiInner, DevSigner}; use reth_rpc_eth_api::{ helpers::{ pending_block::BuildPendingEnv, spec::SignersForApi, AddDevSigners, EthApiSpec, EthFees, - EthState, LoadBlock, LoadFee, LoadState, SpawnBlocking, Trace, + EthState, LoadFee, LoadState, SpawnBlocking, Trace, }, EthApiTypes, FromEvmError, FullEthApiServer, RpcConvert, RpcConverter, RpcNodeCore, RpcNodeCoreExt, RpcTypes, SignableTxRequest, }; use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; -use reth_storage_api::{ - BlockNumReader, BlockReader, BlockReaderIdExt, ProviderBlock, ProviderHeader, ProviderReceipt, - ProviderTx, StageCheckpointReader, StateProviderFactory, -}; +use reth_storage_api::{ProviderHeader, ProviderTx}; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, }; -use reth_transaction_pool::TransactionPool; use std::{fmt, fmt::Formatter, marker::PhantomData, sync::Arc}; /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. -pub type EthApiNodeBackend = EthApiInner< - ::Provider, - ::Pool, - ::Network, - ::Evm, - Rpc, ->; - -/// A helper trait with requirements for [`RpcNodeCore`] to be used in [`OpEthApi`]. -pub trait OpNodeCore: RpcNodeCore {} -impl OpNodeCore for T where T: RpcNodeCore {} +pub type EthApiNodeBackend = EthApiInner; /// OP-Reth `Eth` API implementation. /// @@ -65,18 +49,18 @@ impl OpNodeCore for T where T: RpcNodeCore { +pub struct OpEthApi { /// Gateway to node's core components. inner: Arc>, } -impl Clone for OpEthApi { +impl Clone for OpEthApi { fn clone(&self) -> Self { Self { inner: self.inner.clone() } } } -impl OpEthApi { +impl OpEthApi { /// Creates a new `OpEthApi`. 
pub fn new( eth_api: EthApiNodeBackend, @@ -105,11 +89,8 @@ impl OpEthApi { impl EthApiTypes for OpEthApi where - Self: Send + Sync + fmt::Debug, - N: OpNodeCore, - Rpc: RpcConvert, - ::Evm: fmt::Debug, - ::Primitives: fmt::Debug, + N: RpcNodeCore, + Rpc: RpcConvert, { type Error = OpEthApiError; type NetworkTypes = Rpc::Network; @@ -122,15 +103,14 @@ where impl RpcNodeCore for OpEthApi where - N: OpNodeCore, - Rpc: RpcConvert, + N: RpcNodeCore, + Rpc: RpcConvert, { type Primitives = N::Primitives; type Provider = N::Provider; type Pool = N::Pool; - type Evm = ::Evm; - type Network = ::Network; - type PayloadBuilder = (); + type Evm = N::Evm; + type Network = N::Network; #[inline] fn pool(&self) -> &Self::Pool { @@ -147,11 +127,6 @@ where self.inner.eth_api.network() } - #[inline] - fn payload_builder(&self) -> &Self::PayloadBuilder { - &() - } - #[inline] fn provider(&self) -> &Self::Provider { self.inner.eth_api.provider() @@ -160,24 +135,19 @@ where impl RpcNodeCoreExt for OpEthApi where - N: OpNodeCore, - Rpc: RpcConvert, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] - fn cache(&self) -> &EthStateCache, ProviderReceipt> { + fn cache(&self) -> &EthStateCache { self.inner.eth_api.cache() } } impl EthApiSpec for OpEthApi where - N: OpNodeCore< - Provider: ChainSpecProvider - + BlockNumReader - + StageCheckpointReader, - Network: NetworkInfo, - >, - Rpc: RpcConvert, + N: RpcNodeCore, + Rpc: RpcConvert, { type Transaction = ProviderTx; type Rpc = Rpc::Network; @@ -195,11 +165,8 @@ where impl SpawnBlocking for OpEthApi where - Self: Send + Sync + Clone + 'static, - N: OpNodeCore, - Rpc: RpcConvert, - ::Evm: fmt::Debug, - ::Primitives: fmt::Debug, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -219,13 +186,9 @@ where impl LoadFee for OpEthApi where - Self: LoadBlock, - N: OpNodeCore< - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory, - >, - Rpc: RpcConvert, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] fn gas_oracle(&self) -> &GasPriceOracle { @@ -245,21 +208,15 @@ where impl LoadState for OpEthApi where - N: OpNodeCore< - Provider: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, - >, - Rpc: RpcConvert, - ::Evm: fmt::Debug, - ::Primitives: fmt::Debug, + N: RpcNodeCore, + Rpc: RpcConvert, { } impl EthState for OpEthApi where - Self: LoadState + SpawnBlocking, - N: OpNodeCore, - Rpc: RpcConvert, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] fn max_proof_window(&self) -> u64 { @@ -269,36 +226,23 @@ where impl EthFees for OpEthApi where - Self: LoadFee< - Provider: ChainSpecProvider< - ChainSpec: EthChainSpec
>, - >, - >, - N: OpNodeCore, - Rpc: RpcConvert, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, { } impl Trace for OpEthApi where - Self: RpcNodeCore - + LoadState< - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - >, - Error: FromEvmError, - >, - N: OpNodeCore, - Rpc: RpcConvert, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, { } impl AddDevSigners for OpEthApi where - N: OpNodeCore, + N: RpcNodeCore, Rpc: RpcConvert< Network: RpcTypes>>, >, @@ -308,14 +252,14 @@ where } } -impl fmt::Debug for OpEthApi { +impl fmt::Debug for OpEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApi").finish_non_exhaustive() } } /// Container type `OpEthApi` -pub struct OpEthApiInner { +pub struct OpEthApiInner { /// Gateway to node's core components. eth_api: EthApiNodeBackend, /// Sequencer client, configured to forward submitted transactions to sequencer of given OP @@ -327,13 +271,13 @@ pub struct OpEthApiInner { min_suggested_priority_fee: U256, } -impl fmt::Debug for OpEthApiInner { +impl fmt::Debug for OpEthApiInner { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("OpEthApiInner").finish() } } -impl OpEthApiInner { +impl OpEthApiInner { /// Returns a reference to the [`EthApiNodeBackend`]. const fn eth_api(&self) -> &EthApiNodeBackend { &self.eth_api @@ -350,6 +294,7 @@ pub type OpRpcConvert = RpcConverter< NetworkT, ::Evm, OpReceiptConverter<::Provider>, + (), OpTxInfoMapper<::Provider>, >; @@ -420,26 +365,20 @@ where async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result { let Self { sequencer_url, sequencer_headers, min_suggested_priority_fee, .. } = self; - let rpc_converter = RpcConverter::new( - OpReceiptConverter::new(ctx.components.provider().clone()), - OpTxInfoMapper::new(ctx.components.provider().clone()), - ); - let eth_api = reth_rpc::EthApiBuilder::new( - ctx.components.provider().clone(), - ctx.components.pool().clone(), - ctx.components.network().clone(), - ctx.components.evm_config().clone(), - ) - .with_rpc_converter(rpc_converter) - .eth_cache(ctx.cache) - .task_spawner(ctx.components.task_executor().clone()) - .gas_cap(ctx.config.rpc_gas_cap.into()) - .max_simulate_blocks(ctx.config.rpc_max_simulate_blocks) - .eth_proof_window(ctx.config.eth_proof_window) - .fee_history_cache_config(ctx.config.fee_history_cache) - .proof_permits(ctx.config.proof_permits) - .gas_oracle_config(ctx.config.gas_oracle) - .build_inner(); + let rpc_converter = + RpcConverter::new(OpReceiptConverter::new(ctx.components.provider().clone())) + .with_mapper(OpTxInfoMapper::new(ctx.components.provider().clone())); + let eth_api = reth_rpc::EthApiBuilder::new_with_components(ctx.components.clone()) + .with_rpc_converter(rpc_converter) + .eth_cache(ctx.cache) + .task_spawner(ctx.components.task_executor().clone()) + .gas_cap(ctx.config.rpc_gas_cap.into()) + .max_simulate_blocks(ctx.config.rpc_max_simulate_blocks) + .eth_proof_window(ctx.config.eth_proof_window) + .fee_history_cache_config(ctx.config.fee_history_cache) + .proof_permits(ctx.config.proof_permits) + .gas_oracle_config(ctx.config.gas_oracle) + .build_inner(); let sequencer_client = if let Some(url) = sequencer_url { Some( diff --git a/crates/optimism/rpc/src/eth/pending_block.rs b/crates/optimism/rpc/src/eth/pending_block.rs index 555f5d59ee5..5b50ea68f0e 100644 --- a/crates/optimism/rpc/src/eth/pending_block.rs +++ b/crates/optimism/rpc/src/eth/pending_block.rs @@ -2,53 
+2,26 @@ use std::sync::Arc; -use crate::OpEthApi; +use crate::{OpEthApi, OpEthApiError}; use alloy_eips::BlockNumberOrTag; -use reth_chainspec::ChainSpecProvider; -use reth_evm::ConfigureEvm; -use reth_node_api::NodePrimitives; use reth_primitives_traits::RecoveredBlock; use reth_rpc_eth_api::{ - helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock, SpawnBlocking}, - types::RpcTypes, - EthApiTypes, FromEthApiError, FromEvmError, RpcConvert, RpcNodeCore, + helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock}, + FromEvmError, RpcConvert, RpcNodeCore, }; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_storage_api::{ - BlockReader, BlockReaderIdExt, ProviderBlock, ProviderHeader, ProviderReceipt, ProviderTx, - ReceiptProvider, StateProviderFactory, + BlockReader, BlockReaderIdExt, ProviderBlock, ProviderReceipt, ReceiptProvider, }; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; impl LoadPendingBlock for OpEthApi where - Self: SpawnBlocking - + EthApiTypes< - NetworkTypes: RpcTypes< - Header = alloy_rpc_types_eth::Header>, - >, - Error: FromEvmError, - RpcConvert: RpcConvert, - >, - N: RpcNodeCore< - Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory, - Pool: TransactionPool>>, - Evm: ConfigureEvm, - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - Receipt = ProviderReceipt, - Block = ProviderBlock, - >, - >, - Rpc: RpcConvert, + N: RpcNodeCore, + OpEthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] - fn pending_block( - &self, - ) -> &tokio::sync::Mutex< - Option, ProviderReceipt>>, - > { + fn pending_block(&self) -> &tokio::sync::Mutex>> { self.inner.eth_api.pending_block() } @@ -70,20 +43,17 @@ where // See: let latest = self .provider() - .latest_header() - .map_err(Self::Error::from_eth_err)? + .latest_header()? .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; let block_id = latest.hash().into(); let block = self .provider() - .recovered_block(block_id, Default::default()) - .map_err(Self::Error::from_eth_err)? + .recovered_block(block_id, Default::default())? .ok_or(EthApiError::HeaderNotFound(block_id.into()))?; let receipts = self .provider() - .receipts_by_block(block_id) - .map_err(Self::Error::from_eth_err)? + .receipts_by_block(block_id)? .ok_or(EthApiError::ReceiptsNotFound(block_id.into()))?; Ok(Some((Arc::new(block), Arc::new(receipts)))) diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index f304305cc8f..cd16c4e1664 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -1,6 +1,6 @@ //! Loads and formats OP receipt RPC response. 
-use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError}; +use crate::{eth::RpcNodeCore, OpEthApi, OpEthApiError}; use alloy_eips::eip2718::Encodable2718; use alloy_rpc_types_eth::{Log, TransactionReceipt}; use op_alloy_consensus::{ @@ -16,29 +16,16 @@ use reth_primitives_traits::Block; use reth_rpc_eth_api::{ helpers::LoadReceipt, transaction::{ConvertReceiptInput, ReceiptConverter}, - EthApiTypes, RpcConvert, RpcNodeCoreExt, + RpcConvert, }; use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; -use reth_storage_api::{BlockReader, ProviderReceipt, ProviderTx}; +use reth_storage_api::BlockReader; use std::fmt::Debug; impl LoadReceipt for OpEthApi where - Self: RpcNodeCoreExt< - Primitives: NodePrimitives< - SignedTx = ProviderTx, - Receipt = ProviderReceipt, - >, - > + EthApiTypes< - NetworkTypes = Rpc::Network, - RpcConvert: RpcConvert< - Network = Rpc::Network, - Primitives = Self::Primitives, - Error = Self::Error, - >, - >, - N: OpNodeCore, - Rpc: RpcConvert, + N: RpcNodeCore, + Rpc: RpcConvert, { } @@ -58,7 +45,7 @@ impl OpReceiptConverter { impl ReceiptConverter for OpReceiptConverter where N: NodePrimitives, - Provider: BlockReader + ChainSpecProvider + Debug, + Provider: BlockReader + ChainSpecProvider + Debug + 'static, { type RpcReceipt = OpTransactionReceipt; type Error = OpEthApiError; diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 8127387b420..7b46db38cc1 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -1,29 +1,23 @@ //! Loads and formats OP transaction RPC response. -use crate::{eth::OpNodeCore, OpEthApi, OpEthApiError, SequencerClient}; +use crate::{OpEthApi, OpEthApiError, SequencerClient}; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_eth::TransactionInfo; use op_alloy_consensus::{transaction::OpTransactionInfo, OpTxEnvelope}; use reth_optimism_primitives::DepositReceipt; use reth_rpc_eth_api::{ - helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction, SpawnBlocking}, - try_into_op_tx_info, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcConvert, RpcNodeCore, - RpcNodeCoreExt, TxInfoMapper, + helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction}, + try_into_op_tx_info, FromEthApiError, RpcConvert, RpcNodeCore, TxInfoMapper, }; use reth_rpc_eth_types::utils::recover_raw_transaction; -use reth_storage_api::{ - errors::ProviderError, BlockReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, - TransactionsProvider, -}; +use reth_storage_api::{errors::ProviderError, ReceiptProvider}; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; use std::fmt::{Debug, Formatter}; impl EthTransactions for OpEthApi where - Self: LoadTransaction - + EthApiTypes, - N: OpNodeCore>>, - Rpc: RpcConvert, + N: RpcNodeCore, + Rpc: RpcConvert, { fn signers(&self) -> &SignersForRpc { self.inner.eth_api.signers() @@ -72,17 +66,15 @@ where impl LoadTransaction for OpEthApi where - Self: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt, - N: OpNodeCore, - Self::Pool: TransactionPool, - Rpc: RpcConvert, + N: RpcNodeCore, + Rpc: RpcConvert, { } impl OpEthApi where - N: OpNodeCore, - Rpc: RpcConvert, + N: RpcNodeCore, + Rpc: RpcConvert, { /// Returns the [`SequencerClient`] if one is set. 
pub fn raw_tx_forwarder(&self) -> Option { diff --git a/crates/primitives-traits/src/block/recovered.rs b/crates/primitives-traits/src/block/recovered.rs index 5c3c9eb08c6..897e167bf76 100644 --- a/crates/primitives-traits/src/block/recovered.rs +++ b/crates/primitives-traits/src/block/recovered.rs @@ -590,15 +590,12 @@ mod rpc_compat { use super::{ Block as BlockTrait, BlockBody as BlockBodyTrait, RecoveredBlock, SignedTransaction, }; - use crate::block::error::BlockRecoveryError; + use crate::{block::error::BlockRecoveryError, SealedHeader}; use alloc::vec::Vec; use alloy_consensus::{ transaction::Recovered, Block as CBlock, BlockBody, BlockHeader, Sealable, }; - use alloy_primitives::U256; - use alloy_rpc_types_eth::{ - Block, BlockTransactions, BlockTransactionsKind, Header, TransactionInfo, - }; + use alloy_rpc_types_eth::{Block, BlockTransactions, BlockTransactionsKind, TransactionInfo}; impl RecoveredBlock where @@ -608,11 +605,16 @@ mod rpc_compat { /// /// The `tx_resp_builder` closure transforms each transaction into the desired response /// type. - pub fn into_rpc_block( + /// + /// `header_builder` transforms the block header into RPC representation. It takes the + /// consensus header and RLP length of the block which is a common dependency of RPC + /// headers. + pub fn into_rpc_block( self, kind: BlockTransactionsKind, tx_resp_builder: F, - ) -> Result>, E> + header_builder: impl FnOnce(SealedHeader, usize) -> Result, + ) -> Result, E> where F: Fn( Recovered<<::Body as BlockBodyTrait>::Transaction>, @@ -620,8 +622,10 @@ mod rpc_compat { ) -> Result, { match kind { - BlockTransactionsKind::Hashes => Ok(self.into_rpc_block_with_tx_hashes()), - BlockTransactionsKind::Full => self.into_rpc_block_full(tx_resp_builder), + BlockTransactionsKind::Hashes => self.into_rpc_block_with_tx_hashes(header_builder), + BlockTransactionsKind::Full => { + self.into_rpc_block_full(tx_resp_builder, header_builder) + } } } @@ -632,11 +636,16 @@ mod rpc_compat { /// /// The `tx_resp_builder` closure transforms each transaction into the desired response /// type. - pub fn clone_into_rpc_block( + /// + /// `header_builder` transforms the block header into RPC representation. It takes the + /// consensus header and RLP length of the block which is a common dependency of RPC + /// headers. + pub fn clone_into_rpc_block( &self, kind: BlockTransactionsKind, tx_resp_builder: F, - ) -> Result>, E> + header_builder: impl FnOnce(SealedHeader, usize) -> Result, + ) -> Result, E> where F: Fn( Recovered<<::Body as BlockBodyTrait>::Transaction>, @@ -644,8 +653,10 @@ mod rpc_compat { ) -> Result, { match kind { - BlockTransactionsKind::Hashes => Ok(self.to_rpc_block_with_tx_hashes()), - BlockTransactionsKind::Full => self.clone().into_rpc_block_full(tx_resp_builder), + BlockTransactionsKind::Hashes => self.to_rpc_block_with_tx_hashes(header_builder), + BlockTransactionsKind::Full => { + self.clone().into_rpc_block_full(tx_resp_builder, header_builder) + } } } @@ -653,7 +664,10 @@ mod rpc_compat { /// /// Returns [`BlockTransactions::Hashes`] containing only transaction hashes. /// Efficiently clones only necessary parts, not the entire block. 
- pub fn to_rpc_block_with_tx_hashes(&self) -> Block> { + pub fn to_rpc_block_with_tx_hashes( + &self, + header_builder: impl FnOnce(SealedHeader, usize) -> Result, + ) -> Result, E> { let transactions = self.body().transaction_hashes_iter().copied().collect(); let rlp_length = self.rlp_length(); let header = self.clone_sealed_header(); @@ -662,16 +676,19 @@ mod rpc_compat { let transactions = BlockTransactions::Hashes(transactions); let uncles = self.body().ommers().unwrap_or(&[]).iter().map(|h| h.hash_slow()).collect(); - let header = Header::from_consensus(header.into(), None, Some(U256::from(rlp_length))); + let header = header_builder(header, rlp_length)?; - Block { header, uncles, transactions, withdrawals } + Ok(Block { header, uncles, transactions, withdrawals }) } /// Converts the block into an RPC [`Block`] with transaction hashes. /// /// Consumes self and returns [`BlockTransactions::Hashes`] containing only transaction /// hashes. - pub fn into_rpc_block_with_tx_hashes(self) -> Block> { + pub fn into_rpc_block_with_tx_hashes( + self, + f: impl FnOnce(SealedHeader, usize) -> Result, + ) -> Result, E> { let transactions = self.body().transaction_hashes_iter().copied().collect(); let rlp_length = self.rlp_length(); let (header, body) = self.into_sealed_block().split_sealed_header_body(); @@ -679,19 +696,20 @@ mod rpc_compat { let transactions = BlockTransactions::Hashes(transactions); let uncles = ommers.into_iter().map(|h| h.hash_slow()).collect(); - let header = Header::from_consensus(header.into(), None, Some(U256::from(rlp_length))); + let header = f(header, rlp_length)?; - Block { header, uncles, transactions, withdrawals } + Ok(Block { header, uncles, transactions, withdrawals }) } /// Converts the block into an RPC [`Block`] with full transaction objects. /// /// Returns [`BlockTransactions::Full`] with complete transaction data. /// The `tx_resp_builder` closure transforms each transaction with its metadata. 
- pub fn into_rpc_block_full( + pub fn into_rpc_block_full( self, tx_resp_builder: F, - ) -> Result>, E> + header_builder: impl FnOnce(SealedHeader, usize) -> Result, + ) -> Result, E> where F: Fn( Recovered<<::Body as BlockBodyTrait>::Transaction>, @@ -726,8 +744,7 @@ mod rpc_compat { let transactions = BlockTransactions::Full(transactions); let uncles = ommers.into_iter().map(|h| h.hash_slow()).collect(); - let header = - Header::from_consensus(header.into(), None, Some(U256::from(block_length))); + let header = header_builder(header, block_length)?; let block = Block { header, uncles, transactions, withdrawals }; diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 6824feecee6..0005e2af253 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -42,16 +42,17 @@ use reth_rpc::{ use reth_rpc_api::servers::*; use reth_rpc_eth_api::{ helpers::{ - pending_block::{BasicPendingEnvBuilder, PendingEnvBuilder}, - Call, EthApiSpec, EthTransactions, LoadPendingBlock, TraceExt, + pending_block::PendingEnvBuilder, Call, EthApiSpec, EthTransactions, LoadPendingBlock, + TraceExt, }, + node::RpcNodeCoreAdapter, EthApiServer, EthApiTypes, FullEthApiServer, RpcBlock, RpcConvert, RpcConverter, RpcHeader, - RpcReceipt, RpcTransaction, RpcTxReq, + RpcNodeCore, RpcReceipt, RpcTransaction, RpcTxReq, }; use reth_rpc_eth_types::{receipt::EthReceiptConverter, EthConfig, EthSubscriptionIdProvider}; use reth_rpc_layer::{AuthLayer, Claims, CompressionLayer, JwtAuthValidator, JwtSecret}; use reth_storage_api::{ - AccountReader, BlockReader, BlockReaderIdExt, ChangeSetReader, FullRpcProvider, ProviderBlock, + AccountReader, BlockReader, ChangeSetReader, FullRpcProvider, ProviderBlock, StateProviderFactory, }; use reth_tasks::{pool::BlockingTaskGuard, TaskSpawner, TokioTaskExecutor}; @@ -253,20 +254,19 @@ impl /// Instantiates a new [`EthApiBuilder`] from the configured components. #[expect(clippy::type_complexity)] - pub fn eth_api_builder( + pub fn eth_api_builder( &self, ) -> EthApiBuilder< - Provider, - Pool, - Network, - EvmConfig, - RpcConverter>, + RpcNodeCoreAdapter, + RpcConverter>, > where - Provider: BlockReaderIdExt + ChainSpecProvider + Clone, + Provider: Clone, Pool: Clone, Network: Clone, EvmConfig: Clone, + RpcNodeCoreAdapter: + RpcNodeCore, Evm = EvmConfig>, { EthApiBuilder::new( self.provider.clone(), @@ -282,29 +282,21 @@ impl /// /// See also [`EthApiBuilder`]. #[expect(clippy::type_complexity)] - pub fn bootstrap_eth_api( + pub fn bootstrap_eth_api( &self, ) -> EthApi< - Provider, - Pool, - Network, - EvmConfig, - RpcConverter>, + RpcNodeCoreAdapter, + RpcConverter>, > where - N: NodePrimitives, - Provider: BlockReaderIdExt - + StateProviderFactory - + CanonStateSubscriptions - + ChainSpecProvider - + Clone - + Unpin - + 'static, + Provider: Clone, Pool: Clone, - EvmConfig: ConfigureEvm, Network: Clone, - RpcConverter>: RpcConvert, - BasicPendingEnvBuilder: PendingEnvBuilder, + EvmConfig: ConfigureEvm + Clone, + RpcNodeCoreAdapter: + RpcNodeCore, Evm = EvmConfig>, + RpcConverter>: RpcConvert, + (): PendingEnvBuilder, { self.eth_api_builder().build() } @@ -815,12 +807,8 @@ where /// # Panics /// /// If called outside of the tokio runtime. 
See also [`Self::eth_api`] - pub fn debug_api(&self) -> DebugApi { - DebugApi::new( - self.eth_api().clone(), - self.blocking_pool_guard.clone(), - self.evm_config.clone(), - ) + pub fn debug_api(&self) -> DebugApi { + DebugApi::new(self.eth_api().clone(), self.blocking_pool_guard.clone()) } /// Instantiates `NetApi` @@ -852,7 +840,7 @@ where + ChangeSetReader, Pool: TransactionPool + 'static, Network: NetworkInfo + Peers + Clone + 'static, - EthApi: FullEthApiServer, + EthApi: FullEthApiServer, EvmConfig: ConfigureEvm + 'static, Consensus: FullConsensus + Clone + 'static, { @@ -937,13 +925,11 @@ where .into_rpc() .into() } - RethRpcModule::Debug => DebugApi::new( - eth_api.clone(), - self.blocking_pool_guard.clone(), - self.evm_config.clone(), - ) - .into_rpc() - .into(), + RethRpcModule::Debug => { + DebugApi::new(eth_api.clone(), self.blocking_pool_guard.clone()) + .into_rpc() + .into() + } RethRpcModule::Eth => { // merge all eth handlers let mut module = eth_api.clone().into_rpc(); diff --git a/crates/rpc/rpc-convert/src/rpc.rs b/crates/rpc/rpc-convert/src/rpc.rs index 73061d55543..bd5555a3013 100644 --- a/crates/rpc/rpc-convert/src/rpc.rs +++ b/crates/rpc/rpc-convert/src/rpc.rs @@ -4,7 +4,9 @@ use alloy_consensus::{ EthereumTxEnvelope, EthereumTypedTransaction, SignableTransaction, TxEip4844, }; use alloy_json_rpc::RpcObject; -use alloy_network::{Network, ReceiptResponse, TransactionResponse, TxSigner}; +use alloy_network::{ + primitives::HeaderResponse, Network, ReceiptResponse, TransactionResponse, TxSigner, +}; use alloy_primitives::Signature; use alloy_rpc_types_eth::TransactionRequest; @@ -13,7 +15,7 @@ use alloy_rpc_types_eth::TransactionRequest; /// This is a subset of [`Network`] trait with only RPC response types kept. pub trait RpcTypes: Send + Sync + Clone + Unpin + Debug + 'static { /// Header response type. - type Header: RpcObject; + type Header: RpcObject + HeaderResponse; /// Receipt response type. type Receipt: RpcObject + ReceiptResponse; /// Transaction response type. @@ -38,6 +40,12 @@ pub type RpcTransaction = ::TransactionResponse; /// Adapter for network specific receipt response. pub type RpcReceipt = ::Receipt; +/// Adapter for network specific header response. +pub type RpcHeader = ::Header; + +/// Adapter for network specific block type. +pub type RpcBlock = alloy_rpc_types_eth::Block, RpcHeader>; + /// Adapter for network specific transaction request. 
pub type RpcTxReq = ::TransactionRequest; diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index eb4abe918b6..2d4aad69edd 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -2,9 +2,11 @@ use crate::{ fees::{CallFees, CallFeesError}, - RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, + RpcHeader, RpcReceipt, RpcTransaction, RpcTxReq, RpcTypes, +}; +use alloy_consensus::{ + error::ValueError, transaction::Recovered, EthereumTxEnvelope, Sealable, TxEip4844, }; -use alloy_consensus::{error::ValueError, transaction::Recovered, EthereumTxEnvelope, TxEip4844}; use alloy_network::Network; use alloy_primitives::{Address, TxKind, U256}; use alloy_rpc_types_eth::{ @@ -16,7 +18,9 @@ use reth_evm::{ revm::context_interface::{either::Either, Block}, ConfigureEvm, TxEnvFor, }; -use reth_primitives_traits::{NodePrimitives, TransactionMeta, TxTy}; +use reth_primitives_traits::{ + HeaderTy, NodePrimitives, SealedHeader, SealedHeaderFor, TransactionMeta, TxTy, +}; use revm_context::{BlockEnv, CfgEnv, TxEnv}; use std::{borrow::Cow, convert::Infallible, error::Error, fmt::Debug, marker::PhantomData}; use thiserror::Error; @@ -37,7 +41,7 @@ pub struct ConvertReceiptInput<'a, N: NodePrimitives> { } /// A type that knows how to convert primitive receipts to RPC representations. -pub trait ReceiptConverter: Debug { +pub trait ReceiptConverter: Debug + 'static { /// RPC representation. type RpcReceipt; @@ -52,6 +56,35 @@ pub trait ReceiptConverter: Debug { ) -> Result, Self::Error>; } +/// A type that knows how to convert a consensus header into an RPC header. +pub trait HeaderConverter: Debug + Send + Sync + Unpin + Clone + 'static { + /// Converts a consensus header into an RPC header. + fn convert_header(&self, header: SealedHeader, block_size: usize) -> Rpc; +} + +/// Default implementation of [`HeaderConverter`] that uses [`FromConsensusHeader`] to convert +/// headers. +impl HeaderConverter for () +where + Rpc: FromConsensusHeader, +{ + fn convert_header(&self, header: SealedHeader, block_size: usize) -> Rpc { + Rpc::from_consensus_header(header, block_size) + } +} + +/// Conversion trait for obtaining RPC header from a consensus header. +pub trait FromConsensusHeader { + /// Takes a consensus header and converts it into `self`. + fn from_consensus_header(header: SealedHeader, block_size: usize) -> Self; +} + +impl FromConsensusHeader for alloy_rpc_types_eth::Header { + fn from_consensus_header(header: SealedHeader, block_size: usize) -> Self { + Self::from_consensus(header.into(), None, Some(U256::from(block_size))) + } +} + /// Responsible for the conversions from and into RPC requests and responses. /// /// The JSON-RPC schema and the Node primitives are configurable using the [`RpcConvert::Network`] @@ -60,7 +93,7 @@ pub trait ReceiptConverter: Debug { /// A generic implementation [`RpcConverter`] should be preferred over a manual implementation. As /// long as its trait bound requirements are met, the implementation is created automatically and /// can be used in RPC method handlers for all the conversions. -pub trait RpcConvert: Send + Sync + Unpin + Clone + Debug { +pub trait RpcConvert: Send + Sync + Unpin + Clone + Debug + 'static { /// Associated lower layer consensus types to convert from and into types of [`Self::Network`]. 
type Primitives: NodePrimitives; @@ -117,6 +150,13 @@ pub trait RpcConvert: Send + Sync + Unpin + Clone + Debug { &self, receipts: Vec>, ) -> Result>, Self::Error>; + + /// Converts a primitive header to an RPC header. + fn convert_header( + &self, + header: SealedHeaderFor, + block_size: usize, + ) -> Result, Self::Error>; } /// Converts `self` into `T`. The opposite of [`FromConsensusTx`]. @@ -362,48 +402,74 @@ pub struct TransactionConversionError(String); /// is [`TransactionInfo`] then `()` can be used as `Map` which trivially passes over the input /// object. #[derive(Debug)] -pub struct RpcConverter { +pub struct RpcConverter { phantom: PhantomData<(E, Evm)>, receipt_converter: Receipt, + header_converter: Header, mapper: Map, } -impl RpcConverter { +impl RpcConverter { /// Creates a new [`RpcConverter`] with `receipt_converter` and `mapper`. - pub const fn new(receipt_converter: Receipt, mapper: Map) -> Self { - Self { phantom: PhantomData, receipt_converter, mapper } + pub const fn new(receipt_converter: Receipt) -> Self { + Self { phantom: PhantomData, receipt_converter, header_converter: (), mapper: () } + } +} + +impl RpcConverter { + /// Configures the header converter. + pub fn with_header_converter( + self, + header_converter: HeaderNew, + ) -> RpcConverter { + let Self { receipt_converter, header_converter: _, mapper, phantom } = self; + RpcConverter { receipt_converter, header_converter, mapper, phantom } + } + + /// Configures the mapper. + pub fn with_mapper( + self, + mapper: MapNew, + ) -> RpcConverter { + let Self { receipt_converter, header_converter, mapper: _, phantom } = self; + RpcConverter { receipt_converter, header_converter, mapper, phantom } } } -impl Default for RpcConverter +impl Default for RpcConverter where Receipt: Default, + Header: Default, Map: Default, { fn default() -> Self { Self { phantom: PhantomData, receipt_converter: Default::default(), + header_converter: Default::default(), mapper: Default::default(), } } } -impl Clone for RpcConverter { +impl Clone + for RpcConverter +{ fn clone(&self) -> Self { Self { phantom: PhantomData, receipt_converter: self.receipt_converter.clone(), + header_converter: self.header_converter.clone(), mapper: self.mapper.clone(), } } } -impl RpcConvert for RpcConverter +impl RpcConvert for RpcConverter where N: NodePrimitives, E: RpcTypes + Send + Sync + Unpin + Clone + Debug, - Evm: ConfigureEvm, + Evm: ConfigureEvm + 'static, TxTy: IntoRpcTx + Clone + Debug, RpcTxReq: TryIntoSimTx> + TryIntoTxEnv>, Receipt: ReceiptConverter< @@ -422,6 +488,7 @@ where + Unpin + Clone + Debug, + Header: HeaderConverter, RpcHeader>, Map: for<'a> TxInfoMapper< &'a TxTy, Out = as IntoRpcTx>::TxInfo, @@ -429,7 +496,8 @@ where + Debug + Unpin + Send - + Sync, + + Sync + + 'static, { type Primitives = N; type Network = E; @@ -466,6 +534,14 @@ where ) -> Result>, Self::Error> { self.receipt_converter.convert_receipts(receipts) } + + fn convert_header( + &self, + header: SealedHeaderFor, + block_size: usize, + ) -> Result, Self::Error> { + Ok(self.header_converter.convert_header(header, block_size)) + } } /// Optimism specific RPC transaction compatibility implementations. 
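For downstream crates, the pieces above compose as a small builder chain: receipt conversion is mandatory, while header conversion and tx-info mapping are configured afterwards (both default to `()`). The sketch below implements a custom `HeaderConverter` that mirrors the default behavior; it is illustrative only, and the `reth_rpc_convert::HeaderConverter` re-export path is an assumption.

```rust
use alloy_primitives::U256;
use reth_primitives_traits::SealedHeader;
use reth_rpc_convert::HeaderConverter; // re-export path approximated

/// Mirrors the default `()` conversion: attach the block's RLP size hint.
#[derive(Clone, Copy, Debug, Default)]
struct SizedHeaderConverter;

impl HeaderConverter<alloy_consensus::Header, alloy_rpc_types_eth::Header> for SizedHeaderConverter {
    fn convert_header(
        &self,
        header: SealedHeader<alloy_consensus::Header>,
        block_size: usize,
    ) -> alloy_rpc_types_eth::Header {
        alloy_rpc_types_eth::Header::from_consensus(header.into(), None, Some(U256::from(block_size)))
    }
}

// Wiring it in (inside whatever sets up the eth API; `chain_spec` is assumed
// to be in scope):
//
// let converter = RpcConverter::new(EthReceiptConverter::new(chain_spec))
//     .with_header_converter(SizedHeaderConverter)
//     .with_mapper(());
```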
diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index af8bcb90def..44637d1931c 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -30,7 +30,6 @@ reth-rpc-server-types.workspace = true reth-network-api.workspace = true reth-node-api.workspace = true reth-trie-common = { workspace = true, features = ["eip1186"] } -reth-payload-builder.workspace = true # ethereum alloy-evm = { workspace = true, features = ["overrides", "call-util"] } diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 560002b8a1c..badffeda7b8 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -7,16 +7,14 @@ use crate::{ }; use alloy_consensus::TxReceipt; use alloy_eips::BlockId; -use alloy_primitives::{Sealable, U256}; use alloy_rlp::Encodable; -use alloy_rpc_types_eth::{Block, BlockTransactions, Header, Index}; +use alloy_rpc_types_eth::{Block, BlockTransactions, Index}; use futures::Future; -use reth_evm::ConfigureEvm; use reth_node_api::BlockBody; use reth_primitives_traits::{ - AlloyBlockHeader, NodePrimitives, RecoveredBlock, SignedTransaction, TransactionMeta, + AlloyBlockHeader, RecoveredBlock, SealedHeader, SignedTransaction, TransactionMeta, }; -use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert}; +use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert, RpcHeader}; use reth_storage_api::{BlockIdReader, BlockReader, ProviderHeader, ProviderReceipt, ProviderTx}; use reth_transaction_pool::{PoolTransaction, TransactionPool}; use std::{borrow::Cow, sync::Arc}; @@ -38,11 +36,10 @@ pub trait EthBlocks: LoadBlock> { /// Returns the block header for the given block id. - #[expect(clippy::type_complexity)] fn rpc_block_header( &self, block_id: BlockId, - ) -> impl Future>>, Self::Error>> + Send + ) -> impl Future>, Self::Error>> + Send where Self: FullEthApiTypes, { @@ -64,9 +61,11 @@ pub trait EthBlocks: async move { let Some(block) = self.recovered_block(block_id).await? else { return Ok(None) }; - let block = block.clone_into_rpc_block(full.into(), |tx, tx_info| { - self.tx_resp_builder().fill(tx, tx_info) - })?; + let block = block.clone_into_rpc_block( + full.into(), + |tx, tx_info| self.tx_resp_builder().fill(tx, tx_info), + |header, size| self.tx_resp_builder().convert_header(header, size), + )?; Ok(Some(block)) } } @@ -249,16 +248,24 @@ pub trait EthBlocks: } .unwrap_or_default(); - Ok(uncles.into_iter().nth(index.into()).map(|header| { - let block = alloy_consensus::Block::::uncle(header); - let size = U256::from(block.length()); - Block { - uncles: vec![], - header: Header::from_consensus(block.header.seal_slow(), None, Some(size)), - transactions: BlockTransactions::Uncle, - withdrawals: None, - } - })) + uncles + .into_iter() + .nth(index.into()) + .map(|header| { + let block = + alloy_consensus::Block::::uncle(header); + let size = block.length(); + let header = self + .tx_resp_builder() + .convert_header(SealedHeader::new_unhashed(block.header), size)?; + Ok(Block { + uncles: vec![], + header, + transactions: BlockTransactions::Uncle, + withdrawals: None, + }) + }) + .transpose() } } } @@ -266,15 +273,7 @@ pub trait EthBlocks: /// Loads a block from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. 
-pub trait LoadBlock: - LoadPendingBlock - + SpawnBlocking - + RpcNodeCoreExt< - Pool: TransactionPool>>, - Primitives: NodePrimitives>, - Evm: ConfigureEvm::Primitives>, - > -{ +pub trait LoadBlock: LoadPendingBlock + SpawnBlocking + RpcNodeCoreExt { /// Returns the block object for the given block id. #[expect(clippy::type_complexity)] fn recovered_block( diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 269ce4f5a17..5cf101ba00a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -26,8 +26,8 @@ use reth_evm::{ ConfigureEvm, Evm, EvmEnv, EvmEnvFor, HaltReasonFor, InspectorFor, SpecFor, TransactionEnv, TxEnvFor, }; -use reth_node_api::{BlockBody, NodePrimitives}; -use reth_primitives_traits::{Recovered, SealedHeader, SignedTransaction}; +use reth_node_api::BlockBody; +use reth_primitives_traits::{Recovered, SignedTransaction}; use reth_revm::{ database::StateProviderDatabase, db::{CacheDB, State}, @@ -39,7 +39,7 @@ use reth_rpc_eth_types::{ simulate::{self, EthSimulateError}, EthApiError, RevertError, RpcInvalidTransactionError, StateCacheDb, }; -use reth_storage_api::{BlockIdReader, ProviderHeader, ProviderTx}; +use reth_storage_api::{BlockIdReader, ProviderTx}; use revm::{ context_interface::{ result::{ExecutionResult, ResultAndState}, @@ -193,6 +193,8 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA )? }; + parent = result.block.clone_sealed_header(); + let block = simulate::build_simulated_block( result.block, results, @@ -200,11 +202,6 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA this.tx_resp_builder(), )?; - parent = SealedHeader::new( - block.inner.header.inner.clone(), - block.inner.header.hash, - ); - blocks.push(block); } @@ -456,12 +453,6 @@ pub trait EthCall: EstimateCall + Call + LoadPendingBlock + LoadBlock + FullEthA /// Executes code on state. pub trait Call: LoadState< - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - >, RpcConvert: RpcConvert>, Error: FromEvmError + From<::Error> diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 99062612db7..0af1a69ee4f 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -2,7 +2,7 @@ //! RPC methods. use super::SpawnBlocking; -use crate::{types::RpcTypes, EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; +use crate::{EthApiTypes, FromEthApiError, FromEvmError, RpcNodeCore}; use alloy_consensus::{BlockHeader, Transaction}; use alloy_eips::eip7840::BlobParams; use alloy_primitives::{B256, U256}; @@ -14,9 +14,8 @@ use reth_evm::{ execute::{BlockBuilder, BlockBuilderOutcome}, ConfigureEvm, Evm, NextBlockEnvAttributes, SpecFor, }; -use reth_node_api::NodePrimitives; use reth_primitives_traits::{ - transaction::error::InvalidTransactionError, HeaderTy, Receipt, RecoveredBlock, SealedHeader, + transaction::error::InvalidTransactionError, HeaderTy, RecoveredBlock, SealedHeader, }; use reth_revm::{database::StateProviderDatabase, db::State}; use reth_rpc_convert::RpcConvert; @@ -42,29 +41,14 @@ use tracing::debug; /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` blocks RPC methods. 
pub trait LoadPendingBlock: EthApiTypes< - NetworkTypes: RpcTypes< - Header = alloy_rpc_types_eth::Header>, - >, Error: FromEvmError, RpcConvert: RpcConvert, - > + RpcNodeCore< - Provider: BlockReaderIdExt + ChainSpecProvider + StateProviderFactory, - Evm: ConfigureEvm::Primitives> + 'static, - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - Receipt = ProviderReceipt, - Block = ProviderBlock, - >, - > + > + RpcNodeCore { /// Returns a handle to the pending block. /// /// Data access in default (L1) trait method implementations. - #[expect(clippy::type_complexity)] - fn pending_block( - &self, - ) -> &Mutex, ProviderReceipt>>>; + fn pending_block(&self) -> &Mutex>>; /// Returns a [`PendingEnvBuilder`] for the pending block. fn pending_env_builder(&self) -> &dyn PendingEnvBuilder; @@ -364,13 +348,7 @@ pub trait BuildPendingEnv
<Header> {
     fn build_pending_env(parent: &SealedHeader<Header>
) -> Self; } -/// Basic implementation of [`PendingEnvBuilder`] that assumes that the -/// [`ConfigureEvm::NextBlockEnvCtx`] type implements [`BuildPendingEnv`] trait. -#[derive(Debug, Default, Clone, Copy)] -#[non_exhaustive] -pub struct BasicPendingEnvBuilder; - -impl PendingEnvBuilder for BasicPendingEnvBuilder +impl PendingEnvBuilder for () where Evm: ConfigureEvm>>, { diff --git a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs index 8db4c9a7199..7ff64be65de 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/receipt.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/receipt.rs @@ -4,11 +4,10 @@ use crate::{EthApiTypes, RpcNodeCoreExt, RpcReceipt}; use alloy_consensus::{transaction::TransactionMeta, TxReceipt}; use futures::Future; -use reth_node_api::NodePrimitives; use reth_primitives_traits::SignerRecoverable; use reth_rpc_convert::{transaction::ConvertReceiptInput, RpcConvert}; use reth_rpc_eth_types::{error::FromEthApiError, EthApiError}; -use reth_storage_api::{ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider}; +use reth_storage_api::{ProviderReceipt, ProviderTx}; use std::borrow::Cow; /// Assembles transaction receipt data w.r.t to network. @@ -22,13 +21,8 @@ pub trait LoadReceipt: Network = Self::NetworkTypes, >, Error: FromEthApiError, - > + RpcNodeCoreExt< - Provider: TransactionsProvider + ReceiptProvider, - Primitives: NodePrimitives< - Receipt = ProviderReceipt, - SignedTx = ProviderTx, - >, - > + Send + > + RpcNodeCoreExt + + Send + Sync { /// Helper method for `eth_getBlockReceipts` and `eth_getTransactionReceipt`. diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index 4fa4edee8bc..c9daa1790dc 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -8,7 +8,6 @@ use alloy_primitives::{Address, Bytes, B256, U256}; use alloy_rpc_types_eth::{Account, AccountInfo, EIP1186AccountProofResponse}; use alloy_serde::JsonStorageKey; use futures::Future; -use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_errors::RethError; use reth_evm::{ConfigureEvm, EvmEnvFor}; use reth_rpc_eth_types::{EthApiError, PendingBlockEnv, RpcInvalidTransactionError}; @@ -192,14 +191,7 @@ pub trait EthState: LoadState + SpawnBlocking { /// Loads state from database. /// /// Behaviour shared by several `eth_` RPC methods, not exclusive to `eth_` state RPC methods. 
-pub trait LoadState: - EthApiTypes - + RpcNodeCoreExt< - Provider: StateProviderFactory - + ChainSpecProvider, - Pool: TransactionPool, - > -{ +pub trait LoadState: EthApiTypes + RpcNodeCoreExt { /// Returns the state at the given block number fn state_at_hash(&self, block_hash: B256) -> Result { self.provider().history_by_block_hash(block_hash).map_err(Self::Error::from_eth_err) diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs index 5b84ead6275..fe21f80756c 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs @@ -12,31 +12,19 @@ use reth_evm::{ evm::EvmFactoryExt, system_calls::SystemCaller, tracing::TracingCtx, ConfigureEvm, Database, Evm, EvmEnvFor, EvmFor, HaltReasonFor, InspectorFor, TxEnvFor, }; -use reth_node_api::NodePrimitives; use reth_primitives_traits::{BlockBody, Recovered, RecoveredBlock, SignedTransaction}; use reth_revm::{database::StateProviderDatabase, db::CacheDB}; use reth_rpc_eth_types::{ cache::db::{StateCacheDb, StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, EthApiError, }; -use reth_storage_api::{BlockReader, ProviderBlock, ProviderHeader, ProviderTx}; +use reth_storage_api::{ProviderBlock, ProviderTx}; use revm::{context_interface::result::ResultAndState, DatabaseCommit}; use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use std::sync::Arc; /// Executes CPU heavy tasks. -pub trait Trace: - LoadState< - Provider: BlockReader, - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - >, - Error: FromEvmError, -> -{ +pub trait Trace: LoadState> { /// Executes the [`reth_evm::EvmEnv`] against the given [Database] without committing state /// changes. #[expect(clippy::type_complexity)] diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 4f1252e193b..33cf0048e46 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -223,8 +223,8 @@ pub trait EthTransactions: LoadTransaction { where Self: 'static, { - let provider = self.provider().clone(); - self.spawn_blocking_io(move |_| { + self.spawn_blocking_io(move |this| { + let provider = this.provider(); let (tx, meta) = match provider .transaction_by_hash_with_meta(hash) .map_err(Self::Error::from_eth_err)? diff --git a/crates/rpc/rpc-eth-api/src/node.rs b/crates/rpc/rpc-eth-api/src/node.rs index 44e0cc812a2..0cd113d33eb 100644 --- a/crates/rpc/rpc-eth-api/src/node.rs +++ b/crates/rpc/rpc-eth-api/src/node.rs @@ -1,9 +1,16 @@ //! Helper trait for interfacing with [`FullNodeComponents`]. 
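+//!
+//! [`RpcNodeCore`] carries the full provider, pool, EVM and network bounds the RPC
+//! stack relies on, so `eth_` handler traits can bound a single `N: RpcNodeCore`
+//! instead of threading four loose generics. A wiring sketch (component values are
+//! illustrative):
+//! ```ignore
+//! let components = RpcNodeCoreAdapter::new(provider, pool, network, evm_config);
+//! ```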
-use reth_node_api::{FullNodeComponents, NodeTypes, PrimitivesTy}; -use reth_payload_builder::PayloadBuilderHandle; +use reth_chain_state::CanonStateSubscriptions; +use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; +use reth_evm::ConfigureEvm; +use reth_network_api::NetworkInfo; +use reth_node_api::{FullNodeComponents, NodePrimitives, PrimitivesTy}; +use reth_primitives_traits::{BlockTy, HeaderTy, ReceiptTy, TxTy}; use reth_rpc_eth_types::EthStateCache; -use reth_storage_api::{BlockReader, ProviderBlock, ProviderReceipt}; +use reth_storage_api::{ + BlockReader, BlockReaderIdExt, StageCheckpointReader, StateProviderFactory, +}; +use reth_transaction_pool::{PoolTransaction, TransactionPool}; /// Helper trait that provides the same interface as [`FullNodeComponents`] but without requiring /// implementation of trait bounds. @@ -14,20 +21,31 @@ use reth_storage_api::{BlockReader, ProviderBlock, ProviderReceipt}; /// where the full trait bounds of the components are not necessary. /// /// Every type that is a [`FullNodeComponents`] also implements this trait. -pub trait RpcNodeCore: Clone + Send + Sync { +pub trait RpcNodeCore: Clone + Send + Sync + Unpin + 'static { /// Blockchain data primitives. - type Primitives: Send + Sync + Clone + Unpin; + type Primitives: NodePrimitives; /// The provider type used to interact with the node. - type Provider: Send + Sync + Clone + Unpin; + type Provider: BlockReaderIdExt< + Block = BlockTy, + Receipt = ReceiptTy, + Header = HeaderTy, + Transaction = TxTy, + > + ChainSpecProvider< + ChainSpec: EthChainSpec
<Header = HeaderTy<Self::Primitives>> + EthereumHardforks,
+        > + StateProviderFactory
+        + CanonStateSubscriptions<Primitives = Self::Primitives>
+        + StageCheckpointReader
+        + Send
+        + Sync
+        + Clone
+        + Unpin
+        + 'static;
 
     /// The transaction pool of the node.
-    type Pool: Send + Sync + Clone + Unpin;
+    type Pool: TransactionPool<Transaction: PoolTransaction<Consensus = TxTy<Self::Primitives>>>;
 
     /// The node's EVM configuration, defining settings for the Ethereum Virtual Machine.
-    type Evm: Send + Sync + Clone + Unpin;
+    type Evm: ConfigureEvm<Primitives = Self::Primitives> + Send + Sync + 'static;
 
     /// Network API.
-    type Network: Send + Sync + Clone;
-
-    /// Builds new blocks.
-    type PayloadBuilder: Send + Sync + Clone;
+    type Network: NetworkInfo + Clone;
 
     /// Returns the transaction pool of the node.
     fn pool(&self) -> &Self::Pool;
@@ -38,23 +56,19 @@ pub trait RpcNodeCore: Clone + Send + Sync {
     /// Returns the handle to the network
     fn network(&self) -> &Self::Network;
 
-    /// Returns the handle to the payload builder service.
-    fn payload_builder(&self) -> &Self::PayloadBuilder;
-
     /// Returns the provider of the node.
     fn provider(&self) -> &Self::Provider;
 }
 
 impl<T> RpcNodeCore for T
 where
-    T: FullNodeComponents,
+    T: FullNodeComponents>,
 {
     type Primitives = PrimitivesTy<T::Types>;
     type Provider = T::Provider;
     type Pool = T::Pool;
    type Evm = T::Evm;
     type Network = T::Network;
-    type PayloadBuilder = PayloadBuilderHandle<<T::Types as NodeTypes>::Payload>;
 
     #[inline]
     fn pool(&self) -> &Self::Pool {
@@ -71,11 +85,6 @@ where
         FullNodeComponents::network(self)
     }
 
-    #[inline]
-    fn payload_builder(&self) -> &Self::PayloadBuilder {
-        FullNodeComponents::payload_builder_handle(self)
-    }
-
     #[inline]
     fn provider(&self) -> &Self::Provider {
         FullNodeComponents::provider(self)
@@ -86,7 +95,67 @@ where
 /// server.
 pub trait RpcNodeCoreExt: RpcNodeCore {
     /// Returns handle to RPC cache service.
-    fn cache(
-        &self,
-    ) -> &EthStateCache<ProviderBlock<Self::Provider>, ProviderReceipt<Self::Provider>>;
+    fn cache(&self) -> &EthStateCache<Self::Primitives>;
+}
+
+/// An adapter that allows constructing [`RpcNodeCore`] from components.
+#[derive(Debug, Clone)]
+pub struct RpcNodeCoreAdapter<Provider, Pool, Network, Evm> {
+    provider: Provider,
+    pool: Pool,
+    network: Network,
+    evm_config: Evm,
+}
+
+impl<Provider, Pool, Network, Evm> RpcNodeCoreAdapter<Provider, Pool, Network, Evm> {
+    /// Creates a new `RpcNodeCoreAdapter` instance.
+    pub const fn new(provider: Provider, pool: Pool, network: Network, evm_config: Evm) -> Self {
+        Self { provider, pool, network, evm_config }
+    }
+}
+
+impl<Provider, Pool, Network, Evm> RpcNodeCore for RpcNodeCoreAdapter<Provider, Pool, Network, Evm>
+where
+    Provider: BlockReaderIdExt<
+        Block = BlockTy<Evm::Primitives>,
+        Receipt = ReceiptTy<Evm::Primitives>,
+        Header = HeaderTy<Evm::Primitives>,
+        Transaction = TxTy<Evm::Primitives>,
+    > + ChainSpecProvider<
+        ChainSpec: EthChainSpec<Header = HeaderTy<Evm::Primitives>
> + EthereumHardforks, + > + StateProviderFactory + + CanonStateSubscriptions + + StageCheckpointReader + + Send + + Sync + + Unpin + + Clone + + 'static, + Evm: ConfigureEvm + Clone + 'static, + Pool: TransactionPool>> + + Unpin + + 'static, + Network: NetworkInfo + Clone + Unpin + 'static, +{ + type Primitives = Evm::Primitives; + type Provider = Provider; + type Pool = Pool; + type Evm = Evm; + type Network = Network; + + fn pool(&self) -> &Self::Pool { + &self.pool + } + + fn evm_config(&self) -> &Self::Evm { + &self.evm_config + } + + fn network(&self) -> &Self::Network { + &self.network + } + + fn provider(&self) -> &Self::Provider { + &self.provider + } } diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index a055acac58a..6df612261d9 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -70,17 +70,17 @@ type HeaderLruCache = MultiConsumerLruCache { - to_service: UnboundedSender>, +pub struct EthStateCache { + to_service: UnboundedSender>, } -impl Clone for EthStateCache { +impl Clone for EthStateCache { fn clone(&self) -> Self { Self { to_service: self.to_service.clone() } } } -impl EthStateCache { +impl EthStateCache { /// Creates and returns both [`EthStateCache`] frontend and the memory bound service. fn create( provider: Provider, @@ -91,7 +91,7 @@ impl EthStateCache { max_concurrent_db_operations: usize, ) -> (Self, EthStateCacheService) where - Provider: BlockReader, + Provider: BlockReader, { let (to_service, rx) = unbounded_channel(); let service = EthStateCacheService { @@ -114,7 +114,7 @@ impl EthStateCache { /// See also [`Self::spawn_with`] pub fn spawn(provider: Provider, config: EthStateCacheConfig) -> Self where - Provider: BlockReader + Clone + Unpin + 'static, + Provider: BlockReader + Clone + Unpin + 'static, { Self::spawn_with(provider, config, TokioTaskExecutor::default()) } @@ -129,7 +129,7 @@ impl EthStateCache { executor: Tasks, ) -> Self where - Provider: BlockReader + Clone + Unpin + 'static, + Provider: BlockReader + Clone + Unpin + 'static, Tasks: TaskSpawner + Clone + 'static, { let EthStateCacheConfig { @@ -156,7 +156,7 @@ impl EthStateCache { pub async fn get_recovered_block( &self, block_hash: B256, - ) -> ProviderResult>>> { + ) -> ProviderResult>>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetBlockWithSenders { block_hash, response_tx }); rx.await.map_err(|_| CacheServiceUnavailable)? @@ -165,7 +165,10 @@ impl EthStateCache { /// Requests the receipts for the block hash /// /// Returns `None` if the block was not found. - pub async fn get_receipts(&self, block_hash: B256) -> ProviderResult>>> { + pub async fn get_receipts( + &self, + block_hash: B256, + ) -> ProviderResult>>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetReceipts { block_hash, response_tx }); rx.await.map_err(|_| CacheServiceUnavailable)? 
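As bounded above, the cache is now generic over a single `NodePrimitives` type rather than separate block and receipt types. A spawn sketch under those bounds; the generic signature is an approximation of the bounds shown in this diff:

```rust
use reth_primitives_traits::NodePrimitives;
use reth_rpc_eth_types::{EthStateCache, EthStateCacheConfig};
use reth_storage_api::BlockReader;

/// Spawns the cache frontend and service on the default tokio executor (sketch).
fn spawn_cache<N, P>(provider: P) -> EthStateCache<N>
where
    N: NodePrimitives,
    P: BlockReader<Block = N::Block, Receipt = N::Receipt> + Clone + Unpin + 'static,
{
    EthStateCache::spawn(provider, EthStateCacheConfig::default())
}
```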
@@ -175,7 +178,7 @@ impl EthStateCache { pub async fn get_block_and_receipts( &self, block_hash: B256, - ) -> ProviderResult>, Arc>)>> { + ) -> ProviderResult>, Arc>)>> { let block = self.get_recovered_block(block_hash); let receipts = self.get_receipts(block_hash); @@ -188,7 +191,7 @@ impl EthStateCache { pub async fn get_receipts_and_maybe_block( &self, block_hash: B256, - ) -> ProviderResult>, Option>>)>> { + ) -> ProviderResult>, Option>>)>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetCachedBlock { block_hash, response_tx }); @@ -204,7 +207,7 @@ impl EthStateCache { pub async fn maybe_cached_block_and_receipts( &self, block_hash: B256, - ) -> ProviderResult<(Option>>, Option>>)> { + ) -> ProviderResult<(Option>>, Option>>)> { let (response_tx, rx) = oneshot::channel(); let _ = self .to_service @@ -217,8 +220,11 @@ impl EthStateCache { pub fn get_receipts_and_maybe_block_stream<'a>( &'a self, hashes: Vec, - ) -> impl Stream>, Option>>)>>> + 'a - { + ) -> impl Stream< + Item = ProviderResult< + Option<(Arc>, Option>>)>, + >, + > + 'a { let futures = hashes.into_iter().map(move |hash| self.get_receipts_and_maybe_block(hash)); futures.collect::>() @@ -227,7 +233,7 @@ impl EthStateCache { /// Requests the header for the given hash. /// /// Returns an error if the header is not found. - pub async fn get_header(&self, block_hash: B256) -> ProviderResult { + pub async fn get_header(&self, block_hash: B256) -> ProviderResult { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetHeader { block_hash, response_tx }); rx.await.map_err(|_| CacheServiceUnavailable)? @@ -244,7 +250,7 @@ impl EthStateCache { &self, block_hash: B256, max_blocks: usize, - ) -> Option>>> { + ) -> Option>>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetCachedParentBlocks { block_hash, @@ -777,7 +783,7 @@ impl Drop for ActionSender { /// /// Reorged blocks are removed from the cache. 
pub async fn cache_new_blocks_task( - eth_state_cache: EthStateCache, + eth_state_cache: EthStateCache, mut events: St, ) where St: Stream> + Unpin + 'static, diff --git a/crates/rpc/rpc-eth-types/src/fee_history.rs b/crates/rpc/rpc-eth-types/src/fee_history.rs index 011099bf053..20b81d62357 100644 --- a/crates/rpc/rpc-eth-types/src/fee_history.rs +++ b/crates/rpc/rpc-eth-types/src/fee_history.rs @@ -218,7 +218,7 @@ pub async fn fee_history_cache_new_blocks_task( fee_history_cache: FeeHistoryCache, mut events: St, provider: Provider, - cache: EthStateCache, + cache: EthStateCache, ) where St: Stream> + Unpin + 'static, Provider: diff --git a/crates/rpc/rpc-eth-types/src/gas_oracle.rs b/crates/rpc/rpc-eth-types/src/gas_oracle.rs index 795363f3dfd..c74c7e85023 100644 --- a/crates/rpc/rpc-eth-types/src/gas_oracle.rs +++ b/crates/rpc/rpc-eth-types/src/gas_oracle.rs @@ -15,7 +15,7 @@ use reth_rpc_server_types::{ DEFAULT_MAX_GAS_PRICE, MAX_HEADER_HISTORY, MAX_REWARD_PERCENTILE_COUNT, SAMPLE_NUMBER, }, }; -use reth_storage_api::{BlockReader, BlockReaderIdExt}; +use reth_storage_api::{BlockReaderIdExt, NodePrimitivesProvider}; use schnellru::{ByLength, LruMap}; use serde::{Deserialize, Serialize}; use std::fmt::{self, Debug, Formatter}; @@ -77,12 +77,12 @@ impl Default for GasPriceOracleConfig { #[derive(Debug)] pub struct GasPriceOracle where - Provider: BlockReader, + Provider: NodePrimitivesProvider, { /// The type used to subscribe to block events and get block info provider: Provider, /// The cache for blocks - cache: EthStateCache, + cache: EthStateCache, /// The config for the oracle oracle_config: GasPriceOracleConfig, /// The price under which the sample will be ignored. @@ -94,13 +94,13 @@ where impl GasPriceOracle where - Provider: BlockReaderIdExt, + Provider: BlockReaderIdExt + NodePrimitivesProvider, { /// Creates and returns the [`GasPriceOracle`]. pub fn new( provider: Provider, mut oracle_config: GasPriceOracleConfig, - cache: EthStateCache, + cache: EthStateCache, ) -> Self { // sanitize the percentile to be less than 100 if oracle_config.percentile > 100 { diff --git a/crates/rpc/rpc-eth-types/src/pending_block.rs b/crates/rpc/rpc-eth-types/src/pending_block.rs index fa9b554558b..a339b6b0730 100644 --- a/crates/rpc/rpc-eth-types/src/pending_block.rs +++ b/crates/rpc/rpc-eth-types/src/pending_block.rs @@ -10,7 +10,7 @@ use alloy_primitives::B256; use derive_more::Constructor; use reth_ethereum_primitives::Receipt; use reth_evm::EvmEnv; -use reth_primitives_traits::{Block, RecoveredBlock, SealedHeader}; +use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedHeader}; /// Configured [`EvmEnv`] for a pending block. #[derive(Debug, Clone, Constructor)] @@ -75,11 +75,11 @@ impl PendingBlockEnvOrigin { /// Locally built pending block for `pending` tag. #[derive(Debug, Constructor)] -pub struct PendingBlock { +pub struct PendingBlock { /// Timestamp when the pending block is considered outdated. pub expires_at: Instant, /// The locally built pending block. 
- pub block: Arc>, + pub block: Arc>, /// The receipts for the pending block - pub receipts: Arc>, + pub receipts: Arc>, } diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 786f6e3f193..9b162ca8b93 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -102,7 +102,7 @@ impl EthReceiptConverter { impl ReceiptConverter for EthReceiptConverter where N: NodePrimitives, - ChainSpec: EthChainSpec, + ChainSpec: EthChainSpec + 'static, { type Error = EthApiError; type RpcReceipt = TransactionReceipt; diff --git a/crates/rpc/rpc-eth-types/src/simulate.rs b/crates/rpc/rpc-eth-types/src/simulate.rs index 9cca683d2be..733390a1965 100644 --- a/crates/rpc/rpc-eth-types/src/simulate.rs +++ b/crates/rpc/rpc-eth-types/src/simulate.rs @@ -12,7 +12,7 @@ use alloy_eips::eip2718::WithEncoded; use alloy_network::TransactionBuilder; use alloy_rpc_types_eth::{ simulate::{SimCallResult, SimulateError, SimulatedBlock}, - Block, BlockTransactionsKind, Header, + BlockTransactionsKind, }; use jsonrpsee_types::ErrorObject; use reth_evm::{ @@ -20,9 +20,9 @@ use reth_evm::{ Evm, }; use reth_primitives_traits::{ - block::BlockTx, BlockBody as _, NodePrimitives, Recovered, RecoveredBlock, SignedTransaction, + BlockBody as _, BlockTy, NodePrimitives, Recovered, RecoveredBlock, SignedTransaction, }; -use reth_rpc_convert::{RpcConvert, RpcTransaction, RpcTxReq}; +use reth_rpc_convert::{RpcBlock, RpcConvert, RpcTxReq}; use reth_rpc_server_types::result::rpc_err; use reth_storage_api::noop::NoopProvider; use revm::{ @@ -187,19 +187,14 @@ where } /// Handles outputs of the calls execution and builds a [`SimulatedBlock`]. -#[expect(clippy::type_complexity)] -pub fn build_simulated_block( - block: RecoveredBlock, +pub fn build_simulated_block( + block: RecoveredBlock>, results: Vec>, txs_kind: BlockTransactionsKind, tx_resp_builder: &T, -) -> Result, Header>>, T::Error> +) -> Result>, T::Error> where - T: RpcConvert< - Primitives: NodePrimitives>, - Error: FromEthApiError + FromEvmHalt, - >, - B: reth_primitives_traits::Block, + T: RpcConvert>, { let mut calls: Vec = Vec::with_capacity(results.len()); @@ -258,6 +253,10 @@ where calls.push(call); } - let block = block.into_rpc_block(txs_kind, |tx, tx_info| tx_resp_builder.fill(tx, tx_info))?; + let block = block.into_rpc_block( + txs_kind, + |tx, tx_info| tx_resp_builder.fill(tx, tx_info), + |header, size| tx_resp_builder.convert_header(header, size), + )?; Ok(SimulatedBlock { inner: block, calls }) } diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 6560aa45798..f3510e3a403 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -17,7 +17,7 @@ use jsonrpsee::core::RpcResult; use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; use reth_evm::{execute::Executor, ConfigureEvm, EvmEnvFor, TxEnvFor}; use reth_primitives_traits::{ - Block as _, BlockBody, NodePrimitives, ReceiptWithBloom, RecoveredBlock, SignedTransaction, + Block as _, BlockBody, ReceiptWithBloom, RecoveredBlock, SignedTransaction, }; use reth_revm::{ database::StateProviderDatabase, @@ -48,16 +48,16 @@ use tokio::sync::{AcquireError, OwnedSemaphorePermit}; /// `debug` API implementation. /// /// This type provides the functionality for handling `debug` related requests. 
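+///
+/// Construction sketch: the EVM config is no longer stored here, it is reused from
+/// the wrapped eth API (`eth_api` and the permit count below are illustrative):
+/// ```ignore
+/// let debug_api = DebugApi::new(eth_api, BlockingTaskGuard::new(10));
+/// ```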
-pub struct DebugApi { - inner: Arc>, +pub struct DebugApi { + inner: Arc>, } // === impl DebugApi === -impl DebugApi { +impl DebugApi { /// Create a new instance of the [`DebugApi`] - pub fn new(eth: Eth, blocking_task_guard: BlockingTaskGuard, evm_config: Evm) -> Self { - let inner = Arc::new(DebugApiInner { eth_api: eth, blocking_task_guard, evm_config }); + pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { + let inner = Arc::new(DebugApiInner { eth_api, blocking_task_guard }); Self { inner } } @@ -67,7 +67,7 @@ impl DebugApi { } } -impl DebugApi { +impl DebugApi { /// Access the underlying provider. pub fn provider(&self) -> &Eth::Provider { self.inner.eth_api.provider() @@ -76,10 +76,9 @@ impl DebugApi { // === impl DebugApi === -impl DebugApi +impl DebugApi where Eth: EthApiTypes + TraceExt + 'static, - Evm: ConfigureEvm>> + 'static, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { @@ -636,7 +635,7 @@ where .eth_api() .spawn_with_state_at_block(block.parent_hash().into(), move |state_provider| { let db = StateProviderDatabase::new(&state_provider); - let block_executor = this.inner.evm_config.batch_executor(db); + let block_executor = this.eth_api().evm_config().batch_executor(db); let mut witness_record = ExecutionWitnessRecord::default(); @@ -897,10 +896,9 @@ where } #[async_trait] -impl DebugApiServer> for DebugApi +impl DebugApiServer> for DebugApi where Eth: EthApiTypes + EthTransactions + TraceExt + 'static, - Evm: ConfigureEvm>> + 'static, { /// Handler for `debug_getRawHeader` async fn raw_header(&self, block_id: BlockId) -> RpcResult { @@ -1305,23 +1303,21 @@ where } } -impl std::fmt::Debug for DebugApi { +impl std::fmt::Debug for DebugApi { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DebugApi").finish_non_exhaustive() } } -impl Clone for DebugApi { +impl Clone for DebugApi { fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner) } } } -struct DebugApiInner { +struct DebugApiInner { /// The implementation of `eth` API eth_api: Eth, // restrict the number of concurrent calls to blocking calls blocking_task_guard: BlockingTaskGuard, - /// block executor for debug & trace apis - evm_config: Evm, } diff --git a/crates/rpc/rpc/src/eth/builder.rs b/crates/rpc/rpc/src/eth/builder.rs index a0e6708ce1b..283722701ce 100644 --- a/crates/rpc/rpc/src/eth/builder.rs +++ b/crates/rpc/rpc/src/eth/builder.rs @@ -4,10 +4,11 @@ use crate::{eth::core::EthApiInner, EthApi}; use alloy_network::Ethereum; use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::ChainSpecProvider; -use reth_evm::ConfigureEvm; -use reth_node_api::NodePrimitives; +use reth_primitives_traits::HeaderTy; use reth_rpc_convert::{RpcConvert, RpcConverter}; -use reth_rpc_eth_api::helpers::pending_block::{BasicPendingEnvBuilder, PendingEnvBuilder}; +use reth_rpc_eth_api::{ + helpers::pending_block::PendingEnvBuilder, node::RpcNodeCoreAdapter, RpcNodeCore, +}; use reth_rpc_eth_types::{ fee_history::fee_history_cache_new_blocks_task, receipt::EthReceiptConverter, EthStateCache, EthStateCacheConfig, FeeHistoryCache, FeeHistoryCacheConfig, GasCap, GasPriceOracle, @@ -16,7 +17,6 @@ use reth_rpc_eth_types::{ use reth_rpc_server_types::constants::{ DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS, }; -use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; use reth_tasks::{pool::BlockingTaskPool, TaskSpawner, TokioTaskExecutor}; use std::sync::Arc; @@ -25,14 +25,8 
@@ use std::sync::Arc; /// This builder type contains all settings to create an [`EthApiInner`] or an [`EthApi`] instance /// directly. #[derive(Debug)] -pub struct EthApiBuilder -where - Provider: BlockReaderIdExt, -{ - provider: Provider, - pool: Pool, - network: Network, - evm_config: EvmConfig, +pub struct EthApiBuilder { + components: N, rpc_converter: Rpc, gas_cap: GasCap, max_simulate_blocks: u64, @@ -40,36 +34,39 @@ where fee_history_cache_config: FeeHistoryCacheConfig, proof_permits: usize, eth_state_cache_config: EthStateCacheConfig, - eth_cache: Option>, + eth_cache: Option>, gas_oracle_config: GasPriceOracleConfig, - gas_oracle: Option>, + gas_oracle: Option>, blocking_task_pool: Option, task_spawner: Box, next_env: NextEnv, } -impl +impl EthApiBuilder< - Provider, - Pool, - Network, - EvmConfig, - RpcConverter>, + RpcNodeCoreAdapter, + RpcConverter>, > where - Provider: BlockReaderIdExt + ChainSpecProvider, + RpcNodeCoreAdapter: + RpcNodeCore, Evm = EvmConfig>, { /// Creates a new `EthApiBuilder` instance. - pub fn new(provider: Provider, pool: Pool, network: Network, evm_config: EvmConfig) -> Self - where - Provider: BlockReaderIdExt, - { - let rpc_converter = RpcConverter::new(EthReceiptConverter::new(provider.chain_spec()), ()); + pub fn new(provider: Provider, pool: Pool, network: Network, evm_config: EvmConfig) -> Self { + Self::new_with_components(RpcNodeCoreAdapter::new(provider, pool, network, evm_config)) + } +} + +impl EthApiBuilder>> +where + N: RpcNodeCore>, +{ + /// Creates a new `EthApiBuilder` instance with the provided components. + pub fn new_with_components(components: N) -> Self { + let rpc_converter = + RpcConverter::new(EthReceiptConverter::new(components.provider().chain_spec())); Self { - provider, - pool, - network, - evm_config, + components, rpc_converter, eth_cache: None, gas_oracle: None, @@ -82,15 +79,14 @@ where task_spawner: TokioTaskExecutor::default().boxed(), gas_oracle_config: Default::default(), eth_state_cache_config: Default::default(), - next_env: BasicPendingEnvBuilder::default(), + next_env: Default::default(), } } } -impl - EthApiBuilder +impl EthApiBuilder where - Provider: BlockReaderIdExt + ChainSpecProvider, + N: RpcNodeCore, { /// Configures the task spawner used to spawn additional tasks. 
pub fn task_spawner(mut self, spawner: impl TaskSpawner + 'static) -> Self { @@ -102,12 +98,9 @@ where pub fn with_rpc_converter( self, rpc_converter: RpcNew, - ) -> EthApiBuilder { + ) -> EthApiBuilder { let Self { - provider, - pool, - network, - evm_config, + components, rpc_converter: _, gas_cap, max_simulate_blocks, @@ -123,10 +116,7 @@ where next_env, } = self; EthApiBuilder { - provider, - pool, - network, - evm_config, + components, rpc_converter, gas_cap, max_simulate_blocks, @@ -147,12 +137,9 @@ where pub fn with_pending_env_builder( self, next_env: NextEnvNew, - ) -> EthApiBuilder { + ) -> EthApiBuilder { let Self { - provider, - pool, - network, - evm_config, + components, rpc_converter, gas_cap, max_simulate_blocks, @@ -168,10 +155,7 @@ where next_env: _, } = self; EthApiBuilder { - provider, - pool, - network, - evm_config, + components, rpc_converter, gas_cap, max_simulate_blocks, @@ -199,10 +183,7 @@ where } /// Sets `eth_cache` instance - pub fn eth_cache( - mut self, - eth_cache: EthStateCache, - ) -> Self { + pub fn eth_cache(mut self, eth_cache: EthStateCache) -> Self { self.eth_cache = Some(eth_cache); self } @@ -215,7 +196,7 @@ where } /// Sets `gas_oracle` instance - pub fn gas_oracle(mut self, gas_oracle: GasPriceOracle) -> Self { + pub fn gas_oracle(mut self, gas_oracle: GasPriceOracle) -> Self { self.gas_oracle = Some(gas_oracle); self } @@ -267,29 +248,13 @@ where /// /// This function panics if the blocking task pool cannot be built. /// This will panic if called outside the context of a Tokio runtime. - pub fn build_inner(self) -> EthApiInner + pub fn build_inner(self) -> EthApiInner where - Provider: BlockReaderIdExt - + StateProviderFactory - + ChainSpecProvider - + CanonStateSubscriptions< - Primitives: NodePrimitives< - Block = Provider::Block, - Receipt = Provider::Receipt, - BlockHeader = Provider::Header, - >, - > + Clone - + Unpin - + 'static, - EvmConfig: ConfigureEvm, Rpc: RpcConvert, - NextEnv: PendingEnvBuilder, + NextEnv: PendingEnvBuilder, { let Self { - provider, - pool, - network, - evm_config, + components, rpc_converter, eth_state_cache_config, gas_oracle_config, @@ -305,27 +270,27 @@ where next_env, } = self; + let provider = components.provider().clone(); + let eth_cache = eth_cache .unwrap_or_else(|| EthStateCache::spawn(provider.clone(), eth_state_cache_config)); let gas_oracle = gas_oracle.unwrap_or_else(|| { GasPriceOracle::new(provider.clone(), gas_oracle_config, eth_cache.clone()) }); - let fee_history_cache = FeeHistoryCache::::new(fee_history_cache_config); + let fee_history_cache = + FeeHistoryCache::>::new(fee_history_cache_config); let new_canonical_blocks = provider.canonical_state_stream(); let fhc = fee_history_cache.clone(); let cache = eth_cache.clone(); - let prov = provider.clone(); task_spawner.spawn_critical( "cache canonical blocks for fee history task", Box::pin(async move { - fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, prov, cache).await; + fee_history_cache_new_blocks_task(fhc, new_canonical_blocks, provider, cache).await; }), ); EthApiInner::new( - provider, - pool, - network, + components, eth_cache, gas_oracle, gas_cap, @@ -335,7 +300,6 @@ where BlockingTaskPool::build().expect("failed to build blocking task pool") }), fee_history_cache, - evm_config, task_spawner, proof_permits, rpc_converter, @@ -351,23 +315,10 @@ where /// /// This function panics if the blocking task pool cannot be built. /// This will panic if called outside the context of a Tokio runtime. 
- pub fn build(self) -> EthApi + pub fn build(self) -> EthApi where - Provider: BlockReaderIdExt - + StateProviderFactory - + CanonStateSubscriptions< - Primitives: NodePrimitives< - Block = Provider::Block, - Receipt = Provider::Receipt, - BlockHeader = Provider::Header, - >, - > + ChainSpecProvider - + Clone - + Unpin - + 'static, Rpc: RpcConvert, - EvmConfig: ConfigureEvm, - NextEnv: PendingEnvBuilder, + NextEnv: PendingEnvBuilder, { EthApi { inner: Arc::new(self.build_inner()) } } diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index 88828ecf6c4..a5fa5d3f651 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -10,31 +10,25 @@ use alloy_network::Ethereum; use alloy_primitives::{Bytes, U256}; use derive_more::Deref; use reth_chainspec::{ChainSpec, ChainSpecProvider}; -use reth_evm::ConfigureEvm; use reth_evm_ethereum::EthEvmConfig; +use reth_network_api::noop::NoopNetwork; use reth_node_api::{FullNodeComponents, FullNodeTypes}; use reth_rpc_convert::{RpcConvert, RpcConverter}; use reth_rpc_eth_api::{ - helpers::{ - pending_block::{BasicPendingEnvBuilder, PendingEnvBuilder}, - spec::SignersForRpc, - SpawnBlocking, - }, - node::RpcNodeCoreExt, + helpers::{pending_block::PendingEnvBuilder, spec::SignersForRpc, SpawnBlocking}, + node::{RpcNodeCoreAdapter, RpcNodeCoreExt}, EthApiTypes, RpcNodeCore, }; use reth_rpc_eth_types::{ receipt::EthReceiptConverter, EthApiError, EthStateCache, FeeHistoryCache, GasCap, GasPriceOracle, PendingBlock, }; -use reth_storage_api::{ - noop::NoopProvider, BlockReader, BlockReaderIdExt, NodePrimitivesProvider, ProviderBlock, - ProviderHeader, ProviderReceipt, -}; +use reth_storage_api::{noop::NoopProvider, BlockReaderIdExt, ProviderHeader}; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, TokioTaskExecutor, }; +use reth_transaction_pool::noop::NoopTransactionPool; use tokio::sync::{broadcast, Mutex}; const DEFAULT_BROADCAST_CAPACITY: usize = 2000; @@ -47,22 +41,10 @@ pub type EthRpcConverterFor = RpcConverter< >; /// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`]. -pub type EthApiFor = EthApi< - ::Provider, - ::Pool, - ::Network, - ::Evm, - EthRpcConverterFor, ->; +pub type EthApiFor = EthApi>; /// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`]. -pub type EthApiBuilderFor = EthApiBuilder< - ::Provider, - ::Pool, - ::Network, - ::Evm, - EthRpcConverterFor, ->; +pub type EthApiBuilderFor = EthApiBuilder>; /// `Eth` API implementation. /// @@ -79,17 +61,15 @@ pub type EthApiBuilderFor = EthApiBuilder< /// While this type requires various unrestricted generic components, trait bounds are enforced when /// additional traits are implemented for this type. #[derive(Deref)] -pub struct EthApi { +pub struct EthApi { /// All nested fields bundled together. #[deref] - pub(super) inner: Arc>, + pub(super) inner: Arc>, } -impl Clone - for EthApi +impl Clone for EthApi where - Provider: BlockReader, - EvmConfig: ConfigureEvm, + N: RpcNodeCore, Rpc: RpcConvert, { fn clone(&self) -> Self { @@ -97,7 +77,12 @@ where } } -impl EthApi> { +impl + EthApi< + RpcNodeCoreAdapter, + EthRpcConverter, + > +{ /// Convenience fn to obtain a new [`EthApiBuilder`] instance with mandatory components. 
/// /// Creating an [`EthApi`] requires a few mandatory components: @@ -125,53 +110,45 @@ impl EthApi> { /// .build(); /// ``` #[expect(clippy::type_complexity)] - pub fn builder( + pub fn builder( provider: Provider, pool: Pool, network: Network, evm_config: EvmConfig, ) -> EthApiBuilder< - Provider, - Pool, - Network, - EvmConfig, - RpcConverter>, + RpcNodeCoreAdapter, + RpcConverter>, > where - Provider: ChainSpecProvider + BlockReaderIdExt, + RpcNodeCoreAdapter: + RpcNodeCore, Evm = EvmConfig>, { EthApiBuilder::new(provider, pool, network, evm_config) } } -impl EthApi +impl EthApi where - Provider: BlockReaderIdExt + ChainSpecProvider, - EvmConfig: ConfigureEvm, + N: RpcNodeCore, Rpc: RpcConvert, - BasicPendingEnvBuilder: PendingEnvBuilder, + (): PendingEnvBuilder, { /// Creates a new, shareable instance using the default tokio task spawner. #[expect(clippy::too_many_arguments)] pub fn new( - provider: Provider, - pool: Pool, - network: Network, - eth_cache: EthStateCache, - gas_oracle: GasPriceOracle, + components: N, + eth_cache: EthStateCache, + gas_oracle: GasPriceOracle, gas_cap: impl Into, max_simulate_blocks: u64, eth_proof_window: u64, blocking_task_pool: BlockingTaskPool, - fee_history_cache: FeeHistoryCache>, - evm_config: EvmConfig, + fee_history_cache: FeeHistoryCache>, proof_permits: usize, rpc_converter: Rpc, ) -> Self { let inner = EthApiInner::new( - provider, - pool, - network, + components, eth_cache, gas_oracle, gas_cap, @@ -179,23 +156,19 @@ where eth_proof_window, blocking_task_pool, fee_history_cache, - evm_config, TokioTaskExecutor::default().boxed(), proof_permits, rpc_converter, - BasicPendingEnvBuilder::default(), + (), ); Self { inner: Arc::new(inner) } } } -impl EthApiTypes - for EthApi +impl EthApiTypes for EthApi where - Self: Send + Sync, - Provider: BlockReader, - EvmConfig: ConfigureEvm, + N: RpcNodeCore, Rpc: RpcConvert, { type Error = EthApiError; @@ -207,21 +180,16 @@ where } } -impl RpcNodeCore - for EthApi +impl RpcNodeCore for EthApi where - Provider: BlockReader + NodePrimitivesProvider + Clone + Unpin, - Pool: Send + Sync + Clone + Unpin, - Network: Send + Sync + Clone, - EvmConfig: ConfigureEvm, + N: RpcNodeCore, Rpc: RpcConvert, { - type Primitives = Provider::Primitives; - type Provider = Provider; - type Pool = Pool; - type Evm = EvmConfig; - type Network = Network; - type PayloadBuilder = (); + type Primitives = N::Primitives; + type Provider = N::Provider; + type Pool = N::Pool; + type Evm = N::Evm; + type Network = N::Network; fn pool(&self) -> &Self::Pool { self.inner.pool() @@ -235,35 +203,25 @@ where self.inner.network() } - fn payload_builder(&self) -> &Self::PayloadBuilder { - &() - } - fn provider(&self) -> &Self::Provider { self.inner.provider() } } -impl RpcNodeCoreExt - for EthApi +impl RpcNodeCoreExt for EthApi where - Provider: BlockReader + NodePrimitivesProvider + Clone + Unpin, - Pool: Send + Sync + Clone + Unpin, - Network: Send + Sync + Clone, - EvmConfig: ConfigureEvm, + N: RpcNodeCore, Rpc: RpcConvert, { #[inline] - fn cache(&self) -> &EthStateCache, ProviderReceipt> { + fn cache(&self) -> &EthStateCache { self.inner.cache() } } -impl std::fmt::Debug - for EthApi +impl std::fmt::Debug for EthApi where - Provider: BlockReader, - EvmConfig: ConfigureEvm, + N: RpcNodeCore, Rpc: RpcConvert, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -271,12 +229,9 @@ where } } -impl SpawnBlocking - for EthApi +impl SpawnBlocking for EthApi where - Self: EthApiTypes + Clone + Send + Sync + 'static, - Provider: 
BlockReader, - EvmConfig: ConfigureEvm, + N: RpcNodeCore, Rpc: RpcConvert, { #[inline] @@ -297,25 +252,15 @@ where /// Container type `EthApi` #[expect(missing_debug_implementations)] -pub struct EthApiInner< - Provider: BlockReader, - Pool, - Network, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, -> { - /// The transaction pool. - pool: Pool, - /// The provider that can interact with the chain. - provider: Provider, - /// An interface to interact with the network - network: Network, +pub struct EthApiInner { + /// The components of the node. + components: N, /// All configured Signers - signers: SignersForRpc, + signers: SignersForRpc, /// The async cache frontend for eth related data - eth_cache: EthStateCache, + eth_cache: EthStateCache, /// The async gas oracle frontend for gas price suggestions - gas_oracle: GasPriceOracle, + gas_oracle: GasPriceOracle, /// Maximum gas limit for `eth_call` and call tracing RPC methods. gas_cap: u64, /// Maximum number of blocks for `eth_simulateV1`. @@ -327,13 +272,11 @@ pub struct EthApiInner< /// The type that can spawn tasks which would otherwise block. task_spawner: Box, /// Cached pending block if any - pending_block: Mutex>>, + pending_block: Mutex>>, /// A pool dedicated to CPU heavy blocking tasks. blocking_task_pool: BlockingTaskPool, /// Cache for block fees history - fee_history_cache: FeeHistoryCache>, - /// The type that defines how to configure the EVM - evm_config: EvmConfig, + fee_history_cache: FeeHistoryCache>, /// Guard for getproof calls blocking_task_guard: BlockingTaskGuard, @@ -345,38 +288,35 @@ pub struct EthApiInner< tx_resp_builder: Rpc, /// Builder for pending block environment. - next_env_builder: Box>, + next_env_builder: Box>, } -impl EthApiInner +impl EthApiInner where - Provider: BlockReaderIdExt, - EvmConfig: ConfigureEvm, + N: RpcNodeCore, Rpc: RpcConvert, { /// Creates a new, shareable instance using the default tokio task spawner. #[expect(clippy::too_many_arguments)] pub fn new( - provider: Provider, - pool: Pool, - network: Network, - eth_cache: EthStateCache, - gas_oracle: GasPriceOracle, + components: N, + eth_cache: EthStateCache, + gas_oracle: GasPriceOracle, gas_cap: impl Into, max_simulate_blocks: u64, eth_proof_window: u64, blocking_task_pool: BlockingTaskPool, - fee_history_cache: FeeHistoryCache>, - evm_config: EvmConfig, + fee_history_cache: FeeHistoryCache>, task_spawner: Box, proof_permits: usize, tx_resp_builder: Rpc, - next_env: impl PendingEnvBuilder, + next_env: impl PendingEnvBuilder, ) -> Self { let signers = parking_lot::RwLock::new(Default::default()); // get the block number of the latest block let starting_block = U256::from( - provider + components + .provider() .header_by_number_or_tag(BlockNumberOrTag::Latest) .ok() .flatten() @@ -387,9 +327,7 @@ where let (raw_tx_sender, _) = broadcast::channel(DEFAULT_BROADCAST_CAPACITY); Self { - provider, - pool, - network, + components, signers, eth_cache, gas_oracle, @@ -401,7 +339,6 @@ where pending_block: Default::default(), blocking_task_pool, fee_history_cache, - evm_config, blocking_task_guard: BlockingTaskGuard::new(proof_permits), raw_tx_sender, tx_resp_builder, @@ -410,16 +347,15 @@ where } } -impl EthApiInner +impl EthApiInner where - Provider: BlockReader, - EvmConfig: ConfigureEvm, + N: RpcNodeCore, Rpc: RpcConvert, { /// Returns a handle to data on disk. 
#[inline] - pub const fn provider(&self) -> &Provider { - &self.provider + pub fn provider(&self) -> &N::Provider { + self.components.provider() } /// Returns a handle to the transaction response builder. @@ -430,22 +366,20 @@ where /// Returns a handle to data in memory. #[inline] - pub const fn cache(&self) -> &EthStateCache { + pub const fn cache(&self) -> &EthStateCache { &self.eth_cache } /// Returns a handle to the pending block. #[inline] - pub const fn pending_block( - &self, - ) -> &Mutex>> { + pub const fn pending_block(&self) -> &Mutex>> { &self.pending_block } - /// Returns a type that knows how to build a [`ConfigureEvm::NextBlockEnvCtx`] for a pending - /// block. + /// Returns a type that knows how to build a [`reth_evm::ConfigureEvm::NextBlockEnvCtx`] for a + /// pending block. #[inline] - pub const fn pending_env_builder(&self) -> &dyn PendingEnvBuilder { + pub const fn pending_env_builder(&self) -> &dyn PendingEnvBuilder { &*self.next_env_builder } @@ -463,14 +397,14 @@ where /// Returns a handle to the EVM config. #[inline] - pub const fn evm_config(&self) -> &EvmConfig { - &self.evm_config + pub fn evm_config(&self) -> &N::Evm { + self.components.evm_config() } /// Returns a handle to the transaction pool. #[inline] - pub const fn pool(&self) -> &Pool { - &self.pool + pub fn pool(&self) -> &N::Pool { + self.components.pool() } /// Returns the gas cap. @@ -487,19 +421,19 @@ where /// Returns a handle to the gas oracle. #[inline] - pub const fn gas_oracle(&self) -> &GasPriceOracle { + pub const fn gas_oracle(&self) -> &GasPriceOracle { &self.gas_oracle } /// Returns a handle to the fee history cache. #[inline] - pub const fn fee_history_cache(&self) -> &FeeHistoryCache> { + pub const fn fee_history_cache(&self) -> &FeeHistoryCache> { &self.fee_history_cache } /// Returns a handle to the signers. #[inline] - pub const fn signers(&self) -> &SignersForRpc { + pub const fn signers(&self) -> &SignersForRpc { &self.signers } @@ -511,8 +445,8 @@ where /// Returns the inner `Network` #[inline] - pub const fn network(&self) -> &Network { - &self.network + pub fn network(&self) -> &N::Network { + self.components.network() } /// The maximum number of blocks into the past for generating state proofs. @@ -542,10 +476,9 @@ where #[cfg(test)] mod tests { - use crate::{EthApi, EthApiBuilder}; + use crate::{eth::helpers::types::EthRpcConverter, EthApi, EthApiBuilder}; use alloy_consensus::{Block, BlockBody, Header}; use alloy_eips::BlockNumberOrTag; - use alloy_network::Ethereum; use alloy_primitives::{Signature, B256, U64}; use alloy_rpc_types::FeeHistory; use jsonrpsee_types::error::INVALID_PARAMS_CODE; @@ -555,20 +488,18 @@ mod tests { use reth_ethereum_primitives::TransactionSigned; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_provider::test_utils::{MockEthProvider, NoopProvider}; - use reth_rpc_convert::RpcConverter; - use reth_rpc_eth_api::EthApiServer; - use reth_rpc_eth_types::receipt::EthReceiptConverter; + use reth_provider::{ + test_utils::{MockEthProvider, NoopProvider}, + StageCheckpointReader, + }; + use reth_rpc_eth_api::{node::RpcNodeCoreAdapter, EthApiServer}; use reth_storage_api::{BlockReader, BlockReaderIdExt, StateProviderFactory}; use reth_testing_utils::generators; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; type FakeEthApi
<P>
= EthApi< - P, - TestPool, - NoopNetwork, - EthEvmConfig, - RpcConverter>, + RpcNodeCoreAdapter, + EthRpcConverter, >; fn build_test_eth_api< @@ -576,10 +507,12 @@ mod tests { Block = reth_ethereum_primitives::Block, Receipt = reth_ethereum_primitives::Receipt, Header = alloy_consensus::Header, + Transaction = reth_ethereum_primitives::TransactionSigned, > + BlockReader + ChainSpecProvider + StateProviderFactory + CanonStateSubscriptions + + StageCheckpointReader + Unpin + Clone + 'static, @@ -687,7 +620,7 @@ mod tests { /// Invalid block range #[tokio::test] async fn test_fee_history_empty() { - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( &build_test_eth_api(NoopProvider::default()), U64::from(1), BlockNumberOrTag::Latest, @@ -709,7 +642,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(newest_block + 1), newest_block.into(), @@ -732,7 +665,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(1), (newest_block + 1000).into(), @@ -755,7 +688,7 @@ mod tests { let (eth_api, _, _) = prepare_eth_api(newest_block, oldest_block, block_count, MockEthProvider::default()); - let response = as EthApiServer<_, _, _, _, _>>::fee_history( + let response = as EthApiServer<_, _, _, _, _>>::fee_history( ð_api, U64::from(0), newest_block.into(), diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index a6214eb7890..e0f9bfddddb 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -13,7 +13,7 @@ use reth_errors::ProviderError; use reth_primitives_traits::{NodePrimitives, SealedHeader}; use reth_rpc_eth_api::{ EngineEthFilter, EthApiTypes, EthFilterApiServer, FullEthApiTypes, QueryLimits, RpcConvert, - RpcNodeCore, RpcNodeCoreExt, RpcTransaction, + RpcNodeCoreExt, RpcTransaction, }; use reth_rpc_eth_types::{ logs_utils::{self, append_matching_block_logs, ProviderOrBlock}, @@ -22,7 +22,7 @@ use reth_rpc_eth_types::{ use reth_rpc_server_types::{result::rpc_error_with_code, ToRpcResult}; use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, HeaderProvider, ProviderBlock, - ProviderReceipt, ReceiptProvider, TransactionsProvider, + ProviderReceipt, ReceiptProvider, }; use reth_tasks::TaskSpawner; use reth_transaction_pool::{NewSubpoolTransactionStream, PoolTransaction, TransactionPool}; @@ -304,13 +304,7 @@ where #[async_trait] impl EthFilterApiServer> for EthFilter where - Eth: FullEthApiTypes - + RpcNodeCoreExt< - Provider: BlockIdReader, - Primitives: NodePrimitives< - SignedTx = <::Provider as TransactionsProvider>::Transaction, - >, - > + 'static, + Eth: FullEthApiTypes + RpcNodeCoreExt + 'static, { /// Handler for `eth_newFilter` async fn new_filter(&self, filter: Filter) -> RpcResult { @@ -437,9 +431,7 @@ where } /// Access the underlying [`EthStateCache`]. 
- fn eth_cache( - &self, - ) -> &EthStateCache, ProviderReceipt> { + fn eth_cache(&self) -> &EthStateCache { self.eth_api.cache() } @@ -1093,6 +1085,7 @@ mod tests { use reth_network_api::noop::NoopNetwork; use reth_provider::test_utils::MockEthProvider; use reth_rpc_convert::RpcConverter; + use reth_rpc_eth_api::node::RpcNodeCoreAdapter; use reth_rpc_eth_types::receipt::EthReceiptConverter; use reth_tasks::TokioTaskExecutor; use reth_testing_utils::generators; @@ -1122,13 +1115,11 @@ mod tests { } // Helper function to create a test EthApi instance + #[expect(clippy::type_complexity)] fn build_test_eth_api( provider: MockEthProvider, ) -> EthApi< - MockEthProvider, - TestPool, - NoopNetwork, - EthEvmConfig, + RpcNodeCoreAdapter, RpcConverter>, > { EthApiBuilder::new( diff --git a/crates/rpc/rpc/src/eth/helpers/block.rs b/crates/rpc/rpc/src/eth/helpers/block.rs index 115183d0f17..8077802804b 100644 --- a/crates/rpc/rpc/src/eth/helpers/block.rs +++ b/crates/rpc/rpc/src/eth/helpers/block.rs @@ -1,51 +1,26 @@ //! Contains RPC handler implementations specific to blocks. -use reth_chainspec::ChainSpecProvider; -use reth_evm::ConfigureEvm; -use reth_primitives_traits::NodePrimitives; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{EthBlocks, LoadBlock, LoadPendingBlock, SpawnBlocking}, - RpcNodeCoreExt, + helpers::{EthBlocks, LoadBlock, LoadPendingBlock}, + FromEvmError, RpcNodeCore, }; use reth_rpc_eth_types::EthApiError; -use reth_storage_api::{BlockReader, ProviderTx}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; use crate::EthApi; -impl EthBlocks - for EthApi +impl EthBlocks for EthApi where - Self: LoadBlock< - Error = EthApiError, - NetworkTypes = Rpc::Network, - RpcConvert: RpcConvert< - Primitives = Self::Primitives, - Error = Self::Error, - Network = Rpc::Network, - >, - >, - Provider: BlockReader + ChainSpecProvider, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { } -impl LoadBlock - for EthApi +impl LoadBlock for EthApi where - Self: LoadPendingBlock - + SpawnBlocking - + RpcNodeCoreExt< - Pool: TransactionPool< - Transaction: PoolTransaction>, - >, - Primitives: NodePrimitives>, - Evm = EvmConfig, - >, - Provider: BlockReader, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, + Self: LoadPendingBlock, + N: RpcNodeCore, + Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/call.rs b/crates/rpc/rpc/src/eth/helpers/call.rs index 60be882bd2d..8a8377f7abc 100644 --- a/crates/rpc/rpc/src/eth/helpers/call.rs +++ b/crates/rpc/rpc/src/eth/helpers/call.rs @@ -1,55 +1,27 @@ //! Contains RPC handler implementations specific to endpoints that call/execute within evm. 
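//!
//! After this refactor every impl in this module shares one bound shape. Written
//! out with its generic arguments (a hedged reconstruction; the exact parameters
//! are an assumption):
//!
//!     N: RpcNodeCore,
//!     EthApiError: FromEvmError<N::Evm>,
//!     Rpc: RpcConvert<TxEnv = TxEnvFor<N::Evm>>,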
use crate::EthApi; -use reth_errors::ProviderError; -use reth_evm::{ConfigureEvm, TxEnvFor}; -use reth_node_api::NodePrimitives; +use reth_evm::TxEnvFor; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{estimate::EstimateCall, Call, EthCall, LoadPendingBlock, LoadState, SpawnBlocking}, - FromEvmError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, + helpers::{estimate::EstimateCall, Call, EthCall}, + FromEvmError, RpcNodeCore, }; -use reth_storage_api::{BlockReader, ProviderHeader, ProviderTx}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use reth_rpc_eth_types::EthApiError; -impl EthCall - for EthApi +impl EthCall for EthApi where - Self: EstimateCall - + LoadPendingBlock - + FullEthApiTypes - + RpcNodeCoreExt< - Pool: TransactionPool< - Transaction: PoolTransaction>, - >, - Primitives: NodePrimitives>, - Evm = EvmConfig, - >, - EvmConfig: ConfigureEvm::Primitives>, - Provider: BlockReader, - Rpc: RpcConvert, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert>, { } -impl Call - for EthApi +impl Call for EthApi where - Self: LoadState< - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - >, - RpcConvert: RpcConvert, Network = Rpc::Network>, - NetworkTypes = Rpc::Network, - Error: FromEvmError - + From<::Error> - + From, - > + SpawnBlocking, - Provider: BlockReader, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert>, { #[inline] fn call_gas_limit(&self) -> u64 { @@ -62,12 +34,10 @@ where } } -impl EstimateCall - for EthApi +impl EstimateCall for EthApi where - Self: Call, - Provider: BlockReader, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert>, { } diff --git a/crates/rpc/rpc/src/eth/helpers/fees.rs b/crates/rpc/rpc/src/eth/helpers/fees.rs index e9e6e4c6bd0..1d26644b47b 100644 --- a/crates/rpc/rpc/src/eth/helpers/fees.rs +++ b/crates/rpc/rpc/src/eth/helpers/fees.rs @@ -1,37 +1,28 @@ //! Contains RPC handler implementations for fee history. -use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; -use reth_evm::ConfigureEvm; use reth_rpc_convert::RpcConvert; -use reth_rpc_eth_api::helpers::{EthFees, LoadBlock, LoadFee}; -use reth_rpc_eth_types::{FeeHistoryCache, GasPriceOracle}; -use reth_storage_api::{BlockReader, BlockReaderIdExt, ProviderHeader, StateProviderFactory}; +use reth_rpc_eth_api::{ + helpers::{EthFees, LoadFee}, + FromEvmError, RpcNodeCore, +}; +use reth_rpc_eth_types::{EthApiError, FeeHistoryCache, GasPriceOracle}; +use reth_storage_api::ProviderHeader; use crate::EthApi; -impl EthFees - for EthApi +impl EthFees for EthApi where - Self: LoadFee< - Provider: ChainSpecProvider< - ChainSpec: EthChainSpec
<Header = ProviderHeader<Self::Provider>
>, - >, - >, - Provider: BlockReader, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { } -impl LoadFee - for EthApi +impl LoadFee for EthApi where - Self: LoadBlock, - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] fn gas_oracle(&self) -> &GasPriceOracle { @@ -39,7 +30,7 @@ where } #[inline] - fn fee_history_cache(&self) -> &FeeHistoryCache> { + fn fee_history_cache(&self) -> &FeeHistoryCache> { self.inner.fee_history_cache() } } diff --git a/crates/rpc/rpc/src/eth/helpers/pending_block.rs b/crates/rpc/rpc/src/eth/helpers/pending_block.rs index d792baeb13c..5e007c340f1 100644 --- a/crates/rpc/rpc/src/eth/helpers/pending_block.rs +++ b/crates/rpc/rpc/src/eth/helpers/pending_block.rs @@ -1,56 +1,21 @@ //! Support for building a pending block with transactions from local view of mempool. use crate::EthApi; -use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks}; -use reth_evm::ConfigureEvm; -use reth_node_api::NodePrimitives; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock, SpawnBlocking}, - types::RpcTypes, + helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock}, FromEvmError, RpcNodeCore, }; -use reth_rpc_eth_types::PendingBlock; -use reth_storage_api::{ - BlockReader, BlockReaderIdExt, ProviderBlock, ProviderHeader, ProviderReceipt, ProviderTx, - StateProviderFactory, -}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use reth_rpc_eth_types::{EthApiError, PendingBlock}; -impl LoadPendingBlock - for EthApi +impl LoadPendingBlock for EthApi where - Self: SpawnBlocking< - NetworkTypes = Rpc::Network, - Error: FromEvmError, - RpcConvert: RpcConvert, - > + RpcNodeCore< - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory, - Pool: TransactionPool< - Transaction: PoolTransaction>, - >, - Evm = EvmConfig, - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - Receipt = ProviderReceipt, - Block = ProviderBlock, - >, - >, - Provider: BlockReader, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert< - Network: RpcTypes
>>, - >, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] - fn pending_block( - &self, - ) -> &tokio::sync::Mutex< - Option, ProviderReceipt>>, - > { + fn pending_block(&self) -> &tokio::sync::Mutex>> { self.inner.pending_block() } diff --git a/crates/rpc/rpc/src/eth/helpers/receipt.rs b/crates/rpc/rpc/src/eth/helpers/receipt.rs index 489e3abe079..358ef57f768 100644 --- a/crates/rpc/rpc/src/eth/helpers/receipt.rs +++ b/crates/rpc/rpc/src/eth/helpers/receipt.rs @@ -1,33 +1,14 @@ //! Builds an RPC receipt response w.r.t. data layout of network. use crate::EthApi; -use alloy_consensus::crypto::RecoveryError; -use reth_chainspec::ChainSpecProvider; -use reth_evm::ConfigureEvm; -use reth_node_api::NodePrimitives; use reth_rpc_convert::RpcConvert; -use reth_rpc_eth_api::{helpers::LoadReceipt, EthApiTypes, RpcNodeCoreExt}; -use reth_storage_api::{BlockReader, ProviderReceipt, ProviderTx}; +use reth_rpc_eth_api::{helpers::LoadReceipt, FromEvmError, RpcNodeCore}; +use reth_rpc_eth_types::EthApiError; -impl LoadReceipt - for EthApi +impl LoadReceipt for EthApi where - Self: RpcNodeCoreExt< - Primitives: NodePrimitives< - SignedTx = ProviderTx, - Receipt = ProviderReceipt, - >, - > + EthApiTypes< - NetworkTypes = Rpc::Network, - RpcConvert: RpcConvert< - Network = Rpc::Network, - Primitives = Self::Primitives, - Error = Self::Error, - >, - Error: From, - >, - Provider: BlockReader + ChainSpecProvider, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index f55d6259267..60d6a151f9b 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -8,18 +8,21 @@ use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{eip191_hash_message, Address, Signature, B256}; use alloy_signer::SignerSync; use alloy_signer_local::PrivateKeySigner; -use reth_evm::ConfigureEvm; use reth_rpc_convert::{RpcConvert, RpcTypes, SignableTxRequest}; -use reth_rpc_eth_api::helpers::{signer::Result, AddDevSigners, EthSigner}; -use reth_rpc_eth_types::SignError; -use reth_storage_api::{BlockReader, ProviderTx}; +use reth_rpc_eth_api::{ + helpers::{signer::Result, AddDevSigners, EthSigner}, + FromEvmError, RpcNodeCore, +}; +use reth_rpc_eth_types::{EthApiError, SignError}; +use reth_storage_api::ProviderTx; -impl AddDevSigners - for EthApi +impl AddDevSigners for EthApi where - Provider: BlockReader, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert>>>, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert< + Network: RpcTypes>>, + >, { fn with_dev_accounts(&self) { *self.inner.signers().write() = DevSigner::random_signers(20) diff --git a/crates/rpc/rpc/src/eth/helpers/spec.rs b/crates/rpc/rpc/src/eth/helpers/spec.rs index a26d671b8e5..b8ff79f9dc7 100644 --- a/crates/rpc/rpc/src/eth/helpers/spec.rs +++ b/crates/rpc/rpc/src/eth/helpers/spec.rs @@ -1,30 +1,19 @@ use alloy_primitives::U256; -use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; -use reth_evm::ConfigureEvm; -use reth_network_api::NetworkInfo; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ helpers::{spec::SignersForApi, EthApiSpec}, RpcNodeCore, }; -use reth_storage_api::{BlockNumReader, BlockReader, ProviderTx, StageCheckpointReader}; +use reth_storage_api::ProviderTx; use crate::EthApi; -impl EthApiSpec - for EthApi +impl EthApiSpec for EthApi where - Self: RpcNodeCore< - Provider: ChainSpecProvider - + 
BlockNumReader - + StageCheckpointReader, - Network: NetworkInfo, - >, - Provider: BlockReader, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, + N: RpcNodeCore, + Rpc: RpcConvert, { - type Transaction = ProviderTx; + type Transaction = ProviderTx; type Rpc = Rpc::Network; fn starting_block(&self) -> U256 { diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index c26dccdd4e1..5d767d2ede5 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -1,43 +1,27 @@ //! Contains RPC handler implementations specific to state. -use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; -use reth_evm::ConfigureEvm; use reth_rpc_convert::RpcConvert; -use reth_storage_api::{BlockReader, StateProviderFactory}; -use reth_transaction_pool::TransactionPool; - use reth_rpc_eth_api::{ - helpers::{EthState, LoadState, SpawnBlocking}, - EthApiTypes, RpcNodeCoreExt, + helpers::{EthState, LoadState}, + RpcNodeCore, }; use crate::EthApi; -impl EthState - for EthApi +impl EthState for EthApi where - Self: LoadState + SpawnBlocking, - Provider: BlockReader, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, + N: RpcNodeCore, + Rpc: RpcConvert, { fn max_proof_window(&self) -> u64 { self.inner.eth_proof_window() } } -impl LoadState - for EthApi +impl LoadState for EthApi where - Self: RpcNodeCoreExt< - Provider: BlockReader - + StateProviderFactory - + ChainSpecProvider, - Pool: TransactionPool, - > + EthApiTypes, - Provider: BlockReader, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, + N: RpcNodeCore, + Rpc: RpcConvert, { } @@ -46,78 +30,42 @@ mod tests { use crate::eth::helpers::types::EthRpcConverter; use super::*; - use alloy_consensus::Header; - use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M; use alloy_primitives::{Address, StorageKey, StorageValue, U256}; use reth_chainspec::ChainSpec; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; - use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; - use reth_rpc_eth_api::helpers::EthState; - use reth_rpc_eth_types::{ - receipt::EthReceiptConverter, EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, - GasPriceOracle, - }; - use reth_rpc_server_types::constants::{ - DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS, + use reth_provider::{ + test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}, + ChainSpecProvider, }; - use reth_tasks::pool::BlockingTaskPool; + use reth_rpc_eth_api::{helpers::EthState, node::RpcNodeCoreAdapter}; use reth_transaction_pool::test_utils::{testing_pool, TestPool}; use std::collections::HashMap; - fn noop_eth_api( - ) -> EthApi> { + fn noop_eth_api() -> EthApi< + RpcNodeCoreAdapter, + EthRpcConverter, + > { let provider = NoopProvider::default(); let pool = testing_pool(); let evm_config = EthEvmConfig::mainnet(); - let cache = EthStateCache::spawn(NoopProvider::default(), Default::default()); - let rpc_converter = - EthRpcConverter::new(EthReceiptConverter::new(provider.chain_spec()), ()); - EthApi::new( - provider, - pool, - NoopNetwork::default(), - cache.clone(), - GasPriceOracle::new(NoopProvider::default(), Default::default(), cache), - ETHEREUM_BLOCK_GAS_LIMIT_30M, - DEFAULT_MAX_SIMULATE_BLOCKS, - DEFAULT_ETH_PROOF_WINDOW, - BlockingTaskPool::build().expect("failed to build tracing pool"), - FeeHistoryCache::
::new(FeeHistoryCacheConfig::default()), - evm_config, - DEFAULT_PROOF_PERMITS, - rpc_converter, - ) + EthApi::builder(provider, pool, NoopNetwork::default(), evm_config).build() } fn mock_eth_api( accounts: HashMap, - ) -> EthApi> { + ) -> EthApi< + RpcNodeCoreAdapter, + EthRpcConverter, + > { let pool = testing_pool(); let mock_provider = MockEthProvider::default(); let evm_config = EthEvmConfig::new(mock_provider.chain_spec()); mock_provider.extend_accounts(accounts); - let cache = EthStateCache::spawn(mock_provider.clone(), Default::default()); - let rpc_converter = - EthRpcConverter::new(EthReceiptConverter::new(mock_provider.chain_spec()), ()); - EthApi::new( - mock_provider.clone(), - pool, - (), - cache.clone(), - GasPriceOracle::new(mock_provider, Default::default(), cache), - ETHEREUM_BLOCK_GAS_LIMIT_30M, - DEFAULT_MAX_SIMULATE_BLOCKS, - DEFAULT_ETH_PROOF_WINDOW + 1, - BlockingTaskPool::build().expect("failed to build tracing pool"), - FeeHistoryCache::
::new(FeeHistoryCacheConfig::default()), - evm_config, - DEFAULT_PROOF_PERMITS, - rpc_converter, - ) + EthApi::builder(mock_provider, pool, NoopNetwork::default(), evm_config).build() } #[tokio::test] diff --git a/crates/rpc/rpc/src/eth/helpers/trace.rs b/crates/rpc/rpc/src/eth/helpers/trace.rs index 34db918a135..3e00f2df0c4 100644 --- a/crates/rpc/rpc/src/eth/helpers/trace.rs +++ b/crates/rpc/rpc/src/eth/helpers/trace.rs @@ -1,31 +1,15 @@ //! Contains RPC handler implementations specific to tracing. -use reth_evm::ConfigureEvm; -use reth_node_api::NodePrimitives; use reth_rpc_convert::RpcConvert; -use reth_rpc_eth_api::{ - helpers::{LoadState, Trace}, - FromEvmError, -}; -use reth_storage_api::{BlockReader, ProviderHeader, ProviderTx}; +use reth_rpc_eth_api::{helpers::Trace, FromEvmError, RpcNodeCore}; +use reth_rpc_eth_types::EthApiError; use crate::EthApi; -impl Trace - for EthApi +impl Trace for EthApi where - Self: LoadState< - Provider: BlockReader, - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - >, - Error: FromEvmError, - >, - Provider: BlockReader, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { } diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index e6123895060..6f575bc9c61 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -2,23 +2,19 @@ use crate::EthApi; use alloy_primitives::{Bytes, B256}; -use reth_evm::ConfigureEvm; use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction, SpawnBlocking}, - EthApiTypes, FromEthApiError, FullEthApiTypes, RpcNodeCore, RpcNodeCoreExt, + helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction}, + FromEvmError, RpcNodeCore, }; -use reth_rpc_eth_types::utils::recover_raw_transaction; -use reth_storage_api::{BlockReader, BlockReaderIdExt, ProviderTx, TransactionsProvider}; +use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; -impl EthTransactions - for EthApi +impl EthTransactions for EthApi where - Self: LoadTransaction + EthApiTypes, - Provider: BlockReader>, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] fn signers(&self) -> &SignersForRpc { @@ -37,49 +33,29 @@ where let pool_transaction = ::Transaction::from_pooled(recovered); // submit the transaction to the pool with a `Local` origin - let hash = self - .pool() - .add_transaction(TransactionOrigin::Local, pool_transaction) - .await - .map_err(Self::Error::from_eth_err)?; + let hash = self.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; Ok(hash) } } -impl LoadTransaction - for EthApi +impl LoadTransaction for EthApi where - Self: SpawnBlocking - + FullEthApiTypes - + RpcNodeCoreExt - + EthApiTypes, - Provider: BlockReader, - EvmConfig: ConfigureEvm, - Rpc: RpcConvert, + N: RpcNodeCore, + EthApiError: FromEvmError, + Rpc: RpcConvert, { } #[cfg(test)] mod tests { - use crate::eth::helpers::types::EthRpcConverter; - use super::*; - use alloy_eips::eip1559::ETHEREUM_BLOCK_GAS_LIMIT_30M; use alloy_primitives::{hex_literal::hex, Bytes}; use reth_chainspec::ChainSpecProvider; use reth_evm_ethereum::EthEvmConfig; use reth_network_api::noop::NoopNetwork; use 
reth_provider::test_utils::NoopProvider; use reth_rpc_eth_api::helpers::EthTransactions; - use reth_rpc_eth_types::{ - receipt::EthReceiptConverter, EthStateCache, FeeHistoryCache, FeeHistoryCacheConfig, - GasPriceOracle, - }; - use reth_rpc_server_types::constants::{ - DEFAULT_ETH_PROOF_WINDOW, DEFAULT_MAX_SIMULATE_BLOCKS, DEFAULT_PROOF_PERMITS, - }; - use reth_tasks::pool::BlockingTaskPool; use reth_transaction_pool::{test_utils::testing_pool, TransactionPool}; #[tokio::test] @@ -90,25 +66,9 @@ mod tests { let pool = testing_pool(); let evm_config = EthEvmConfig::new(noop_provider.chain_spec()); - let cache = EthStateCache::spawn(noop_provider.clone(), Default::default()); - let fee_history_cache = FeeHistoryCache::new(FeeHistoryCacheConfig::default()); - let rpc_converter = - EthRpcConverter::new(EthReceiptConverter::new(noop_provider.chain_spec()), ()); - let eth_api = EthApi::new( - noop_provider.clone(), - pool.clone(), - noop_network_provider, - cache.clone(), - GasPriceOracle::new(noop_provider, Default::default(), cache.clone()), - ETHEREUM_BLOCK_GAS_LIMIT_30M, - DEFAULT_MAX_SIMULATE_BLOCKS, - DEFAULT_ETH_PROOF_WINDOW, - BlockingTaskPool::build().expect("failed to build tracing pool"), - fee_history_cache, - evm_config, - DEFAULT_PROOF_PERMITS, - rpc_converter, - ); + let eth_api = + EthApi::builder(noop_provider.clone(), pool.clone(), noop_network_provider, evm_config) + .build(); // https://etherscan.io/tx/0xa694b71e6c128a2ed8e2e0f6770bddbe52e3bb8f10e8472f9a79ab81497a8b5d let tx_1 = Bytes::from(hex!( diff --git a/crates/rpc/rpc/src/eth/helpers/types.rs b/crates/rpc/rpc/src/eth/helpers/types.rs index 816820fea6e..0c1d59a6ca3 100644 --- a/crates/rpc/rpc/src/eth/helpers/types.rs +++ b/crates/rpc/rpc/src/eth/helpers/types.rs @@ -21,7 +21,7 @@ mod tests { #[test] fn test_resolve_transaction_empty_request() { - let builder = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone()), ()); + let builder = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone())); let mut db = CacheDB::>::default(); let tx = TransactionRequest::default(); let result = resolve_transaction(tx, 21000, 0, 1, &mut db, &builder).unwrap(); @@ -36,7 +36,7 @@ mod tests { #[test] fn test_resolve_transaction_legacy() { let mut db = CacheDB::>::default(); - let builder = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone()), ()); + let builder = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone())); let tx = TransactionRequest { gas_price: Some(100), ..Default::default() }; @@ -52,7 +52,7 @@ mod tests { #[test] fn test_resolve_transaction_partial_eip1559() { let mut db = CacheDB::>::default(); - let rpc_converter = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone()), ()); + let rpc_converter = EthRpcConverter::new(EthReceiptConverter::new(MAINNET.clone())); let tx = TransactionRequest { max_fee_per_gas: Some(200), From 4bd2fd2dacd01c551219dee6df27ca35875b186f Mon Sep 17 00:00:00 2001 From: cakevm Date: Mon, 21 Jul 2025 15:59:03 +0200 Subject: [PATCH 239/305] refactor: rename `AlloyRethProvider` to `RpcBlockchainProvider` and move to storage (#17524) --- .github/assets/check_wasm.sh | 2 +- Cargo.lock | 59 ++-- Cargo.toml | 4 +- .../rpc-provider}/Cargo.toml | 5 +- .../rpc-provider}/README.md | 29 +- .../rpc-provider}/src/lib.rs | 269 ++++++------------ 6 files changed, 136 insertions(+), 232 deletions(-) rename crates/{alloy-provider => storage/rpc-provider}/Cargo.toml (88%) rename crates/{alloy-provider => storage/rpc-provider}/README.md (53%) rename crates/{alloy-provider => 
storage/rpc-provider}/src/lib.rs (87%) diff --git a/.github/assets/check_wasm.sh b/.github/assets/check_wasm.sh index cec98aa8dbe..e140d01e796 100755 --- a/.github/assets/check_wasm.sh +++ b/.github/assets/check_wasm.sh @@ -60,7 +60,7 @@ exclude_crates=( reth-ress-provider # The following are not supposed to be working reth # all of the crates below - reth-alloy-provider + reth-storage-rpc-provider reth-invalid-block-hooks # reth-provider reth-libmdbx # mdbx reth-mdbx-sys # mdbx diff --git a/Cargo.lock b/Cargo.lock index ba6ea52a162..92955a7b15d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7191,36 +7191,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "reth-alloy-provider" -version = "1.5.1" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-network", - "alloy-primitives", - "alloy-provider", - "alloy-rpc-types", - "alloy-rpc-types-engine", - "parking_lot", - "reth-chainspec", - "reth-db-api", - "reth-errors", - "reth-execution-types", - "reth-node-types", - "reth-primitives", - "reth-provider", - "reth-prune-types", - "reth-rpc-convert", - "reth-stages-types", - "reth-storage-api", - "reth-trie", - "revm", - "revm-primitives", - "tokio", - "tracing", -] - [[package]] name = "reth-basic-payload-builder" version = "1.5.1" @@ -10375,6 +10345,35 @@ dependencies = [ "thiserror 2.0.12", ] +[[package]] +name = "reth-storage-rpc-provider" +version = "1.5.1" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-network", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types", + "alloy-rpc-types-engine", + "parking_lot", + "reth-chainspec", + "reth-db-api", + "reth-errors", + "reth-execution-types", + "reth-node-types", + "reth-primitives", + "reth-provider", + "reth-prune-types", + "reth-rpc-convert", + "reth-stages-types", + "reth-storage-api", + "reth-trie", + "revm", + "tokio", + "tracing", +] + [[package]] name = "reth-tasks" version = "1.5.1" diff --git a/Cargo.toml b/Cargo.toml index 464f9212ac1..14da2c3b7e1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,7 @@ exclude = [".github/"] members = [ "bin/reth-bench/", "bin/reth/", - "crates/alloy-provider/", + "crates/storage/rpc-provider/", "crates/chain-state/", "crates/chainspec/", "crates/cli/cli/", @@ -323,7 +323,7 @@ codegen-units = 1 # reth op-reth = { path = "crates/optimism/bin" } reth = { path = "bin/reth" } -reth-alloy-provider = { path = "crates/alloy-provider" } +reth-storage-rpc-provider = { path = "crates/storage/rpc-provider" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-bench = { path = "bin/reth-bench" } reth-chain-state = { path = "crates/chain-state" } diff --git a/crates/alloy-provider/Cargo.toml b/crates/storage/rpc-provider/Cargo.toml similarity index 88% rename from crates/alloy-provider/Cargo.toml rename to crates/storage/rpc-provider/Cargo.toml index 9e112b487b5..a47bf7ea218 100644 --- a/crates/alloy-provider/Cargo.toml +++ b/crates/storage/rpc-provider/Cargo.toml @@ -1,12 +1,12 @@ [package] -name = "reth-alloy-provider" +name = "reth-storage-rpc-provider" version.workspace = true edition.workspace = true rust-version.workspace = true license.workspace = true homepage.workspace = true repository.workspace = true -description = "Alloy provider implementation for reth that fetches state via RPC" +description = "RPC-based blockchain provider for reth that fetches data via RPC calls" [lints] workspace = true @@ -44,7 +44,6 @@ parking_lot.workspace = true # revm revm.workspace = true -revm-primitives.workspace = true [dev-dependencies] tokio = { workspace = 
true, features = ["rt", "macros"] } diff --git a/crates/alloy-provider/README.md b/crates/storage/rpc-provider/README.md similarity index 53% rename from crates/alloy-provider/README.md rename to crates/storage/rpc-provider/README.md index 0c02dbdf32a..7180d41840d 100644 --- a/crates/alloy-provider/README.md +++ b/crates/storage/rpc-provider/README.md @@ -1,11 +1,12 @@ -# Alloy Provider for Reth +# RPC Blockchain Provider for Reth -This crate provides an implementation of reth's `StateProviderFactory` and related traits that fetches state data via RPC instead of from a local database. +This crate provides an RPC-based implementation of reth's [`BlockchainProvider`](../provider/src/providers/blockchain_provider.rs) which provides access to local blockchain data, this crate offers the same functionality but for remote blockchain access via RPC. Originally created by [cakevm](https://github.com/cakevm/alloy-reth-provider). ## Features +- Provides the same interface as `BlockchainProvider` but for remote nodes - Implements `StateProviderFactory` for remote RPC state access - Supports Ethereum networks - Useful for testing without requiring a full database @@ -15,8 +16,7 @@ Originally created by [cakevm](https://github.com/cakevm/alloy-reth-provider). ```rust use alloy_provider::ProviderBuilder; -use reth_alloy_provider::AlloyRethProvider; -use reth_ethereum_node::EthereumNode; +use reth_storage_rpc_provider::RpcBlockchainProvider; // Initialize provider let provider = ProviderBuilder::new() @@ -24,11 +24,11 @@ let provider = ProviderBuilder::new() .await .unwrap(); -// Create database provider with NodeTypes -let db_provider = AlloyRethProvider::new(provider, EthereumNode); +// Create RPC blockchain provider with NodeTypes +let rpc_provider = RpcBlockchainProvider::new(provider); -// Get state at specific block -let state = db_provider.state_by_block_id(BlockId::number(16148323)).unwrap(); +// Get state at specific block - same interface as BlockchainProvider +let state = rpc_provider.state_by_block_id(BlockId::number(16148323)).unwrap(); ``` ## Configuration @@ -36,15 +36,14 @@ let state = db_provider.state_by_block_id(BlockId::number(16148323)).unwrap(); The provider can be configured with custom settings: ```rust -use reth_alloy_provider::{AlloyRethProvider, AlloyRethProviderConfig}; -use reth_ethereum_node::EthereumNode; +use reth_storage_rpc_provider::{RpcBlockchainProvider, RpcBlockchainProviderConfig}; -let config = AlloyRethProviderConfig { +let config = RpcBlockchainProviderConfig { compute_state_root: true, // Enable state root computation reth_rpc_support: true, // Use Reth-specific RPC methods (default: true) }; -let db_provider = AlloyRethProvider::new_with_config(provider, EthereumNode, config); +let rpc_provider = RpcBlockchainProvider::new_with_config(provider, config); ``` ## Configuration Options @@ -58,7 +57,9 @@ let db_provider = AlloyRethProvider::new_with_config(provider, EthereumNode, con ## Technical Details -The provider uses `alloy_network::AnyNetwork` for network operations, providing compatibility with various Ethereum-based networks while maintaining the expected block structure with headers. +The `RpcBlockchainProvider` uses `alloy_network::AnyNetwork` for network operations, providing compatibility with various Ethereum-based networks while maintaining the expected block structure with headers. 
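+When targeting a standard (non-Reth) endpoint, the Reth-only paths may need to be
+disabled. A minimal sketch, assuming `rpc` is an already-built alloy provider:
+
+```rust
+let config = RpcBlockchainProviderConfig {
+    // Computing state roots needs trie data that standard RPCs do not expose.
+    compute_state_root: false,
+    // Avoid Reth-only methods such as `debug_codeByHash`.
+    reth_rpc_support: false,
+};
+let provider = RpcBlockchainProvider::new_with_config(rpc, config);
+```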
+ +This provider implements the same traits as the local `BlockchainProvider`, making it a drop-in replacement for scenarios where remote RPC access is preferred over local database access. ## License @@ -67,4 +68,4 @@ Licensed under either of: - Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) - MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) -at your option. \ No newline at end of file +at your option. diff --git a/crates/alloy-provider/src/lib.rs b/crates/storage/rpc-provider/src/lib.rs similarity index 87% rename from crates/alloy-provider/src/lib.rs rename to crates/storage/rpc-provider/src/lib.rs index 39d23efeff1..1e3c288e8a4 100644 --- a/crates/alloy-provider/src/lib.rs +++ b/crates/storage/rpc-provider/src/lib.rs @@ -1,7 +1,11 @@ -//! # Alloy Provider for Reth +//! # RPC Blockchain Provider for Reth //! -//! This crate provides an implementation of reth's `StateProviderFactory` and related traits -//! that fetches state data via RPC instead of from a local database. +//! This crate provides an RPC-based implementation of reth's `StateProviderFactory` and related +//! traits that fetches blockchain data via RPC instead of from a local database. +//! +//! Similar to the [`BlockchainProvider`](../../provider/src/providers/blockchain_provider.rs) +//! which provides access to local blockchain data, this crate offers the same functionality but for +//! remote blockchain access via RPC. //! //! Originally created by [cakevm](https://github.com/cakevm/alloy-reth-provider). //! @@ -65,9 +69,9 @@ use std::{ use tokio::{runtime::Handle, sync::broadcast}; use tracing::{trace, warn}; -/// Configuration for `AlloyRethProvider` +/// Configuration for `RpcBlockchainProvider` #[derive(Debug, Clone)] -pub struct AlloyRethProviderConfig { +pub struct RpcBlockchainProviderConfig { /// Whether to compute state root when creating execution outcomes pub compute_state_root: bool, /// Whether to use Reth-specific RPC methods for better performance @@ -78,13 +82,13 @@ pub struct AlloyRethProviderConfig { pub reth_rpc_support: bool, } -impl Default for AlloyRethProviderConfig { +impl Default for RpcBlockchainProviderConfig { fn default() -> Self { Self { compute_state_root: false, reth_rpc_support: true } } } -impl AlloyRethProviderConfig { +impl RpcBlockchainProviderConfig { /// Sets whether to compute state root when creating execution outcomes pub const fn with_compute_state_root(mut self, compute: bool) -> Self { self.compute_state_root = compute; @@ -98,17 +102,23 @@ impl AlloyRethProviderConfig { } } -/// A provider implementation that uses Alloy RPC to fetch state data +/// An RPC-based blockchain provider that fetches blockchain data via remote RPC calls. +/// +/// This is the RPC equivalent of +/// [`BlockchainProvider`](../../provider/src/providers/blockchain_provider.rs), implementing +/// the same `StateProviderFactory` and related traits but fetching data from a remote node instead +/// of local storage. /// -/// This provider implements reth's `StateProviderFactory` and related traits, -/// allowing it to be used as a drop-in replacement for database-backed providers -/// in scenarios where RPC access is preferred (e.g., testing). 
+/// This provider is useful for: +/// - Testing without requiring a full local database +/// - Accessing blockchain state from remote nodes +/// - Building light clients or tools that don't need full node storage /// /// The provider type is generic over the network type N (defaulting to `AnyNetwork`), /// but the current implementation is specialized for `alloy_network::AnyNetwork` /// as it needs to access block header fields directly. #[derive(Clone)] -pub struct AlloyRethProvider +pub struct RpcBlockchainProvider where Node: NodeTypes, { @@ -121,28 +131,28 @@ where /// Broadcast channel for canon state notifications canon_state_notification: broadcast::Sender>>, /// Configuration for the provider - config: AlloyRethProviderConfig, + config: RpcBlockchainProviderConfig, /// Cached chain spec chain_spec: Arc, } -impl std::fmt::Debug for AlloyRethProvider { +impl std::fmt::Debug for RpcBlockchainProvider { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("AlloyRethProvider").field("config", &self.config).finish() + f.debug_struct("RpcBlockchainProvider").field("config", &self.config).finish() } } -impl AlloyRethProvider { - /// Creates a new `AlloyRethProvider` with default configuration +impl RpcBlockchainProvider { + /// Creates a new `RpcBlockchainProvider` with default configuration pub fn new(provider: P) -> Self where Node::ChainSpec: Default, { - Self::new_with_config(provider, AlloyRethProviderConfig::default()) + Self::new_with_config(provider, RpcBlockchainProviderConfig::default()) } - /// Creates a new `AlloyRethProvider` with custom configuration - pub fn new_with_config(provider: P, config: AlloyRethProviderConfig) -> Self + /// Creates a new `RpcBlockchainProvider` with custom configuration + pub fn new_with_config(provider: P, config: RpcBlockchainProviderConfig) -> Self where Node::ChainSpec: Default, { @@ -185,15 +195,15 @@ impl AlloyRethProvider { } } -impl AlloyRethProvider +impl RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, { /// Helper function to create a state provider for a given block ID - fn create_state_provider(&self, block_id: BlockId) -> AlloyRethStateProvider { - AlloyRethStateProvider::with_chain_spec( + fn create_state_provider(&self, block_id: BlockId) -> RpcBlockchainStateProvider { + RpcBlockchainStateProvider::with_chain_spec( self.provider.clone(), block_id, self.chain_spec.clone(), @@ -216,7 +226,7 @@ where // This allows the types to be instantiated with any network while the actual functionality // requires AnyNetwork. Future improvements could add trait bounds for networks with // compatible block structures. 
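// All of the state-access entry points implemented below route through
// `create_state_provider`, so every state provider shares the cached chain spec
// and this provider's config. A hedged sketch of that shape (the concrete
// signatures below may differ):
//
//     fn state_by_block_id(&self, block_id: BlockId) -> ProviderResult<StateProviderBox> {
//         Ok(Box::new(self.create_state_provider(block_id)))
//     }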
-impl BlockHashReader for AlloyRethProvider +impl BlockHashReader for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -239,7 +249,7 @@ where } } -impl BlockNumReader for AlloyRethProvider +impl BlockNumReader for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -276,7 +286,7 @@ where } } -impl BlockIdReader for AlloyRethProvider +impl BlockIdReader for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -319,7 +329,7 @@ where } } -impl HeaderProvider for AlloyRethProvider +impl HeaderProvider for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -403,7 +413,7 @@ where } } -impl BlockBodyIndicesProvider for AlloyRethProvider +impl BlockBodyIndicesProvider for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -421,7 +431,7 @@ where } } -impl BlockReader for AlloyRethProvider +impl BlockReader for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -502,7 +512,7 @@ where } } -impl BlockReaderIdExt for AlloyRethProvider +impl BlockReaderIdExt for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -536,7 +546,7 @@ where } } -impl ReceiptProvider for AlloyRethProvider +impl ReceiptProvider for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -613,7 +623,7 @@ where } } -impl ReceiptProviderIdExt for AlloyRethProvider +impl ReceiptProviderIdExt for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -622,7 +632,7 @@ where { } -impl TransactionsProvider for AlloyRethProvider +impl TransactionsProvider for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -723,7 +733,7 @@ where } } -impl StateProviderFactory for AlloyRethProvider +impl StateProviderFactory for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -795,15 +805,15 @@ where } } -impl DatabaseProviderFactory for AlloyRethProvider +impl DatabaseProviderFactory for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, Node: NodeTypes, { type DB = DatabaseMock; - type ProviderRW = AlloyRethStateProvider; - type Provider = AlloyRethStateProvider; + type ProviderRW = RpcBlockchainStateProvider; + type Provider = RpcBlockchainStateProvider; fn database_provider_ro(&self) -> Result { // RPC provider returns a new state provider @@ -824,7 +834,7 @@ where } } -impl CanonChainTracker for AlloyRethProvider +impl CanonChainTracker for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -852,7 +862,7 @@ where } } -impl NodePrimitivesProvider for AlloyRethProvider +impl NodePrimitivesProvider for RpcBlockchainProvider where P: Send + Sync, N: Send + Sync, @@ -861,7 +871,7 @@ where type Primitives = PrimitivesTy; } -impl CanonStateSubscriptions for AlloyRethProvider +impl CanonStateSubscriptions for RpcBlockchainProvider where P: Provider + Clone + 'static, N: Network, @@ -873,7 +883,7 @@ where } } -impl ChainSpecProvider for AlloyRethProvider +impl ChainSpecProvider for RpcBlockchainProvider where P: Send + Sync, N: Send + Sync, @@ -887,8 +897,11 @@ where } } -/// State provider implementation that fetches state via RPC -pub struct AlloyRethStateProvider +/// RPC-based state provider implementation that fetches blockchain state via remote RPC calls. +/// +/// This is the state provider counterpart to `RpcBlockchainProvider`, handling state queries +/// at specific block heights via RPC instead of local database access. 
+pub struct RpcBlockchainStateProvider where Node: NodeTypes, { @@ -913,17 +926,17 @@ where } impl std::fmt::Debug - for AlloyRethStateProvider + for RpcBlockchainStateProvider { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("AlloyRethStateProvider") + f.debug_struct("RpcBlockchainStateProvider") .field("provider", &self.provider) .field("block_id", &self.block_id) .finish() } } -impl AlloyRethStateProvider { +impl RpcBlockchainStateProvider { /// Creates a new state provider for the given block pub fn new( provider: P, @@ -1061,7 +1074,7 @@ impl AlloyRethStateProvider { } } -impl StateProvider for AlloyRethStateProvider +impl StateProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1109,7 +1122,7 @@ where } } -impl BytecodeReader for AlloyRethStateProvider +impl BytecodeReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1138,7 +1151,7 @@ where } } -impl AccountReader for AlloyRethStateProvider +impl AccountReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1149,7 +1162,7 @@ where } } -impl StateRootProvider for AlloyRethStateProvider +impl StateRootProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1193,7 +1206,7 @@ where } } -impl StorageReader for AlloyRethStateProvider +impl StorageReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1232,7 +1245,7 @@ where } } -impl reth_storage_api::StorageRootProvider for AlloyRethStateProvider +impl reth_storage_api::StorageRootProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1266,7 +1279,7 @@ where } } -impl reth_storage_api::StateProofProvider for AlloyRethStateProvider +impl reth_storage_api::StateProofProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1298,7 +1311,8 @@ where } } -impl reth_storage_api::HashedPostStateProvider for AlloyRethStateProvider +impl reth_storage_api::HashedPostStateProvider + for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1310,7 +1324,7 @@ where } } -impl StateReader for AlloyRethStateProvider +impl StateReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1327,7 +1341,7 @@ where } } -impl DBProvider for AlloyRethStateProvider +impl DBProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1359,7 +1373,7 @@ where } } -impl BlockNumReader for AlloyRethStateProvider +impl BlockNumReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1398,7 +1412,7 @@ where } } -impl BlockHashReader for AlloyRethStateProvider +impl BlockHashReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1425,7 +1439,7 @@ where } } -impl BlockIdReader for AlloyRethStateProvider +impl BlockIdReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1451,7 +1465,7 @@ where } } -impl BlockReader for AlloyRethStateProvider +impl BlockReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1522,7 +1536,7 @@ where } } -impl TransactionsProvider for AlloyRethStateProvider +impl TransactionsProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1593,7 +1607,7 @@ where } } -impl ReceiptProvider for AlloyRethStateProvider +impl ReceiptProvider for 
RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1631,7 +1645,7 @@ where } } -impl HeaderProvider for AlloyRethStateProvider +impl HeaderProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1685,7 +1699,7 @@ where } } -impl PruneCheckpointReader for AlloyRethStateProvider +impl PruneCheckpointReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1703,7 +1717,7 @@ where } } -impl StageCheckpointReader for AlloyRethStateProvider +impl StageCheckpointReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1725,7 +1739,7 @@ where } } -impl ChangeSetReader for AlloyRethStateProvider +impl ChangeSetReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1739,7 +1753,7 @@ where } } -impl StateProviderFactory for AlloyRethStateProvider +impl StateProviderFactory for RpcBlockchainStateProvider where P: Provider + Clone + 'static + Send + Sync, Node: NodeTypes + 'static, @@ -1805,7 +1819,7 @@ where } } -impl ChainSpecProvider for AlloyRethStateProvider +impl ChainSpecProvider for RpcBlockchainStateProvider where P: Send + Sync + std::fmt::Debug, N: Send + Sync, @@ -1827,7 +1841,7 @@ where // Note: FullExecutionDataProvider is already implemented via the blanket implementation // for types that implement both ExecutionDataProvider and BlockExecutionForkProvider -impl StatsReader for AlloyRethStateProvider +impl StatsReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1838,7 +1852,7 @@ where } } -impl BlockBodyIndicesProvider for AlloyRethStateProvider +impl BlockBodyIndicesProvider for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1859,7 +1873,7 @@ where } } -impl NodePrimitivesProvider for AlloyRethStateProvider +impl NodePrimitivesProvider for RpcBlockchainStateProvider where P: Send + Sync + std::fmt::Debug, N: Send + Sync, @@ -1868,7 +1882,7 @@ where type Primitives = PrimitivesTy; } -impl ChainStateBlockReader for AlloyRethStateProvider +impl ChainStateBlockReader for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1883,7 +1897,7 @@ where } } -impl ChainStateBlockWriter for AlloyRethStateProvider +impl ChainStateBlockWriter for RpcBlockchainStateProvider where P: Provider + Clone + 'static, N: Network, @@ -1897,112 +1911,3 @@ where Err(ProviderError::UnsupportedProvider) } } - -// Async database wrapper for revm compatibility -#[allow(dead_code)] -#[derive(Debug, Clone)] -struct AsyncDbWrapper { - provider: P, - block_id: BlockId, - network: std::marker::PhantomData, -} - -#[allow(dead_code)] -impl AsyncDbWrapper { - const fn new(provider: P, block_id: BlockId) -> Self { - Self { provider, block_id, network: std::marker::PhantomData } - } - - /// Helper function to execute async operations in a blocking context - fn block_on_async(&self, fut: F) -> T - where - F: Future, - { - tokio::task::block_in_place(move || Handle::current().block_on(fut)) - } -} - -impl revm::Database for AsyncDbWrapper -where - P: Provider + Clone + 'static, - N: Network, -{ - type Error = ProviderError; - - fn basic(&mut self, address: Address) -> Result, Self::Error> { - self.block_on_async(async { - let account_info = self - .provider - .get_account_info(address) - .block_id(self.block_id) - .await - .map_err(ProviderError::other)?; - - // Only return account if it exists - if account_info.balance.is_zero() && - account_info.nonce == 0 && - 
account_info.code.is_empty() - { - Ok(None) - } else { - let code_hash = if account_info.code.is_empty() { - revm_primitives::KECCAK_EMPTY - } else { - revm_primitives::keccak256(&account_info.code) - }; - - Ok(Some(revm::state::AccountInfo { - balance: account_info.balance, - nonce: account_info.nonce, - code_hash, - code: if account_info.code.is_empty() { - None - } else { - Some(revm::bytecode::Bytecode::new_raw(account_info.code)) - }, - })) - } - }) - } - - fn code_by_hash(&mut self, code_hash: B256) -> Result { - self.block_on_async(async { - // The method `debug_codeByHash` is currently only available on a Reth node - let code = self - .provider - .debug_code_by_hash(code_hash, None) - .await - .map_err(Self::Error::other)?; - - let Some(code) = code else { - // If the code was not found, return - return Ok(revm::bytecode::Bytecode::new()); - }; - - Ok(revm::bytecode::Bytecode::new_raw(code)) - }) - } - - fn storage(&mut self, address: Address, index: U256) -> Result { - self.block_on_async(async { - self.provider - .get_storage_at(address, index) - .block_id(self.block_id) - .await - .map_err(ProviderError::other) - }) - } - - fn block_hash(&mut self, number: u64) -> Result { - self.block_on_async(async { - let block = self - .provider - .get_block_by_number(number.into()) - .await - .map_err(ProviderError::other)? - .ok_or(ProviderError::HeaderNotFound(number.into()))?; - - Ok(block.header().hash()) - }) - } -} From 94c1c3f0784901aa94337bab63ef11b54a5b5ac6 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 21 Jul 2025 17:51:40 +0300 Subject: [PATCH 240/305] feat: `ComponentsFor` type alias (#17533) --- crates/ethereum/node/src/node.rs | 10 +++------- crates/exex/test-utils/src/lib.rs | 20 ++++---------------- crates/node/builder/src/builder/states.rs | 4 ++-- crates/node/builder/src/node.rs | 4 ++++ examples/custom-engine-types/src/main.rs | 15 +++------------ 5 files changed, 16 insertions(+), 37 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 2860053cf25..a8378d7d933 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -32,8 +32,7 @@ use reth_node_builder::{ BasicEngineApiBuilder, EngineApiBuilder, EngineValidatorAddOn, EngineValidatorBuilder, EthApiBuilder, EthApiCtx, Identity, RethRpcAddOns, RpcAddOns, RpcHandle, }, - BuilderContext, DebugNode, Node, NodeAdapter, NodeComponentsBuilder, PayloadBuilderConfig, - PayloadTypes, + BuilderContext, DebugNode, Node, NodeAdapter, PayloadBuilderConfig, PayloadTypes, }; use reth_provider::{providers::ProviderFactoryBuilder, EthStorage}; use reth_rpc::{ @@ -343,11 +342,8 @@ where EthereumConsensusBuilder, >; - type AddOns = EthereumAddOns< - NodeAdapter>::Components>, - EthereumEthApiBuilder, - EthereumEngineValidatorBuilder, - >; + type AddOns = + EthereumAddOns, EthereumEthApiBuilder, EthereumEngineValidatorBuilder>; fn components_builder(&self) -> Self::ComponentsBuilder { Self::components() diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index 14001ae8299..6463740dba2 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -35,7 +35,7 @@ use reth_node_api::{ use reth_node_builder::{ components::{ BasicPayloadServiceBuilder, Components, ComponentsBuilder, ConsensusBuilder, - ExecutorBuilder, NodeComponentsBuilder, PoolBuilder, + ExecutorBuilder, PoolBuilder, }, BuilderContext, Node, NodeAdapter, RethFullAdapter, }; @@ -133,11 +133,8 @@ where TestExecutorBuilder, 
TestConsensusBuilder, >; - type AddOns = EthereumAddOns< - NodeAdapter>::Components>, - EthereumEthApiBuilder, - EthereumEngineValidatorBuilder, - >; + type AddOns = + EthereumAddOns, EthereumEthApiBuilder, EthereumEngineValidatorBuilder>; fn components_builder(&self) -> Self::ComponentsBuilder { ComponentsBuilder::default() @@ -158,16 +155,7 @@ where pub type TmpDB = Arc>; /// The [`NodeAdapter`] for the [`TestExExContext`]. Contains type necessary to /// boot the testing environment -pub type Adapter = NodeAdapter< - RethFullAdapter, - <>, - >, - >>::ComponentsBuilder as NodeComponentsBuilder>>::Components, ->; +pub type Adapter = NodeAdapter>; /// An [`ExExContext`] using the [`Adapter`] type. pub type TestExExContext = ExExContext; diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index c4ced59d493..42646122781 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -10,7 +10,7 @@ use crate::{ hooks::NodeHooks, launch::LaunchNode, rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, - AddOns, FullNode, + AddOns, ComponentsFor, FullNode, }; use reth_exex::ExExContext; @@ -74,7 +74,7 @@ impl fmt::Debug for NodeTypesAdapter { /// Container for the node's types and the components and other internals that can be used by /// addons of the node. #[derive(Debug)] -pub struct NodeAdapter> { +pub struct NodeAdapter = ComponentsFor> { /// The components of the node. pub components: C, /// The task executor for the node. diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 966b1227629..01ca760bc5b 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -19,6 +19,10 @@ use std::{ sync::Arc, }; +/// A helper type to obtain components for a given node when [`FullNodeTypes::Types`] is a [`Node`] +/// implementation. +pub type ComponentsFor = <<::Types as Node>::ComponentsBuilder as NodeComponentsBuilder>::Components; + /// A [`crate::Node`] is a [`NodeTypes`] that comes with preconfigured components. /// /// This can be used to configure the builder with a preset of components. 
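A minimal sketch of what the new alias buys at call sites; the angle-bracketed generic parameters below are reconstructed by hand and may not match the exact upstream bounds:

```rust
// Assumed reconstruction of the alias added in crates/node/builder/src/node.rs:
// it projects the `Components` type out of a node's components builder.
pub type ComponentsFor<N> =
    <<<N as FullNodeTypes>::Types as Node<N>>::ComponentsBuilder
        as NodeComponentsBuilder<N>>::Components;

// Inside `impl<N> Node<N> for EthereumNode`, the associated type shrinks from
// the hand-written projection ...
//
//     type AddOns = EthereumAddOns<
//         NodeAdapter<N, <Self::ComponentsBuilder as NodeComponentsBuilder<N>>::Components>,
//         EthereumEthApiBuilder,
//         EthereumEngineValidatorBuilder,
//     >;
//
// ... to the alias-backed form, with `NodeAdapter`'s component parameter
// defaulting to `ComponentsFor<N>`:
//
//     type AddOns =
//         EthereumAddOns<NodeAdapter<N>, EthereumEthApiBuilder, EthereumEngineValidatorBuilder>;
```
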
diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index 8ab99b8fcb7..ad370ef0042 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -41,7 +41,7 @@ use reth_ethereum::{ builder::{ components::{BasicPayloadServiceBuilder, ComponentsBuilder, PayloadBuilderBuilder}, rpc::{EngineValidatorBuilder, RpcAddOns}, - BuilderContext, Node, NodeAdapter, NodeBuilder, NodeComponentsBuilder, + BuilderContext, Node, NodeAdapter, NodeBuilder, }, core::{args::RpcServerArgs, node_config::NodeConfig}, node::{ @@ -292,14 +292,7 @@ pub type MyNodeAddOns = RpcAddOns Node for MyCustomNode where - N: FullNodeTypes< - Types: NodeTypes< - Payload = CustomEngineTypes, - ChainSpec = ChainSpec, - Primitives = EthPrimitives, - Storage = EthStorage, - >, - >, + N: FullNodeTypes, { type ComponentsBuilder = ComponentsBuilder< N, @@ -309,9 +302,7 @@ where EthereumExecutorBuilder, EthereumConsensusBuilder, >; - type AddOns = MyNodeAddOns< - NodeAdapter>::Components>, - >; + type AddOns = MyNodeAddOns>; fn components_builder(&self) -> Self::ComponentsBuilder { ComponentsBuilder::default() From 0a8cb95eb9bc4f70ee17f847842a9cd81186c547 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 21 Jul 2025 17:51:46 +0300 Subject: [PATCH 241/305] feat: `EthApiCtx::eth_api_builder` (#17532) --- crates/ethereum/node/src/node.rs | 12 +----------- crates/node/builder/src/rpc.rs | 17 ++++++++++++++++- crates/optimism/rpc/src/eth/mod.rs | 13 ++----------- 3 files changed, 19 insertions(+), 23 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index a8378d7d933..f74db2fdc9f 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -155,17 +155,7 @@ where type EthApi = EthApiFor; async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result { - let api = reth_rpc::EthApiBuilder::new_with_components(ctx.components.clone()) - .eth_cache(ctx.cache) - .task_spawner(ctx.components.task_executor().clone()) - .gas_cap(ctx.config.rpc_gas_cap.into()) - .max_simulate_blocks(ctx.config.rpc_max_simulate_blocks) - .eth_proof_window(ctx.config.eth_proof_window) - .fee_history_cache_config(ctx.config.fee_history_cache) - .proof_permits(ctx.config.proof_permits) - .gas_oracle_config(ctx.config.gas_oracle) - .build(); - Ok(api) + Ok(ctx.eth_api_builder().build()) } } diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 6b5561ef987..0a5c31f7ab1 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -18,7 +18,7 @@ use reth_node_core::{ version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use reth_payload_builder::{PayloadBuilderHandle, PayloadStore}; -use reth_rpc::eth::{EthApiTypes, FullEthApiServer}; +use reth_rpc::eth::{core::EthRpcConverterFor, EthApiTypes, FullEthApiServer}; use reth_rpc_api::{eth::helpers::AddDevSigners, IntoEngineApiRpcModule}; use reth_rpc_builder::{ auth::{AuthRpcModule, AuthServerHandle}, @@ -956,6 +956,21 @@ pub struct EthApiCtx<'a, N: FullNodeTypes> { pub cache: EthStateCache>, } +impl<'a, N: FullNodeComponents>> EthApiCtx<'a, N> { + /// Provides a [`EthApiBuilder`] with preconfigured config and components. 
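+    ///
+    /// The gas cap, simulate-block limit, proof window, fee history cache and gas oracle
+    /// settings are all copied from the RPC server config carried by this context.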
+ pub fn eth_api_builder(self) -> reth_rpc::EthApiBuilder> { + reth_rpc::EthApiBuilder::new_with_components(self.components.clone()) + .eth_cache(self.cache) + .task_spawner(self.components.task_executor().clone()) + .gas_cap(self.config.rpc_gas_cap.into()) + .max_simulate_blocks(self.config.rpc_max_simulate_blocks) + .eth_proof_window(self.config.eth_proof_window) + .fee_history_cache_config(self.config.fee_history_cache) + .proof_permits(self.config.proof_permits) + .gas_oracle_config(self.config.gas_oracle) + } +} + /// A `EthApi` that knows how to build `eth` namespace API from [`FullNodeComponents`]. pub trait EthApiBuilder: Default + Send + 'static { /// The Ethapi implementation this builder will build. diff --git a/crates/optimism/rpc/src/eth/mod.rs b/crates/optimism/rpc/src/eth/mod.rs index 461a36a1894..3b11c6a28fa 100644 --- a/crates/optimism/rpc/src/eth/mod.rs +++ b/crates/optimism/rpc/src/eth/mod.rs @@ -368,17 +368,8 @@ where let rpc_converter = RpcConverter::new(OpReceiptConverter::new(ctx.components.provider().clone())) .with_mapper(OpTxInfoMapper::new(ctx.components.provider().clone())); - let eth_api = reth_rpc::EthApiBuilder::new_with_components(ctx.components.clone()) - .with_rpc_converter(rpc_converter) - .eth_cache(ctx.cache) - .task_spawner(ctx.components.task_executor().clone()) - .gas_cap(ctx.config.rpc_gas_cap.into()) - .max_simulate_blocks(ctx.config.rpc_max_simulate_blocks) - .eth_proof_window(ctx.config.eth_proof_window) - .fee_history_cache_config(ctx.config.fee_history_cache) - .proof_permits(ctx.config.proof_permits) - .gas_oracle_config(ctx.config.gas_oracle) - .build_inner(); + + let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner(); let sequencer_client = if let Some(url) = sequencer_url { Some( From 8c50d841872ce82ef5a4dfeadb2160aeeab9c8df Mon Sep 17 00:00:00 2001 From: PixelPilot <161360836+PixelPil0t1@users.noreply.github.com> Date: Mon, 21 Jul 2025 17:17:54 +0200 Subject: [PATCH 242/305] docs: Fix broken fuzzing module link in database.md (#17523) --- docs/design/database.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/design/database.md b/docs/design/database.md index fdc6251c0ca..42ec8eb8c6c 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -10,7 +10,7 @@ - We want Reth's serialized format to be able to trade off read/write speed for size, depending on who the user is. - To achieve that, we created the [Encode/Decode/Compress/Decompress traits](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/table.rs) to make the (de)serialization of database `Table::Key` and `Table::Values` generic. - This allows for [out-of-the-box benchmarking](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/db/benches/encoding_iai.rs#L5) (using [Criterion](https://github.com/bheisler/criterion.rs)) - - It also enables [out-of-the-box fuzzing](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/interfaces/src/db/codecs/fuzz/mod.rs) using [trailofbits/test-fuzz](https://github.com/trailofbits/test-fuzz). + - It also enables [out-of-the-box fuzzing](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/tables/codecs/fuzz/mod.rs) using [trailofbits/test-fuzz](https://github.com/trailofbits/test-fuzz). 
- We implemented that trait for the following encoding formats: - [Ethereum-specific Compact Encoding](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/codecs/derive/src/compact/mod.rs): A lot of Ethereum datatypes have unnecessary zeros when serialized, or optional (e.g. on empty hashes) which would be nice not to pay in storage costs. - [Erigon](https://github.com/ledgerwatch/erigon/blob/12ee33a492f5d240458822d052820d9998653a63/docs/programmers_guide/db_walkthrough.MD) achieves that by having a `bitfield` set on Table "PlainState which adds a bitfield to Accounts. From 566ff51d042f057f53c4df9a7342397008aeb687 Mon Sep 17 00:00:00 2001 From: Brian Picciano Date: Mon, 21 Jul 2025 18:32:31 +0200 Subject: [PATCH 243/305] perf(trie): Re-use storage tries across payloads (#17488) Co-authored-by: Matthias Seitz --- .../configured_sparse_trie.rs | 6 + .../tree/src/tree/payload_processor/mod.rs | 158 ++++-------------- .../src/tree/payload_processor/sparse_trie.rs | 63 ++----- crates/trie/sparse/src/state.rs | 76 +++++++-- 4 files changed, 112 insertions(+), 191 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs index 83f8c82b529..d59f14c796a 100644 --- a/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/configured_sparse_trie.rs @@ -34,6 +34,12 @@ impl From for ConfiguredSparseTrie { } } +impl Default for ConfiguredSparseTrie { + fn default() -> Self { + Self::Serial(Default::default()) + } +} + impl SparseTrieInterface for ConfiguredSparseTrie { fn with_root( self, diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index 2078df8088a..a5042c529d4 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -14,7 +14,7 @@ use alloy_evm::block::StateChangeSource; use alloy_primitives::B256; use executor::WorkloadExecutor; use multiproof::{SparseTrieUpdate, *}; -use parking_lot::{Mutex, RwLock}; +use parking_lot::RwLock; use prewarm::PrewarmMetrics; use reth_evm::{ConfigureEvm, OnStateHook, SpecFor}; use reth_primitives_traits::{NodePrimitives, SealedHeaderFor}; @@ -30,9 +30,8 @@ use reth_trie_parallel::{ }; use reth_trie_sparse::{ provider::{TrieNodeProvider, TrieNodeProviderFactory}, - SerialSparseTrie, SparseTrie, SparseTrieInterface, + ClearedSparseStateTrie, SerialSparseTrie, SparseStateTrie, SparseTrie, }; -use reth_trie_sparse_parallel::ParallelSparseTrie; use std::{ collections::VecDeque, sync::{ @@ -75,9 +74,11 @@ where precompile_cache_disabled: bool, /// Precompile cache map. precompile_cache_map: PrecompileCacheMap>, - /// A cleared accounts sparse trie, kept around to be reused for the state root computation so + /// A cleared `SparseStateTrie`, kept around to be reused for the state root computation so /// that allocations can be minimized. - accounts_trie: Arc>>>, + sparse_state_trie: Arc< + parking_lot::Mutex>>, + >, /// Whether to use the parallel sparse trie. 
use_parallel_sparse_trie: bool, _marker: std::marker::PhantomData, @@ -104,7 +105,7 @@ where evm_config, precompile_cache_disabled: config.precompile_cache_disabled(), precompile_cache_map, - accounts_trie: Arc::default(), + sparse_state_trie: Arc::default(), use_parallel_sparse_trie: config.enable_parallel_sparse_trie(), _marker: Default::default(), } @@ -209,17 +210,8 @@ where // wire the sparse trie to the state root response receiver let (state_root_tx, state_root_rx) = channel(); - // Take the stored accounts trie - let stored_accounts_trie = self.accounts_trie.lock().take(); - // Spawn the sparse trie task using any stored trie and parallel trie configuration. - self.spawn_sparse_trie_task( - sparse_trie_rx, - proof_task.handle(), - state_root_tx, - stored_accounts_trie, - self.use_parallel_sparse_trie, - ); + self.spawn_sparse_trie_task(sparse_trie_rx, proof_task.handle(), state_root_tx); // spawn the proof task self.executor.spawn_blocking(move || { @@ -320,128 +312,52 @@ where }) } - /// Generic function to spawn a sparse trie task for any trie type that can be converted to - /// `ConfiguredSparseTrie`. - fn spawn_trie_task( + /// Spawns the [`SparseTrieTask`] for this payload processor. + fn spawn_sparse_trie_task( &self, sparse_trie_rx: mpsc::Receiver, proof_task_handle: BPF, state_root_tx: mpsc::Sender>, - sparse_trie: Option>, ) where BPF: TrieNodeProviderFactory + Clone + Send + Sync + 'static, BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, - A: SparseTrieInterface + Send + Sync + Default + 'static, - ConfiguredSparseTrie: From, { - let mut task = SparseTrieTask::<_, A, SerialSparseTrie>::new_with_stored_trie( - self.executor.clone(), - sparse_trie_rx, - proof_task_handle, - self.trie_metrics.clone(), - sparse_trie, - ); + // Reuse a stored SparseStateTrie, or create a new one using the desired configuration if + // there's none to reuse. + let cleared_sparse_trie = Arc::clone(&self.sparse_state_trie); + let sparse_state_trie = cleared_sparse_trie.lock().take().unwrap_or_else(|| { + let accounts_trie = if self.use_parallel_sparse_trie { + ConfiguredSparseTrie::Parallel(Default::default()) + } else { + ConfiguredSparseTrie::Serial(Default::default()) + }; + ClearedSparseStateTrie::from_state_trie( + SparseStateTrie::new() + .with_accounts_trie(SparseTrie::Blind(Some(Box::new(accounts_trie)))) + .with_updates(true), + ) + }); + + let task = + SparseTrieTask::<_, ConfiguredSparseTrie, SerialSparseTrie>::new_with_cleared_trie( + self.executor.clone(), + sparse_trie_rx, + proof_task_handle, + self.trie_metrics.clone(), + sparse_state_trie, + ); - let accounts_trie = Arc::clone(&self.accounts_trie); self.executor.spawn_blocking(move || { let (result, trie) = task.run(); // Send state root computation result let _ = state_root_tx.send(result); - // Clear and return accounts trie back to the payload processor - let trie = match trie { - SparseTrie::Blind(opt) => { - SparseTrie::Blind(opt.map(|t| Box::new(ConfiguredSparseTrie::from(*t)))) - } - SparseTrie::Revealed(t) => { - SparseTrie::Revealed(Box::new(ConfiguredSparseTrie::from(*t))) - } - }; - accounts_trie.lock().replace(trie.clear()); + // Clear the SparseStateTrie and replace it back into the mutex _after_ sending results + // to the next step, so that time spent clearing doesn't block the step after this one. 
+ cleared_sparse_trie.lock().replace(ClearedSparseStateTrie::from_state_trie(trie)); }); } - - /// Helper to dispatch trie spawn based on the `ConfiguredSparseTrie` variant - fn dispatch_trie_spawn( - &self, - configured_trie: ConfiguredSparseTrie, - sparse_trie_rx: mpsc::Receiver, - proof_task_handle: BPF, - state_root_tx: mpsc::Sender>, - is_revealed: bool, - ) where - BPF: TrieNodeProviderFactory + Clone + Send + Sync + 'static, - BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, - BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, - { - match configured_trie { - ConfiguredSparseTrie::Serial(boxed_serial) => { - let trie = if is_revealed { - Some(SparseTrie::Revealed(boxed_serial)) - } else { - Some(SparseTrie::Blind(Some(boxed_serial))) - }; - self.spawn_trie_task(sparse_trie_rx, proof_task_handle, state_root_tx, trie); - } - ConfiguredSparseTrie::Parallel(boxed_parallel) => { - let trie = if is_revealed { - Some(SparseTrie::Revealed(boxed_parallel)) - } else { - Some(SparseTrie::Blind(Some(boxed_parallel))) - }; - self.spawn_trie_task(sparse_trie_rx, proof_task_handle, state_root_tx, trie); - } - } - } - - /// Helper method that handles sparse trie task spawning. - /// - /// If we have a stored trie, we will reuse it for spawning. If we do not have a stored trie, - /// we will create a new trie based on the configured trie type (parallel or serial). - fn spawn_sparse_trie_task( - &self, - sparse_trie_rx: mpsc::Receiver, - proof_task_handle: BPF, - state_root_tx: mpsc::Sender>, - stored_accounts_trie: Option>, - use_parallel_for_new: bool, - ) where - BPF: TrieNodeProviderFactory + Clone + Send + Sync + 'static, - BPF::AccountNodeProvider: TrieNodeProvider + Send + Sync, - BPF::StorageNodeProvider: TrieNodeProvider + Send + Sync, - { - let is_revealed = stored_accounts_trie.as_ref().is_some_and(|trie| trie.is_revealed()); - match stored_accounts_trie { - Some(SparseTrie::Revealed(boxed) | SparseTrie::Blind(Some(boxed))) => { - self.dispatch_trie_spawn( - *boxed, - sparse_trie_rx, - proof_task_handle, - state_root_tx, - is_revealed, - ); - } - _ => { - // No stored trie, create new based on config - if use_parallel_for_new { - self.spawn_trie_task::<_, ParallelSparseTrie>( - sparse_trie_rx, - proof_task_handle, - state_root_tx, - None, - ); - } else { - self.spawn_trie_task::<_, SerialSparseTrie>( - sparse_trie_rx, - proof_task_handle, - state_root_tx, - None, - ); - } - } - } - } } /// Handle to all the spawned tasks. diff --git a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs index 4242752867b..9879a2c58bf 100644 --- a/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs +++ b/crates/engine/tree/src/tree/payload_processor/sparse_trie.rs @@ -11,7 +11,7 @@ use reth_trie_parallel::root::ParallelStateRootError; use reth_trie_sparse::{ errors::{SparseStateTrieResult, SparseTrieErrorKind}, provider::{TrieNodeProvider, TrieNodeProviderFactory}, - SerialSparseTrie, SparseStateTrie, SparseTrie, SparseTrieInterface, + ClearedSparseStateTrie, SerialSparseTrie, SparseStateTrie, SparseTrieInterface, }; use std::{ sync::mpsc, @@ -31,9 +31,7 @@ where pub(super) executor: WorkloadExecutor, /// Receives updates from the state root task. pub(super) updates: mpsc::Receiver, - /// Sparse Trie initialized with the blinded provider factory. - /// - /// It's kept as a field on the struct to prevent blocking on de-allocation in [`Self::run`]. + /// `SparseStateTrie` used for computing the state root. 
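+    ///
+    /// [`Self::run`] hands this trie back to the caller so it can be cleared and its
+    /// allocations re-used for the next payload.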
pub(super) trie: SparseStateTrie, pub(super) metrics: MultiProofTaskMetrics, /// Trie node provider factory. @@ -48,80 +46,39 @@ where A: SparseTrieInterface + Send + Sync + Default, S: SparseTrieInterface + Send + Sync + Default, { - /// Creates a new sparse trie task. - pub(super) fn new( + /// Creates a new sparse trie, pre-populating with a [`ClearedSparseStateTrie`]. + pub(super) fn new_with_cleared_trie( executor: WorkloadExecutor, updates: mpsc::Receiver, blinded_provider_factory: BPF, metrics: MultiProofTaskMetrics, + sparse_state_trie: ClearedSparseStateTrie, ) -> Self { Self { executor, updates, metrics, - trie: SparseStateTrie::new().with_updates(true), + trie: sparse_state_trie.into_inner(), blinded_provider_factory, } } - /// Creates a new sparse trie, populating the accounts trie with the given `SparseTrie`, if it - /// exists. - pub(super) fn new_with_stored_trie( - executor: WorkloadExecutor, - updates: mpsc::Receiver, - blinded_provider_factory: BPF, - trie_metrics: MultiProofTaskMetrics, - sparse_trie: Option>, - ) -> Self { - if let Some(sparse_trie) = sparse_trie { - Self::with_accounts_trie( - executor, - updates, - blinded_provider_factory, - trie_metrics, - sparse_trie, - ) - } else { - Self::new(executor, updates, blinded_provider_factory, trie_metrics) - } - } - - /// Creates a new sparse trie task, using the given [`SparseTrie::Blind`] for the accounts - /// trie. - pub(super) fn with_accounts_trie( - executor: WorkloadExecutor, - updates: mpsc::Receiver, - blinded_provider_factory: BPF, - metrics: MultiProofTaskMetrics, - sparse_trie: SparseTrie, - ) -> Self { - debug_assert!(sparse_trie.is_blind()); - let trie = SparseStateTrie::new().with_updates(true).with_accounts_trie(sparse_trie); - Self { executor, updates, metrics, trie, blinded_provider_factory } - } - /// Runs the sparse trie task to completion. /// /// This waits for new incoming [`SparseTrieUpdate`]. /// /// This concludes once the last trie update has been received. /// - /// NOTE: This function does not take `self` by value to prevent blocking on [`SparseStateTrie`] - /// drop. - /// /// # Returns /// /// - State root computation outcome. - /// - Accounts trie that needs to be cleared and reused to avoid reallocations. + /// - `SparseStateTrie` that needs to be cleared and reused to avoid reallocations. pub(super) fn run( - &mut self, - ) -> (Result, SparseTrie) { + mut self, + ) -> (Result, SparseStateTrie) { // run the main loop to completion let result = self.run_inner(); - // take the account trie so that we can reuse its already allocated data structures. - let trie = self.trie.take_accounts_trie(); - - (result, trie) + (result, self.trie) } /// Inner function to run the sparse trie task to completion. diff --git a/crates/trie/sparse/src/state.rs b/crates/trie/sparse/src/state.rs index 133b8dacbef..0739d6946a3 100644 --- a/crates/trie/sparse/src/state.rs +++ b/crates/trie/sparse/src/state.rs @@ -20,6 +20,36 @@ use reth_trie_common::{ }; use tracing::trace; +/// Provides type-safe re-use of cleared [`SparseStateTrie`]s, which helps to save allocations +/// across payload runs. 
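+///
+/// Build one with [`Self::from_state_trie`] once a payload run has finished, and take the
+/// inner [`SparseStateTrie`] back out with [`Self::into_inner`] before the next run.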
+#[derive(Debug)] +pub struct ClearedSparseStateTrie< + A = SerialSparseTrie, // Account trie implementation + S = SerialSparseTrie, // Storage trie implementation +>(SparseStateTrie); + +impl ClearedSparseStateTrie +where + A: SparseTrieInterface + Default, + S: SparseTrieInterface + Default, +{ + /// Creates a [`ClearedSparseStateTrie`] by clearing all the existing internal state of a + /// [`SparseStateTrie`] and then storing that instance for later re-use. + pub fn from_state_trie(mut trie: SparseStateTrie) -> Self { + trie.state = trie.state.clear(); + trie.cleared_storages.extend(trie.storages.drain().map(|(_, trie)| trie.clear())); + trie.revealed_account_paths.clear(); + trie.revealed_storage_paths.clear(); + trie.account_rlp_buf.clear(); + Self(trie) + } + + /// Returns the cleared [`SparseStateTrie`], consuming this instance. + pub fn into_inner(self) -> SparseStateTrie { + self.0 + } +} + #[derive(Debug)] /// Sparse state trie representing lazy-loaded Ethereum state trie. pub struct SparseStateTrie< @@ -30,6 +60,8 @@ pub struct SparseStateTrie< state: SparseTrie, /// Sparse storage tries. storages: B256Map>, + /// Cleared storage tries, kept for re-use + cleared_storages: Vec>, /// Collection of revealed account trie paths. revealed_account_paths: HashSet, /// Collection of revealed storage trie paths, per account. @@ -52,6 +84,7 @@ where Self { state: Default::default(), storages: Default::default(), + cleared_storages: Default::default(), revealed_account_paths: Default::default(), revealed_storage_paths: Default::default(), retain_updates: false, @@ -70,16 +103,7 @@ impl SparseStateTrie { } } -impl SparseStateTrie -where - A: SparseTrieInterface + Default, - S: SparseTrieInterface + Default, -{ - /// Create new [`SparseStateTrie`] - pub fn new() -> Self { - Self::default() - } - +impl SparseStateTrie { /// Set the retention of branch node updates and deletions. pub const fn with_updates(mut self, retain_updates: bool) -> Self { self.retain_updates = retain_updates; @@ -91,10 +115,16 @@ where self.state = trie; self } +} - /// Takes the accounts trie. - pub fn take_accounts_trie(&mut self) -> SparseTrie { - core::mem::take(&mut self.state) +impl SparseStateTrie +where + A: SparseTrieInterface + Default, + S: SparseTrieInterface + Default, +{ + /// Create new [`SparseStateTrie`] + pub fn new() -> Self { + Self::default() } /// Returns `true` if account was already revealed. @@ -166,6 +196,16 @@ where self.storages.insert(address, storage_trie); } + /// Retrieves the storage trie for the given address, creating a new one if it doesn't exist. + /// + /// This method should always be used to create a storage trie, as it will re-use previously + /// allocated and cleared storage tries when possible. + fn get_or_create_storage_trie(&mut self, address: B256) -> &mut SparseTrie { + self.storages + .entry(address) + .or_insert_with(|| self.cleared_storages.pop().unwrap_or_default()) + } + /// Reveal unknown trie paths from multiproof. /// NOTE: This method does not extensively validate the proof. pub fn reveal_multiproof(&mut self, multiproof: MultiProof) -> SparseStateTrieResult<()> { @@ -302,10 +342,11 @@ where if let Some(root_node) = root_node { // Reveal root node if it wasn't already. 
trace!(target: "trie::sparse", ?account, ?root_node, "Revealing root storage node"); - let trie = self.storages.entry(account).or_default().reveal_root( + let retain_updates = self.retain_updates; + let trie = self.get_or_create_storage_trie(account).reveal_root( root_node.node, root_node.masks, - self.retain_updates, + retain_updates, )?; // Reserve the capacity for new nodes ahead of time, if the trie implementation @@ -380,13 +421,14 @@ where .get(&account) .is_none_or(|paths| !paths.contains(&path)) { - let storage_trie_entry = self.storages.entry(account).or_default(); + let retain_updates = self.retain_updates; + let storage_trie_entry = self.get_or_create_storage_trie(account); if path.is_empty() { // Handle special storage state root node case. storage_trie_entry.reveal_root( trie_node, TrieMasks::none(), - self.retain_updates, + retain_updates, )?; } else { // Reveal non-root storage trie node. From 1eff10d871b494e650ac06e69a76519209bdfe40 Mon Sep 17 00:00:00 2001 From: David Klank <155117116+davidjsonn@users.noreply.github.com> Date: Mon, 21 Jul 2025 21:11:34 +0300 Subject: [PATCH 244/305] docs: fix typo in OpReceiptBuilder comment (#17540) --- crates/optimism/rpc/src/eth/receipt.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/optimism/rpc/src/eth/receipt.rs b/crates/optimism/rpc/src/eth/receipt.rs index cd16c4e1664..edf16900f04 100644 --- a/crates/optimism/rpc/src/eth/receipt.rs +++ b/crates/optimism/rpc/src/eth/receipt.rs @@ -295,7 +295,7 @@ impl OpReceiptBuilder { Ok(Self { core_receipt, op_receipt_fields }) } - /// Builds [`OpTransactionReceipt`] by combing core (l1) receipt fields and additional OP + /// Builds [`OpTransactionReceipt`] by combining core (l1) receipt fields and additional OP /// receipt fields. 
pub fn build(self) -> OpTransactionReceipt { let Self { core_receipt: inner, op_receipt_fields } = self; From f532e49d2df3f75aa0560837e228c76a83e0b76e Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Mon, 21 Jul 2025 22:17:46 +0200 Subject: [PATCH 245/305] chore(deps): bump inspectors 027 (#17543) --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 92955a7b15d..978b63b02cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10803,9 +10803,9 @@ dependencies = [ [[package]] name = "revm-inspectors" -version = "0.26.5" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7b99a2332cf8eed9e9a22fffbf76dfadc99d2c45de6ae6431a1eb9f657dd97a" +checksum = "aad27cab355b0aa905d0744f3222e716b40ad48b32276ac4b0a615f2c3364c97" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", diff --git a/Cargo.toml b/Cargo.toml index 14da2c3b7e1..4fcd3dcae2b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -465,7 +465,7 @@ revm-context = { version = "8.0.2", default-features = false } revm-context-interface = { version = "8.0.1", default-features = false } revm-database-interface = { version = "7.0.1", default-features = false } op-revm = { version = "8.0.3", default-features = false } -revm-inspectors = "0.26.5" +revm-inspectors = "0.27.1" # eth alloy-chains = { version = "0.2.5", default-features = false } From 39f1ee879517eae7d377ef5905c8bf517aab5110 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Mon, 21 Jul 2025 21:18:45 +0100 Subject: [PATCH 246/305] feat(reth-bench): auto-create output directory (#17541) Co-authored-by: Claude --- bin/reth-bench/src/bench/context.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/bin/reth-bench/src/bench/context.rs b/bin/reth-bench/src/bench/context.rs index 57b4067999a..c4006dc8155 100644 --- a/bin/reth-bench/src/bench/context.rs +++ b/bin/reth-bench/src/bench/context.rs @@ -36,11 +36,16 @@ impl BenchContext { pub(crate) async fn new(bench_args: &BenchmarkArgs, rpc_url: String) -> eyre::Result { info!("Running benchmark using data from RPC URL: {}", rpc_url); - // Ensure that output directory is a directory + // Ensure that output directory exists and is a directory if let Some(output) = &bench_args.output { if output.is_file() { return Err(eyre::eyre!("Output path must be a directory")); } + // Create the directory if it doesn't exist + if !output.exists() { + std::fs::create_dir_all(output)?; + info!("Created output directory: {:?}", output); + } } // set up alloy client for blocks From 7b76a1e00fe2da5eb8cabd7d4e00bd9b9dfe6774 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 22 Jul 2025 11:47:27 +0300 Subject: [PATCH 247/305] chore: relax EthereumEthApiBuilder bound (#17546) --- crates/ethereum/node/src/node.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index f74db2fdc9f..ccaf9e209a5 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -5,6 +5,7 @@ use crate::{EthEngineTypes, EthEvmConfig}; use alloy_eips::{eip7840::BlobParams, merge::EPOCH_SLOTS}; use alloy_network::Ethereum; use alloy_rpc_types_engine::ExecutionData; +use alloy_rpc_types_eth::TransactionRequest; use reth_chainspec::{ChainSpec, EthChainSpec, EthereumHardforks, Hardforks}; use reth_engine_local::LocalPayloadAttributesBuilder; use reth_engine_primitives::EngineTypes; @@ -41,7 
+42,7 @@ use reth_rpc::{ }; use reth_rpc_api::servers::BlockSubmissionValidationApiServer; use reth_rpc_builder::{config::RethRpcServerConfig, middleware::RethRpcMiddleware}; -use reth_rpc_eth_api::{helpers::pending_block::BuildPendingEnv, RpcConvert}; +use reth_rpc_eth_api::{helpers::pending_block::BuildPendingEnv, RpcConvert, SignableTxRequest}; use reth_rpc_eth_types::{error::FromEvmError, EthApiError}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; @@ -141,7 +142,7 @@ pub struct EthereumEthApiBuilder; impl EthApiBuilder for EthereumEthApiBuilder where N: FullNodeComponents< - Types: NodeTypes, + Types: NodeTypes, Evm: ConfigureEvm>>, >, EthRpcConverterFor: RpcConvert< @@ -150,6 +151,7 @@ where Error = EthApiError, Network = Ethereum, >, + TransactionRequest: SignableTxRequest>, EthApiError: FromEvmError, { type EthApi = EthApiFor; From 58e6113584046d5d5e0c931f7a3d4ac555f69cc5 Mon Sep 17 00:00:00 2001 From: adust Date: Tue, 22 Jul 2025 18:34:53 +0900 Subject: [PATCH 248/305] feat: implement DatabaseProviderFactory for NoopProvider (#17134) Co-authored-by: Claude Co-authored-by: Emilia Hane --- crates/storage/storage-api/src/noop.rs | 76 +++++++++++++++++++++++++- 1 file changed, 73 insertions(+), 3 deletions(-) diff --git a/crates/storage/storage-api/src/noop.rs b/crates/storage/storage-api/src/noop.rs index 5eff34025d0..0409bfad62b 100644 --- a/crates/storage/storage-api/src/noop.rs +++ b/crates/storage/storage-api/src/noop.rs @@ -8,6 +8,9 @@ use crate::{ StateProvider, StateProviderBox, StateProviderFactory, StateRootProvider, StorageRootProvider, TransactionVariant, TransactionsProvider, }; + +#[cfg(feature = "db-api")] +use crate::{DBProvider, DatabaseProviderFactory}; use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec}; use alloy_consensus::transaction::TransactionMeta; use alloy_eips::{BlockHashOrNumber, BlockId, BlockNumberOrTag}; @@ -20,9 +23,13 @@ use core::{ ops::{RangeBounds, RangeInclusive}, }; use reth_chainspec::{ChainInfo, ChainSpecProvider, EthChainSpec, MAINNET}; +#[cfg(feature = "db-api")] +use reth_db_api::mock::{DatabaseMock, TxMock}; use reth_db_models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_ethereum_primitives::EthPrimitives; use reth_primitives_traits::{Account, Bytecode, NodePrimitives, RecoveredBlock, SealedHeader}; +#[cfg(feature = "db-api")] +use reth_prune_types::PruneModes; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; @@ -36,20 +43,38 @@ use reth_trie_common::{ #[non_exhaustive] pub struct NoopProvider { chain_spec: Arc, + #[cfg(feature = "db-api")] + tx: TxMock, + #[cfg(feature = "db-api")] + prune_modes: PruneModes, _phantom: PhantomData, } impl NoopProvider { /// Create a new instance for specific primitive types. pub fn new(chain_spec: Arc) -> Self { - Self { chain_spec, _phantom: Default::default() } + Self { + chain_spec, + #[cfg(feature = "db-api")] + tx: TxMock::default(), + #[cfg(feature = "db-api")] + prune_modes: PruneModes::none(), + _phantom: Default::default(), + } } } impl NoopProvider { /// Create a new instance of the `NoopBlockReader`. 
pub fn eth(chain_spec: Arc) -> Self { - Self { chain_spec, _phantom: Default::default() } + Self { + chain_spec, + #[cfg(feature = "db-api")] + tx: TxMock::default(), + #[cfg(feature = "db-api")] + prune_modes: PruneModes::none(), + _phantom: Default::default(), + } } } @@ -68,7 +93,14 @@ impl Default for NoopProvider { impl Clone for NoopProvider { fn clone(&self) -> Self { - Self { chain_spec: Arc::clone(&self.chain_spec), _phantom: Default::default() } + Self { + chain_spec: Arc::clone(&self.chain_spec), + #[cfg(feature = "db-api")] + tx: self.tx.clone(), + #[cfg(feature = "db-api")] + prune_modes: self.prune_modes.clone(), + _phantom: Default::default(), + } } } @@ -558,3 +590,41 @@ impl BlockBodyIndicesProvider for NoopProvider DBProvider for NoopProvider { + type Tx = TxMock; + + fn tx_ref(&self) -> &Self::Tx { + &self.tx + } + + fn tx_mut(&mut self) -> &mut Self::Tx { + &mut self.tx + } + + fn into_tx(self) -> Self::Tx { + self.tx + } + + fn prune_modes_ref(&self) -> &PruneModes { + &self.prune_modes + } +} + +#[cfg(feature = "db-api")] +impl DatabaseProviderFactory + for NoopProvider +{ + type DB = DatabaseMock; + type Provider = Self; + type ProviderRW = Self; + + fn database_provider_ro(&self) -> ProviderResult { + Ok(self.clone()) + } + + fn database_provider_rw(&self) -> ProviderResult { + Ok(self.clone()) + } +} From 3ab5bac40c3dc06384abf3778cd5c1608deb225d Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Jul 2025 12:57:48 +0200 Subject: [PATCH 249/305] chore: bump deps (#17554) --- Cargo.lock | 176 +++++++++++++-------------- Cargo.toml | 72 +++++------ crates/rpc/rpc-api/src/mev.rs | 10 +- crates/rpc/rpc-eth-api/src/bundle.rs | 8 +- crates/rpc/rpc/src/eth/sim_bundle.rs | 10 +- 5 files changed, 137 insertions(+), 139 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 978b63b02cd..3af569d1a6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3b746060277f3d7f9c36903bb39b593a741cb7afcb0044164c28f0e9b673f0" +checksum = "1b6093bc69509849435a2d68237a2e9fea79d27390c8e62f1e4012c460aabad8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -138,9 +138,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf98679329fa708fa809ea596db6d974da892b068ad45e48ac1956f582edf946" +checksum = "8d1cfed4fefd13b5620cb81cdb6ba397866ff0de514c1b24806e6e79cdff5570" dependencies = [ "alloy-consensus", "alloy-eips", @@ -153,9 +153,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a10e47f5305ea08c37b1772086c1573e9a0a257227143996841172d37d3831bb" +checksum = "f28074a21cd4f7c3a7ab218c4f38fae6be73944e1feae3b670c68b60bf85ca40" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -175,9 +175,9 @@ dependencies = [ [[package]] name = "alloy-dyn-abi" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b95b3deca680efc7e9cba781f1a1db352fa1ea50e6384a514944dcf4419e652" +checksum = "d9e8a436f0aad7df8bb47f144095fba61202265d9f5f09a70b0e3227881a668e" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -236,9 +236,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.22" +version = 
"1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f562a81278a3ed83290e68361f2d1c75d018ae3b8589a314faf9303883e18ec9" +checksum = "5937e2d544e9b71000942d875cbc57965b32859a666ea543cc57aae5a06d602d" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -279,9 +279,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc41384e9ab8c9b2fb387c52774d9d432656a28edcda1c2d4083e96051524518" +checksum = "c51b4c13e02a8104170a4de02ccf006d7c233e6c10ab290ee16e7041e6ac221d" dependencies = [ "alloy-eips", "alloy-primitives", @@ -307,9 +307,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15516116086325c157c18261d768a20677f0f699348000ed391d4ad0dcb82530" +checksum = "459f98c6843f208856f338bfb25e65325467f7aff35dfeb0484d0a76e059134b" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -319,9 +319,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12c454fcfcd5d26ed3b8cae5933cbee9da5f0b05df19b46d4bd4446d1f082565" +checksum = "b590caa6b6d8bc10e6e7a7696c59b1e550e89f27f50d1ee13071150d3a3e3f66" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -334,9 +334,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d6d39eabe5c7b3d8f23ac47b0b683b99faa4359797114636c66e0743103d05" +checksum = "36fe5af1fca03277daa56ad4ce5f6d623d3f4c2273ea30b9ee8674d18cefc1fa" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -360,9 +360,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3704fa8b7ba9ba3f378d99b3d628c8bc8c2fc431b709947930f154e22a8368b6" +checksum = "793df1e3457573877fbde8872e4906638fde565ee2d3bd16d04aad17d43dbf0e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -402,9 +402,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6177ed26655d4e84e00b65cb494d4e0b8830e7cae7ef5d63087d445a2600fb55" +checksum = "3cfebde8c581a5d37b678d0a48a32decb51efd7a63a08ce2517ddec26db705c8" dependencies = [ "alloy-rlp", "arbitrary", @@ -433,9 +433,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08800e8cbe70c19e2eb7cf3d7ff4b28bdd9b3933f8e1c8136c7d910617ba03bf" +checksum = "d59879a772ebdcde9dc4eb38b2535d32e8503d3175687cc09e763a625c5fcf32" dependencies = [ "alloy-chains", "alloy-consensus", @@ -479,9 +479,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae68457a2c2ead6bd7d7acb5bf5f1623324b1962d4f8e7b0250657a3c3ab0a0b" +checksum = "fbdfb2899b54b7cb0063fa8e61938320f9be6b81b681be69c203abf130a87baa" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -522,9 +522,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.22" +version = "1.0.23" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "162301b5a57d4d8f000bf30f4dcb82f9f468f3e5e846eeb8598dd39e7886932c" +checksum = "7f060e3bb9f319eb01867a2d6d1ff9e0114e8877f5ca8f5db447724136106cae" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -548,9 +548,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cd8ca94ae7e2b32cc3895d9981f3772aab0b4756aa60e9ed0bcfee50f0e1328" +checksum = "d47b637369245d2dafef84b223b1ff5ea59e6cd3a98d2d3516e32788a0b216df" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -561,9 +561,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bff682e76f3f72e9ddc75e54a1bd1db5ce53cbdf2cce2d63a3a981437f78f5" +checksum = "db29bf8f7c961533b017f383122cab6517c8da95712cf832e23c60415d520a58" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -573,9 +573,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3ff6a778ebda3deaed9af17930d678611afe1effa895c4260b61009c314f82" +checksum = "c0b1f499acb3fc729615147bc113b8b798b17379f19d43058a687edc5792c102" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -585,9 +585,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076b47e834b367d8618c52dd0a0d6a711ddf66154636df394805300af4923b8a" +checksum = "1e26b4dd90b33bd158975307fb9cf5fafa737a0e33cbb772a8648bf8be13c104" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -596,9 +596,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48f39da9b760e78fc3f347fba4da257aa6328fb33f73682b26cc0a6874798f7d" +checksum = "9196cbbf4b82a3cc0c471a8e68ccb30102170d930948ac940d2bceadc1b1346b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -614,9 +614,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94a2a86ad7b7d718c15e79d0779bd255561b6b22968dc5ed2e7c0fbc43bb55fe" +checksum = "71841e6fc8e221892035a74f7d5b279c0a2bf27a7e1c93e7476c64ce9056624e" dependencies = [ "alloy-primitives", "serde", @@ -624,9 +624,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ba838417c42e8f1fe5eb4f4bbfacb7b5d4b9e615b8d2e831b921e04bf0bed62" +checksum = "f2f9cbf5f781b9ee39cfdddea078fdef6015424f4c8282ef0e5416d15ca352c4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -645,9 +645,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c2f847e635ec0be819d06e2ada4bcc4e4204026a83c4bfd78ae8d550e027ae7" +checksum = "46586ec3c278639fc0e129f0eb73dbfa3d57f683c44b2ff5e066fab7ba63fa1f" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -667,9 +667,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.0.22" 
+version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb1c9b23cedf70aeb99ea9f16b78cdf902f524e227922fb340e3eb899ebe96dc" +checksum = "79b6e80b501842c3f5803dd5752ae41b61f43bf6d2e1b8d29999d3312d67a8a5" dependencies = [ "alloy-consensus", "alloy-eips", @@ -682,9 +682,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fc58180302a94c934d455eeedb3ecb99cdc93da1dbddcdbbdb79dd6fe618b2a" +checksum = "bc9a2184493c374ca1dbba9569d37215c23e489970f8c3994f731cb3ed6b0b7d" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -696,9 +696,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f9f089d78bb94148e0fcfda087d4ce5fd35a7002847b5e90610c0fcb140f7b4" +checksum = "a3aaf142f4f6c0bdd06839c422179bae135024407d731e6f365380f88cd4730e" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -708,9 +708,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae699248d02ade9db493bbdae61822277dc14ae0f82a5a4153203b60e34422a6" +checksum = "1e1722bc30feef87cc0fa824e43c9013f9639cc6c037be7be28a31361c788be2" dependencies = [ "alloy-primitives", "arbitrary", @@ -720,9 +720,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cf7d793c813515e2b627b19a15693960b3ed06670f9f66759396d06ebe5747b" +checksum = "d3674beb29e68fbbc7be302b611cf35fe07b736e308012a280861df5a2361395" dependencies = [ "alloy-primitives", "async-trait", @@ -735,9 +735,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51a424bc5a11df0d898ce0fd15906b88ebe2a6e4f17a514b51bc93946bb756bd" +checksum = "ad7094c39cd41b03ed642145b0bd37251e31a9cf2ed19e1ce761f089867356a6" dependencies = [ "alloy-consensus", "alloy-network", @@ -753,9 +753,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a14f21d053aea4c6630687c2f4ad614bed4c81e14737a9b904798b24f30ea849" +checksum = "aedac07a10d4c2027817a43cc1f038313fc53c7ac866f7363239971fd01f9f18" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -767,9 +767,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d99282e7c9ef14eb62727981a985a01869e586d1dec729d3bb33679094c100" +checksum = "24f9a598f010f048d8b8226492b6401104f5a5c1273c2869b72af29b48bb4ba9" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -785,9 +785,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda029f955b78e493360ee1d7bd11e1ab9f2a220a5715449babc79d6d0a01105" +checksum = "f494adf9d60e49aa6ce26dfd42c7417aa6d4343cf2ae621f20e4d92a5ad07d85" dependencies = [ "const-hex", "dunce", @@ -801,9 +801,9 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "1.2.1" +version 
= "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10db1bd7baa35bc8d4a1b07efbf734e73e5ba09f2580fb8cee3483a36087ceb2" +checksum = "52db32fbd35a9c0c0e538b58b81ebbae08a51be029e7ad60e08b60481c2ec6c3" dependencies = [ "serde", "winnow", @@ -811,9 +811,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58377025a47d8b8426b3e4846a251f2c1991033b27f517aade368146f6ab1dfe" +checksum = "a285b46e3e0c177887028278f04cc8262b76fd3b8e0e20e93cea0a58c35f5ac5" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -823,9 +823,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f317d20f047b3de4d9728c556e2e9a92c9a507702d2016424cd8be13a74ca5e" +checksum = "f89bec2f59a41c0e259b6fe92f78dfc49862c17d10f938db9c33150d5a7f42b6" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -846,9 +846,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff084ac7b1f318c87b579d221f11b748341d68b9ddaa4ffca5e62ed2b8cfefb4" +checksum = "0d3615ec64d775fec840f4e9d5c8e1f739eb1854d8d28db093fb3d4805e0cb53" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -861,9 +861,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb099cdad8ed2e6a80811cdf9bbf715ebf4e34c981b4a6e2d1f9daacbf8b218" +checksum = "374db72669d8ee09063b9aa1a316e812d5cdfce7fc9a99a3eceaa0e5512300d2" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -881,9 +881,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e915e1250dc129ad48d264573ccd08e4716fdda564a772fd217875b8459aff9" +checksum = "f5dbaa6851875d59c8803088f4b6ec72eaeddf7667547ae8995c1a19fbca6303" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -919,9 +919,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1154c8187a5ff985c95a8b2daa2fedcf778b17d7668e5e50e556c4ff9c881154" +checksum = "9f916ff6d52f219c44a9684aea764ce2c7e1d53bd4a724c9b127863aeacc30bb" dependencies = [ "alloy-primitives", "darling", @@ -5975,9 +5975,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "op-alloy-consensus" -version = "0.18.11" +version = "0.18.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18986c5cf19a790b8b9e8c856a950b48ed6dd6a0259d0efd5f5c9bebbba1fc3a" +checksum = "eda4af86c3185b06f8d70986a591c087f054c5217cc7ce53cd0ec36dc42d7425" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6001,9 +6001,9 @@ checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc" [[package]] name = "op-alloy-network" -version = "0.18.11" +version = "0.18.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac69810db9294e1de90b2cc6688b213399d8a5c96b283220caddd98a65dcbc39" +checksum = "ab526485e1aee4dbd929aaa431aaa9db8678c936ee7d1449760f783ae45afa01" dependencies = [ "alloy-consensus", "alloy-network", @@ -6017,9 +6017,9 @@ 
dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.18.11" +version = "0.18.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "490c08acf608a3fd039728dc5b77a2ff903793db223509f4d94e43c22717a8f7" +checksum = "5f34feb6c3aef85c9ab9198f1402867030e54d13f6c66dda18235497ac808cb0" dependencies = [ "alloy-primitives", "jsonrpsee", @@ -6027,9 +6027,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.18.11" +version = "0.18.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7dd487b283473591919ba95829f7a8d27d511488948d2ee6b24b283dd83008f" +checksum = "98bfe0a4e1225930ffe288a9b3ce0d95c6fc2ee6696132e5ad7ecc7b0ee139a8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6046,9 +6046,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.18.11" +version = "0.18.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "814d2b82a6d0b973afc78e797a74818165f257041b9173016dccbe3647f8b1da" +checksum = "3a420102c1b857a4ba373fcaf674d5c0499fd3705ddce95be9a69f3561c337b3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11892,9 +11892,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ac494e7266fcdd2ad80bf4375d55d27a117ea5c866c26d0e97fe5b3caeeb75" +checksum = "a7a985ff4ffd7373e10e0fb048110fb11a162e5a4c47f92ddb8787a6f766b769" dependencies = [ "paste", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 4fcd3dcae2b..4983598f808 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -469,53 +469,53 @@ revm-inspectors = "0.27.1" # eth alloy-chains = { version = "0.2.5", default-features = false } -alloy-dyn-abi = "1.2.0" +alloy-dyn-abi = "1.3.0" alloy-eip2124 = { version = "0.2.0", default-features = false } alloy-evm = { version = "0.15", default-features = false } -alloy-primitives = { version = "1.2.0", default-features = false, features = ["map-foldhash"] } +alloy-primitives = { version = "1.3.0", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } -alloy-sol-macro = "1.2.0" -alloy-sol-types = { version = "1.2.0", default-features = false } +alloy-sol-macro = "1.3.0" +alloy-sol-types = { version = "1.3.0", default-features = false } alloy-trie = { version = "0.9.0", default-features = false } alloy-hardforks = "0.2.7" -alloy-consensus = { version = "1.0.22", default-features = false } -alloy-contract = { version = "1.0.22", default-features = false } -alloy-eips = { version = "1.0.22", default-features = false } -alloy-genesis = { version = "1.0.22", default-features = false } -alloy-json-rpc = { version = "1.0.22", default-features = false } -alloy-network = { version = "1.0.22", default-features = false } -alloy-network-primitives = { version = "1.0.22", default-features = false } -alloy-provider = { version = "1.0.22", features = ["reqwest"], default-features = false } -alloy-pubsub = { version = "1.0.22", default-features = false } -alloy-rpc-client = { version = "1.0.22", default-features = false } -alloy-rpc-types = { version = "1.0.22", features = ["eth"], default-features = false } -alloy-rpc-types-admin = { version = "1.0.22", default-features = false } -alloy-rpc-types-anvil = { version = "1.0.22", default-features = false } -alloy-rpc-types-beacon = { version = "1.0.22", default-features = false } -alloy-rpc-types-debug = { version = 
"1.0.22", default-features = false } -alloy-rpc-types-engine = { version = "1.0.22", default-features = false } -alloy-rpc-types-eth = { version = "1.0.22", default-features = false } -alloy-rpc-types-mev = { version = "1.0.22", default-features = false } -alloy-rpc-types-trace = { version = "1.0.22", default-features = false } -alloy-rpc-types-txpool = { version = "1.0.22", default-features = false } -alloy-serde = { version = "1.0.22", default-features = false } -alloy-signer = { version = "1.0.22", default-features = false } -alloy-signer-local = { version = "1.0.22", default-features = false } -alloy-transport = { version = "1.0.22" } -alloy-transport-http = { version = "1.0.22", features = ["reqwest-rustls-tls"], default-features = false } -alloy-transport-ipc = { version = "1.0.22", default-features = false } -alloy-transport-ws = { version = "1.0.22", default-features = false } +alloy-consensus = { version = "1.0.23", default-features = false } +alloy-contract = { version = "1.0.23", default-features = false } +alloy-eips = { version = "1.0.23", default-features = false } +alloy-genesis = { version = "1.0.23", default-features = false } +alloy-json-rpc = { version = "1.0.23", default-features = false } +alloy-network = { version = "1.0.23", default-features = false } +alloy-network-primitives = { version = "1.0.23", default-features = false } +alloy-provider = { version = "1.0.23", features = ["reqwest"], default-features = false } +alloy-pubsub = { version = "1.0.23", default-features = false } +alloy-rpc-client = { version = "1.0.23", default-features = false } +alloy-rpc-types = { version = "1.0.23", features = ["eth"], default-features = false } +alloy-rpc-types-admin = { version = "1.0.23", default-features = false } +alloy-rpc-types-anvil = { version = "1.0.23", default-features = false } +alloy-rpc-types-beacon = { version = "1.0.23", default-features = false } +alloy-rpc-types-debug = { version = "1.0.23", default-features = false } +alloy-rpc-types-engine = { version = "1.0.23", default-features = false } +alloy-rpc-types-eth = { version = "1.0.23", default-features = false } +alloy-rpc-types-mev = { version = "1.0.23", default-features = false } +alloy-rpc-types-trace = { version = "1.0.23", default-features = false } +alloy-rpc-types-txpool = { version = "1.0.23", default-features = false } +alloy-serde = { version = "1.0.23", default-features = false } +alloy-signer = { version = "1.0.23", default-features = false } +alloy-signer-local = { version = "1.0.23", default-features = false } +alloy-transport = { version = "1.0.23" } +alloy-transport-http = { version = "1.0.23", features = ["reqwest-rustls-tls"], default-features = false } +alloy-transport-ipc = { version = "1.0.23", default-features = false } +alloy-transport-ws = { version = "1.0.23", default-features = false } # op alloy-op-evm = { version = "0.15", default-features = false } alloy-op-hardforks = "0.2.2" -op-alloy-rpc-types = { version = "0.18.11", default-features = false } -op-alloy-rpc-types-engine = { version = "0.18.11", default-features = false } -op-alloy-network = { version = "0.18.11", default-features = false } -op-alloy-consensus = { version = "0.18.11", default-features = false } -op-alloy-rpc-jsonrpsee = { version = "0.18.11", default-features = false } +op-alloy-rpc-types = { version = "0.18.12", default-features = false } +op-alloy-rpc-types-engine = { version = "0.18.12", default-features = false } +op-alloy-network = { version = "0.18.12", default-features = false } +op-alloy-consensus = { 
version = "0.18.12", default-features = false } +op-alloy-rpc-jsonrpsee = { version = "0.18.12", default-features = false } op-alloy-flz = { version = "0.13.1", default-features = false } # misc diff --git a/crates/rpc/rpc-api/src/mev.rs b/crates/rpc/rpc-api/src/mev.rs index 76de76a079b..274fcbf9316 100644 --- a/crates/rpc/rpc-api/src/mev.rs +++ b/crates/rpc/rpc-api/src/mev.rs @@ -1,6 +1,4 @@ -use alloy_rpc_types_mev::{ - EthBundleHash, SendBundleRequest, SimBundleOverrides, SimBundleResponse, -}; +use alloy_rpc_types_mev::{EthBundleHash, MevSendBundle, SimBundleOverrides, SimBundleResponse}; use jsonrpsee::proc_macros::rpc; /// Mev rpc interface. @@ -12,7 +10,7 @@ pub trait MevSimApi { #[method(name = "simBundle")] async fn sim_bundle( &self, - bundle: SendBundleRequest, + bundle: MevSendBundle, sim_overrides: SimBundleOverrides, ) -> jsonrpsee::core::RpcResult; } @@ -26,7 +24,7 @@ pub trait MevFullApi { #[method(name = "sendBundle")] async fn send_bundle( &self, - request: SendBundleRequest, + request: MevSendBundle, ) -> jsonrpsee::core::RpcResult; /// Similar to `mev_sendBundle` but instead of submitting a bundle to the relay, it returns @@ -34,7 +32,7 @@ pub trait MevFullApi { #[method(name = "simBundle")] async fn sim_bundle( &self, - bundle: SendBundleRequest, + bundle: MevSendBundle, sim_overrides: SimBundleOverrides, ) -> jsonrpsee::core::RpcResult; } diff --git a/crates/rpc/rpc-eth-api/src/bundle.rs b/crates/rpc/rpc-eth-api/src/bundle.rs index 79e64fae02d..b47ef1b3bb3 100644 --- a/crates/rpc/rpc-eth-api/src/bundle.rs +++ b/crates/rpc/rpc-eth-api/src/bundle.rs @@ -4,8 +4,8 @@ use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_mev::{ - CancelPrivateTransactionRequest, EthBundleHash, EthCallBundle, EthCallBundleResponse, - EthCancelBundle, EthSendBundle, PrivateTransactionRequest, + EthBundleHash, EthCallBundle, EthCallBundleResponse, EthCancelBundle, + EthCancelPrivateTransaction, EthSendBundle, EthSendPrivateTransaction, }; use jsonrpsee::proc_macros::rpc; @@ -49,7 +49,7 @@ pub trait EthBundleApi { #[method(name = "sendPrivateTransaction")] async fn send_private_transaction( &self, - request: PrivateTransactionRequest, + request: EthSendPrivateTransaction, ) -> jsonrpsee::core::RpcResult; /// The `eth_sendPrivateRawTransaction` method can be used to send private transactions to @@ -67,6 +67,6 @@ pub trait EthBundleApi { #[method(name = "cancelPrivateTransaction")] async fn cancel_private_transaction( &self, - request: CancelPrivateTransactionRequest, + request: EthCancelPrivateTransaction, ) -> jsonrpsee::core::RpcResult; } diff --git a/crates/rpc/rpc/src/eth/sim_bundle.rs b/crates/rpc/rpc/src/eth/sim_bundle.rs index cfc11658575..67d94d8140f 100644 --- a/crates/rpc/rpc/src/eth/sim_bundle.rs +++ b/crates/rpc/rpc/src/eth/sim_bundle.rs @@ -6,8 +6,8 @@ use alloy_evm::overrides::apply_block_overrides; use alloy_primitives::U256; use alloy_rpc_types_eth::BlockId; use alloy_rpc_types_mev::{ - BundleItem, Inclusion, Privacy, RefundConfig, SendBundleRequest, SimBundleLogs, - SimBundleOverrides, SimBundleResponse, Validity, + BundleItem, Inclusion, MevSendBundle, Privacy, RefundConfig, SimBundleLogs, SimBundleOverrides, + SimBundleResponse, Validity, }; use jsonrpsee::core::RpcResult; use reth_evm::{ConfigureEvm, Evm}; @@ -88,7 +88,7 @@ where /// inclusion, validity and privacy settings from parent bundles. 
fn parse_and_flatten_bundle( &self, - request: &SendBundleRequest, + request: &MevSendBundle, ) -> Result>>, EthApiError> { let mut items = Vec::new(); @@ -219,7 +219,7 @@ where async fn sim_bundle_inner( &self, - request: SendBundleRequest, + request: MevSendBundle, overrides: SimBundleOverrides, logs: bool, ) -> Result { @@ -415,7 +415,7 @@ where { async fn sim_bundle( &self, - request: SendBundleRequest, + request: MevSendBundle, overrides: SimBundleOverrides, ) -> RpcResult { trace!("mev_simBundle called, request: {:?}, overrides: {:?}", request, overrides); From 48617dc33c5f1564e0e7f4f3abf15e7c383dc15b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Jul 2025 12:58:20 +0200 Subject: [PATCH 250/305] ci: mark system eest tests as passing (#17542) --- .github/assets/hive/expected_failures.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index da8cb1606d3..c5dda276186 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -101,9 +101,7 @@ eest/consume-rlp: - tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-zero_balance]-reth - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_log_length[fork_Prague-blockchain_test_engine-slice_bytes_False]-reth - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_log_length[fork_Prague-blockchain_test_engine-slice_bytes_True]-reth - - tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py::test_system_contract_errors[fork_Prague-blockchain_test-system_contract_reaches_gas_limit-system_contract_0x0000bbddc7ce488642fb579f8b00f3a590007251]-reth - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-nonzero_balance]-reth - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-zero_balance]-reth - - tests/prague/eip7002_el_triggerable_withdrawals/test_modified_withdrawal_contract.py::test_system_contract_errors[fork_Prague-blockchain_test-system_contract_reaches_gas_limit-system_contract_0x00000961ef480eb55e80d19ad83579a64c007002]-reth - tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-nonzero_balance]-reth - tests/prague/eip7002_el_triggerable_withdrawals/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test-deploy_after_fork-zero_balance]-reth From 53df3b803ad97fd692e2e0b79540556bffee8248 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Tue, 22 Jul 2025 14:04:37 +0300 Subject: [PATCH 251/305] feat: add `AddOns` for custom node example (#17544) --- Cargo.lock | 4 ++ crates/optimism/node/src/node.rs | 27 ++++++----- crates/optimism/rpc/Cargo.toml | 2 +- crates/optimism/rpc/src/eth/transaction.rs | 12 ++--- crates/rpc/rpc-convert/src/transaction.rs | 13 ++++-- examples/custom-node/Cargo.toml | 7 ++- examples/custom-node/src/engine.rs | 46 +++++++------------ examples/custom-node/src/engine_api.rs | 29 ++++-------- examples/custom-node/src/lib.rs | 36 +++++++++++---- examples/custom-node/src/rpc.rs | 53 ++++++++++++++++++++++ 10 files 
changed, 147 insertions(+), 82 deletions(-) create mode 100644 examples/custom-node/src/rpc.rs diff --git a/Cargo.lock b/Cargo.lock index 3af569d1a6f..a3b5a6f2fd1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3380,10 +3380,12 @@ dependencies = [ "alloy-eips", "alloy-evm", "alloy-genesis", + "alloy-network", "alloy-op-evm", "alloy-primitives", "alloy-rlp", "alloy-rpc-types-engine", + "alloy-rpc-types-eth", "alloy-serde", "async-trait", "derive_more", @@ -3391,6 +3393,7 @@ dependencies = [ "jsonrpsee", "modular-bitfield", "op-alloy-consensus", + "op-alloy-rpc-types", "op-alloy-rpc-types-engine", "op-revm", "reth-chain-state", @@ -6037,6 +6040,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", "alloy-serde", + "arbitrary", "derive_more", "op-alloy-consensus", "serde", diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index ae41e3d8ee0..4b2f713bcec 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -55,7 +55,7 @@ use reth_optimism_txpool::{ OpPooledTx, }; use reth_provider::{providers::ProviderFactoryBuilder, CanonStateSubscriptions}; -use reth_rpc_api::{DebugApiServer, L2EthApiExtServer}; +use reth_rpc_api::{eth::RpcTypes, DebugApiServer, L2EthApiExtServer}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::{ @@ -164,6 +164,17 @@ impl OpNode { .consensus(OpConsensusBuilder::default()) } + /// Returns [`OpAddOnsBuilder`] with configured arguments. + pub fn add_ons_builder(&self) -> OpAddOnsBuilder { + OpAddOnsBuilder::default() + .with_sequencer(self.args.sequencer.clone()) + .with_sequencer_headers(self.args.sequencer_headers.clone()) + .with_da_config(self.da_config.clone()) + .with_enable_tx_conditional(self.args.enable_tx_conditional) + .with_min_suggested_priority_fee(self.args.min_suggested_priority_fee) + .with_historical_rpc(self.args.historical_rpc.clone()) + } + /// Instantiates the [`ProviderFactoryBuilder`] for an opstack node. 
/// /// # Open a Providerfactory in read-only mode from a datadir @@ -224,14 +235,7 @@ where } fn add_ons(&self) -> Self::AddOns { - Self::AddOns::builder() - .with_sequencer(self.args.sequencer.clone()) - .with_sequencer_headers(self.args.sequencer_headers.clone()) - .with_da_config(self.da_config.clone()) - .with_enable_tx_conditional(self.args.enable_tx_conditional) - .with_min_suggested_priority_fee(self.args.min_suggested_priority_fee) - .with_historical_rpc(self.args.historical_rpc.clone()) - .build() + self.add_ons_builder().build() } } @@ -428,12 +432,11 @@ where impl NodeAddOns for OpAddOns where N: FullNodeComponents< - Types: OpFullNodeTypes, + Types: NodeTypes, Evm: ConfigureEvm, + Pool: TransactionPool, >, - N::Types: NodeTypes, EthB: EthApiBuilder, - ::Transaction: OpPooledTx, EV: EngineValidatorBuilder, EB: EngineApiBuilder, RpcMiddleware: RethRpcMiddleware, diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 51d0037c7e8..97f598628ef 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -14,7 +14,7 @@ workspace = true [dependencies] # reth reth-evm.workspace = true -reth-primitives-traits.workspace = true +reth-primitives-traits = { workspace = true, features = ["op"] } reth-storage-api.workspace = true reth-rpc-eth-api = { workspace = true, features = ["op"] } reth-rpc-eth-types.workspace = true diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 7b46db38cc1..89c72613b9b 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -3,8 +3,9 @@ use crate::{OpEthApi, OpEthApiError, SequencerClient}; use alloy_primitives::{Bytes, B256}; use alloy_rpc_types_eth::TransactionInfo; -use op_alloy_consensus::{transaction::OpTransactionInfo, OpTxEnvelope}; +use op_alloy_consensus::{transaction::OpTransactionInfo, OpTransaction}; use reth_optimism_primitives::DepositReceipt; +use reth_primitives_traits::SignedTransaction; use reth_rpc_eth_api::{ helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction}, try_into_op_tx_info, FromEthApiError, RpcConvert, RpcNodeCore, TxInfoMapper, @@ -109,18 +110,15 @@ impl OpTxInfoMapper { } } -impl TxInfoMapper<&OpTxEnvelope> for OpTxInfoMapper +impl TxInfoMapper<&T> for OpTxInfoMapper where + T: OpTransaction + SignedTransaction, Provider: ReceiptProvider, { type Out = OpTransactionInfo; type Err = ProviderError; - fn try_map( - &self, - tx: &OpTxEnvelope, - tx_info: TransactionInfo, - ) -> Result { + fn try_map(&self, tx: &T, tx_info: TransactionInfo) -> Result { try_into_op_tx_info(&self.provider, tx, tx_info) } } diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index 2d4aad69edd..2654bde0474 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -557,17 +557,22 @@ pub mod op { use op_alloy_rpc_types::OpTransactionRequest; use op_revm::OpTransaction; use reth_optimism_primitives::DepositReceipt; + use reth_primitives_traits::SignedTransaction; use reth_storage_api::{errors::ProviderError, ReceiptProvider}; /// Creates [`OpTransactionInfo`] by adding [`OpDepositInfo`] to [`TransactionInfo`] if `tx` is /// a deposit. 
- pub fn try_into_op_tx_info>( + pub fn try_into_op_tx_info( provider: &T, - tx: &OpTxEnvelope, + tx: &Tx, tx_info: TransactionInfo, - ) -> Result { + ) -> Result + where + Tx: op_alloy_consensus::OpTransaction + SignedTransaction, + T: ReceiptProvider, + { let deposit_meta = if tx.is_deposit() { - provider.receipt_by_hash(tx.tx_hash())?.and_then(|receipt| { + provider.receipt_by_hash(*tx.tx_hash())?.and_then(|receipt| { receipt.as_deposit_receipt().map(|receipt| OpDepositInfo { deposit_receipt_version: receipt.deposit_receipt_version, deposit_nonce: receipt.deposit_nonce, diff --git a/examples/custom-node/Cargo.toml b/examples/custom-node/Cargo.toml index f43f2eb1c43..787e4db5e51 100644 --- a/examples/custom-node/Cargo.toml +++ b/examples/custom-node/Cargo.toml @@ -13,7 +13,7 @@ reth-network-peers.workspace = true reth-node-builder.workspace = true reth-optimism-forks.workspace = true reth-db-api.workspace = true -reth-op = { workspace = true, features = ["node", "pool"] } +reth-op = { workspace = true, features = ["node", "pool", "rpc"] } reth-payload-builder.workspace = true reth-rpc-api.workspace = true reth-rpc-engine-api.workspace = true @@ -32,9 +32,12 @@ alloy-op-evm.workspace = true alloy-primitives.workspace = true alloy-rlp.workspace = true alloy-serde.workspace = true +alloy-network.workspace = true alloy-rpc-types-engine.workspace = true +alloy-rpc-types-eth.workspace = true op-alloy-consensus.workspace = true op-alloy-rpc-types-engine.workspace = true +op-alloy-rpc-types.workspace = true op-revm.workspace = true # misc @@ -64,5 +67,7 @@ arbitrary = [ "reth-ethereum/arbitrary", "alloy-rpc-types-engine/arbitrary", "reth-db-api/arbitrary", + "alloy-rpc-types-eth/arbitrary", + "op-alloy-rpc-types/arbitrary", ] default = [] diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index e3bc6019d7b..bf82747c133 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -1,6 +1,7 @@ use crate::{ chainspec::CustomChainSpec, primitives::{CustomHeader, CustomNodePrimitives, CustomTransaction}, + CustomNode, }; use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload}; use reth_chain_state::ExecutedBlockWithTrieUpdates; @@ -8,9 +9,8 @@ use reth_ethereum::{ node::api::{ validate_version_specific_fields, AddOnsContext, BuiltPayload, EngineApiMessageVersion, EngineObjectValidationError, EngineValidator, ExecutionPayload, FullNodeComponents, - InvalidPayloadAttributesError, NewPayloadError, NodePrimitives, NodeTypes, - PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, PayloadTypes, - PayloadValidator, + InvalidPayloadAttributesError, NewPayloadError, NodePrimitives, PayloadAttributes, + PayloadBuilderAttributes, PayloadOrAttributes, PayloadTypes, PayloadValidator, }, primitives::{RecoveredBlock, SealedBlock}, storage::StateProviderFactory, @@ -176,9 +176,9 @@ impl From impl PayloadTypes for CustomPayloadTypes { type ExecutionData = CustomExecutionData; - type BuiltPayload = CustomBuiltPayload; - type PayloadAttributes = CustomPayloadAttributes; - type PayloadBuilderAttributes = CustomPayloadBuilderAttributes; + type BuiltPayload = OpBuiltPayload; + type PayloadAttributes = OpPayloadAttributes; + type PayloadBuilderAttributes = OpPayloadBuilderAttributes; fn block_to_payload( block: SealedBlock< @@ -237,18 +237,14 @@ where } } -impl EngineValidator for CustomEngineValidator

+impl<P> EngineValidator<CustomPayloadTypes> for CustomEngineValidator<P>
where P: StateProviderFactory + Send + Sync + Unpin + 'static, - T: PayloadTypes< - PayloadAttributes = CustomPayloadAttributes, - ExecutionData = CustomExecutionData, - >, { fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, - payload_or_attrs: PayloadOrAttributes<'_, Self::ExecutionData, T::PayloadAttributes>, + payload_or_attrs: PayloadOrAttributes<'_, Self::ExecutionData, OpPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) } @@ -256,29 +252,27 @@ where fn ensure_well_formed_attributes( &self, version: EngineApiMessageVersion, - attributes: &T::PayloadAttributes, + attributes: &OpPayloadAttributes, ) -> Result<(), EngineObjectValidationError> { validate_version_specific_fields( self.chain_spec(), version, - PayloadOrAttributes::::PayloadAttributes( - attributes, - ), + PayloadOrAttributes::::PayloadAttributes(attributes), )?; // custom validation logic - ensure that the custom field is not zero - if attributes.extension == 0 { - return Err(EngineObjectValidationError::invalid_params( - CustomError::CustomFieldIsNotZero, - )) - } + // if attributes.extension == 0 { + // return Err(EngineObjectValidationError::invalid_params( + // CustomError::CustomFieldIsNotZero, + // )) + // } Ok(()) } fn validate_payload_attributes_against_header( &self, - _attr: &::PayloadAttributes, + _attr: &OpPayloadAttributes, _header: &::Header, ) -> Result<(), InvalidPayloadAttributesError> { // skip default timestamp validation @@ -300,13 +294,7 @@ pub struct CustomEngineValidatorBuilder; impl EngineValidatorBuilder for CustomEngineValidatorBuilder where - N: FullNodeComponents< - Types: NodeTypes< - Payload = CustomPayloadTypes, - ChainSpec = CustomChainSpec, - Primitives = CustomNodePrimitives, - >, - >, + N: FullNodeComponents, { type Validator = CustomEngineValidator; diff --git a/examples/custom-node/src/engine_api.rs b/examples/custom-node/src/engine_api.rs index bc92ffb8a99..7e5d1455f0e 100644 --- a/examples/custom-node/src/engine_api.rs +++ b/examples/custom-node/src/engine_api.rs @@ -1,21 +1,19 @@ use crate::{ - chainspec::CustomChainSpec, - engine::{ - CustomBuiltPayload, CustomExecutionData, CustomPayloadAttributes, CustomPayloadTypes, - }, + engine::{CustomExecutionData, CustomPayloadTypes}, primitives::CustomNodePrimitives, + CustomNode, }; use alloy_rpc_types_engine::{ ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, }; use async_trait::async_trait; use jsonrpsee::{core::RpcResult, proc_macros::rpc, RpcModule}; +use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_ethereum::node::api::{ AddOnsContext, BeaconConsensusEngineHandle, EngineApiMessageVersion, FullNodeComponents, - NodeTypes, }; use reth_node_builder::rpc::EngineApiBuilder; -use reth_op::node::OpStorage; +use reth_op::node::OpBuiltPayload; use reth_payload_builder::PayloadStore; use reth_rpc_api::IntoEngineApiRpcModule; use reth_rpc_engine_api::EngineApiError; @@ -30,9 +28,9 @@ pub struct CustomExecutionPayloadEnvelope { extension: u64, } -impl From for CustomExecutionPayloadEnvelope { - fn from(value: CustomBuiltPayload) -> Self { - let sealed_block = value.0.into_sealed_block(); +impl From> for CustomExecutionPayloadEnvelope { + fn from(value: OpBuiltPayload) -> Self { + let sealed_block = value.into_sealed_block(); let hash = sealed_block.hash(); let extension = sealed_block.header().extension; let block = sealed_block.into_block(); @@ -53,7 +51,7 @@ pub trait 
CustomEngineApi { async fn fork_choice_updated( &self, fork_choice_state: ForkchoiceState, - payload_attributes: Option, + payload_attributes: Option, ) -> RpcResult; #[method(name = "getPayload")] @@ -93,7 +91,7 @@ impl CustomEngineApiServer for CustomEngineApi { async fn fork_choice_updated( &self, fork_choice_state: ForkchoiceState, - payload_attributes: Option, + payload_attributes: Option, ) -> RpcResult { Ok(self .inner @@ -132,14 +130,7 @@ pub struct CustomEngineApiBuilder {} impl EngineApiBuilder for CustomEngineApiBuilder where - N: FullNodeComponents< - Types: NodeTypes< - Payload = CustomPayloadTypes, - ChainSpec = CustomChainSpec, - Primitives = CustomNodePrimitives, - Storage = OpStorage, - >, - >, + N: FullNodeComponents, { type EngineApi = CustomEngineApi; diff --git a/examples/custom-node/src/lib.rs b/examples/custom-node/src/lib.rs index a4511e204e8..45dbde46628 100644 --- a/examples/custom-node/src/lib.rs +++ b/examples/custom-node/src/lib.rs @@ -8,18 +8,26 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] use crate::{ - evm::CustomExecutorBuilder, pool::CustomPooledTransaction, primitives::CustomTransaction, + engine::{CustomEngineValidatorBuilder, CustomPayloadTypes}, + engine_api::CustomEngineApiBuilder, + evm::CustomExecutorBuilder, + pool::CustomPooledTransaction, + primitives::CustomTransaction, + rpc::CustomRpcTypes, }; use chainspec::CustomChainSpec; use primitives::CustomNodePrimitives; use reth_ethereum::node::api::{FullNodeTypes, NodeTypes}; use reth_node_builder::{ components::{BasicPayloadServiceBuilder, ComponentsBuilder}, - Node, + Node, NodeAdapter, }; -use reth_op::node::{ - node::{OpConsensusBuilder, OpNetworkBuilder, OpPayloadBuilder, OpPoolBuilder}, - txpool, OpNode, OpPayloadTypes, +use reth_op::{ + node::{ + node::{OpConsensusBuilder, OpNetworkBuilder, OpPayloadBuilder, OpPoolBuilder}, + txpool, OpAddOns, OpNode, + }, + rpc::OpEthApiBuilder, }; pub mod chainspec; @@ -28,16 +36,19 @@ pub mod engine_api; pub mod evm; pub mod pool; pub mod primitives; +pub mod rpc; #[derive(Debug, Clone)] -pub struct CustomNode {} +pub struct CustomNode { + inner: OpNode, +} impl NodeTypes for CustomNode { type Primitives = CustomNodePrimitives; type ChainSpec = CustomChainSpec; type StateCommitment = ::StateCommitment; type Storage = ::Storage; - type Payload = OpPayloadTypes; + type Payload = CustomPayloadTypes; } impl Node for CustomNode @@ -53,7 +64,12 @@ where OpConsensusBuilder, >; - type AddOns = (); + type AddOns = OpAddOns< + NodeAdapter, + OpEthApiBuilder, + CustomEngineValidatorBuilder, + CustomEngineApiBuilder, + >; fn components_builder(&self) -> Self::ComponentsBuilder { ComponentsBuilder::default() @@ -65,5 +81,7 @@ where .consensus(OpConsensusBuilder::default()) } - fn add_ons(&self) -> Self::AddOns {} + fn add_ons(&self) -> Self::AddOns { + self.inner.add_ons_builder().build() + } } diff --git a/examples/custom-node/src/rpc.rs b/examples/custom-node/src/rpc.rs new file mode 100644 index 00000000000..8259297367d --- /dev/null +++ b/examples/custom-node/src/rpc.rs @@ -0,0 +1,53 @@ +use crate::{ + evm::CustomTxEnv, + primitives::{CustomHeader, CustomTransaction}, +}; +use alloy_consensus::error::ValueError; +use alloy_network::TxSigner; +use op_alloy_consensus::OpTxEnvelope; +use op_alloy_rpc_types::{OpTransactionReceipt, OpTransactionRequest}; +use reth_op::rpc::RpcTypes; +use reth_rpc_api::eth::{ + transaction::TryIntoTxEnv, EthTxEnvError, SignTxRequestError, SignableTxRequest, TryIntoSimTx, +}; +use revm::context::{BlockEnv, CfgEnv}; + 
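+/// Unit type describing the RPC interface types of the custom node: the
+/// associated types below map transaction requests, responses and receipts to
+/// their OP counterparts.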
+#[derive(Debug, Clone, Copy, Default)] +#[non_exhaustive] +pub struct CustomRpcTypes; + +impl RpcTypes for CustomRpcTypes { + type Header = alloy_rpc_types_eth::Header; + type Receipt = OpTransactionReceipt; + type TransactionRequest = OpTransactionRequest; + type TransactionResponse = op_alloy_rpc_types::Transaction; +} + +impl TryIntoSimTx for OpTransactionRequest { + fn try_into_sim_tx(self) -> Result> { + Ok(CustomTransaction::Op(self.try_into_sim_tx()?)) + } +} + +impl TryIntoTxEnv for OpTransactionRequest { + type Err = EthTxEnvError; + + fn try_into_tx_env( + self, + cfg_env: &CfgEnv, + block_env: &BlockEnv, + ) -> Result { + Ok(CustomTxEnv::Op(self.try_into_tx_env(cfg_env, block_env)?)) + } +} + +impl SignableTxRequest for OpTransactionRequest { + async fn try_build_and_sign( + self, + signer: impl TxSigner + Send, + ) -> Result { + Ok(CustomTransaction::Op( + SignableTxRequest::::try_build_and_sign(self, signer).await?, + )) + } +} From a0de7f875ef42a45c31cb719b32aea33f86733ff Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Tue, 22 Jul 2025 12:22:49 +0100 Subject: [PATCH 252/305] fix: convert latency to milliseconds in reth-bench script (#17555) Co-authored-by: Claude --- bin/reth-bench/scripts/compare_newpayload_latency.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/bin/reth-bench/scripts/compare_newpayload_latency.py b/bin/reth-bench/scripts/compare_newpayload_latency.py index 7d3c212d490..d0b914b6963 100755 --- a/bin/reth-bench/scripts/compare_newpayload_latency.py +++ b/bin/reth-bench/scripts/compare_newpayload_latency.py @@ -68,8 +68,9 @@ def main(): df1 = df1.head(min_len) df2 = df2.head(min_len) - latency1 = df1['total_latency'].values - latency2 = df2['total_latency'].values + # Convert from microseconds to milliseconds for better readability + latency1 = df1['total_latency'].values / 1000.0 + latency2 = df2['total_latency'].values / 1000.0 # Handle division by zero with np.errstate(divide='ignore', invalid='ignore'): From 4fb1b8a614ef43a64204676b5e8e98ad8cddbb70 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Jul 2025 14:51:59 +0200 Subject: [PATCH 253/305] ci: fix era sync test (#17561) --- .github/workflows/sync-era.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sync-era.yml b/.github/workflows/sync-era.yml index 973dc5ec036..11d2baa9994 100644 --- a/.github/workflows/sync-era.yml +++ b/.github/workflows/sync-era.yml @@ -53,7 +53,7 @@ jobs: --chain ${{ matrix.chain.chain }} \ --debug.tip ${{ matrix.chain.tip }} \ --debug.max-block ${{ matrix.chain.block }} \ - --debug.terminate + --debug.terminate \ --era.enable - name: Verify the target block hash run: | From d8451e54e7267f9f1634118d6d279b2216f7e2bb Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Tue, 22 Jul 2025 15:32:51 +0200 Subject: [PATCH 254/305] chore: bump version v1.6.0 (#17556) --- Cargo.lock | 268 +++++++++++++++++++-------------------- Cargo.toml | 2 +- docs/vocs/vocs.config.ts | 2 +- 3 files changed, 136 insertions(+), 136 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a3b5a6f2fd1..fb64c281d7c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3034,7 +3034,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3626,7 +3626,7 @@ dependencies = [ [[package]] name = "exex-subscription" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", "clap", @@ -6072,7 +6072,7 
@@ dependencies = [ [[package]] name = "op-reth" -version = "1.5.1" +version = "1.6.0" dependencies = [ "clap", "reth-cli-util", @@ -7150,7 +7150,7 @@ checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3" [[package]] name = "reth" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-rpc-types", "aquamarine", @@ -7197,7 +7197,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7220,7 +7220,7 @@ dependencies = [ [[package]] name = "reth-bench" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -7259,7 +7259,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7290,7 +7290,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7310,7 +7310,7 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-genesis", "clap", @@ -7323,7 +7323,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.5.1" +version = "1.6.0" dependencies = [ "ahash", "alloy-chains", @@ -7403,7 +7403,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.5.1" +version = "1.6.0" dependencies = [ "reth-tasks", "tokio", @@ -7412,7 +7412,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7432,7 +7432,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7456,7 +7456,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.5.1" +version = "1.6.0" dependencies = [ "convert_case", "proc-macro2", @@ -7467,7 +7467,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", "eyre", @@ -7484,7 +7484,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7496,7 +7496,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7510,7 +7510,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7534,7 +7534,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7567,7 +7567,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7597,7 +7597,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -7626,7 +7626,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7643,7 +7643,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7670,7 +7670,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.5.1" +version = "1.6.0" 
dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7695,7 +7695,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7723,7 +7723,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7762,7 +7762,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7823,7 +7823,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.5.1" +version = "1.6.0" dependencies = [ "aes", "alloy-primitives", @@ -7853,7 +7853,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -7876,7 +7876,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7900,7 +7900,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.5.1" +version = "1.6.0" dependencies = [ "futures", "pin-project", @@ -7930,7 +7930,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8000,7 +8000,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -8026,7 +8026,7 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8048,7 +8048,7 @@ dependencies = [ [[package]] name = "reth-era-downloader" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", "bytes", @@ -8065,7 +8065,7 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8094,7 +8094,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.5.1" +version = "1.6.0" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -8104,7 +8104,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8142,7 +8142,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -8167,7 +8167,7 @@ dependencies = [ [[package]] name = "reth-ethereum" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -8206,7 +8206,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "clap", @@ -8228,7 +8228,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8244,7 +8244,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8262,7 +8262,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -8275,7 +8275,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.5.1" +version = "1.6.0" dependencies = [ 
"alloy-consensus", "alloy-eips", @@ -8301,7 +8301,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8325,7 +8325,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", "rayon", @@ -8335,7 +8335,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8360,7 +8360,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8382,7 +8382,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-evm", "alloy-primitives", @@ -8394,7 +8394,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8414,7 +8414,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8458,7 +8458,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "eyre", @@ -8490,7 +8490,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8507,7 +8507,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.5.1" +version = "1.6.0" dependencies = [ "serde", "serde_json", @@ -8516,7 +8516,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8543,7 +8543,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.5.1" +version = "1.6.0" dependencies = [ "bytes", "futures", @@ -8565,7 +8565,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.5.1" +version = "1.6.0" dependencies = [ "bitflags 2.9.1", "byteorder", @@ -8584,7 +8584,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.5.1" +version = "1.6.0" dependencies = [ "bindgen", "cc", @@ -8592,7 +8592,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.5.1" +version = "1.6.0" dependencies = [ "futures", "metrics", @@ -8603,14 +8603,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.5.1" +version = "1.6.0" dependencies = [ "futures-util", "if-addrs", @@ -8624,7 +8624,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8685,7 +8685,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8709,7 +8709,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8731,7 +8731,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8748,7 +8748,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -8761,7 
+8761,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.5.1" +version = "1.6.0" dependencies = [ "anyhow", "bincode 1.3.3", @@ -8779,7 +8779,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -8802,7 +8802,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8868,7 +8868,7 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8921,7 +8921,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-contract", @@ -8975,7 +8975,7 @@ dependencies = [ [[package]] name = "reth-node-ethstats" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -8998,7 +8998,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9021,7 +9021,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.5.1" +version = "1.6.0" dependencies = [ "eyre", "http", @@ -9043,7 +9043,7 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.5.1" +version = "1.6.0" dependencies = [ "reth-chainspec", "reth-db-api", @@ -9055,7 +9055,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.5.1" +version = "1.6.0" dependencies = [ "reth-chainspec", "reth-cli-util", @@ -9094,7 +9094,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9121,7 +9121,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9169,7 +9169,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9201,7 +9201,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9228,7 +9228,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -9238,7 +9238,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9295,7 +9295,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9333,7 +9333,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9360,7 +9360,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9417,7 +9417,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9435,7 +9435,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9472,7 +9472,7 @@ dependencies = [ [[package]] 
name = "reth-payload-builder" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9492,7 +9492,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.5.1" +version = "1.6.0" dependencies = [ "pin-project", "reth-payload-primitives", @@ -9503,7 +9503,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9522,7 +9522,7 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9531,7 +9531,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -9540,7 +9540,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9562,7 +9562,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9600,7 +9600,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9649,7 +9649,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9681,7 +9681,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -9700,7 +9700,7 @@ dependencies = [ [[package]] name = "reth-ress-protocol" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9726,7 +9726,7 @@ dependencies = [ [[package]] name = "reth-ress-provider" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9752,7 +9752,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9766,7 +9766,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -9845,7 +9845,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-genesis", @@ -9872,7 +9872,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9891,7 +9891,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-network", @@ -9946,7 +9946,7 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-json-rpc", @@ -9970,7 +9970,7 @@ dependencies = [ [[package]] name = "reth-rpc-e2e-tests" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-genesis", "alloy-rpc-types-engine", @@ -9990,7 +9990,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10026,7 +10026,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -10069,7 +10069,7 @@ dependencies = [ 
[[package]] name = "reth-rpc-eth-types" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10113,7 +10113,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-rpc-types-engine", "http", @@ -10130,7 +10130,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10145,7 +10145,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10206,7 +10206,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10235,7 +10235,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", "arbitrary", @@ -10252,7 +10252,7 @@ dependencies = [ [[package]] name = "reth-stateless" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10277,7 +10277,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", "assert_matches", @@ -10301,7 +10301,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", "clap", @@ -10313,7 +10313,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10336,7 +10336,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10351,7 +10351,7 @@ dependencies = [ [[package]] name = "reth-storage-rpc-provider" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10380,7 +10380,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.5.1" +version = "1.6.0" dependencies = [ "auto_impl", "dyn-clone", @@ -10397,7 +10397,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10412,7 +10412,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.5.1" +version = "1.6.0" dependencies = [ "tokio", "tokio-stream", @@ -10421,7 +10421,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.5.1" +version = "1.6.0" dependencies = [ "clap", "eyre", @@ -10435,7 +10435,7 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" -version = "1.5.1" +version = "1.6.0" dependencies = [ "opentelemetry", "opentelemetry-otlp", @@ -10448,7 +10448,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10494,7 +10494,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10526,7 +10526,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -10558,7 +10558,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10584,7 +10584,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = 
"1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10613,7 +10613,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10645,7 +10645,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse-parallel" -version = "1.5.1" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10672,7 +10672,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.5.1" +version = "1.6.0" dependencies = [ "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index 4983598f808..f11cb5158ca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.5.1" +version = "1.6.0" edition = "2021" rust-version = "1.86" license = "MIT OR Apache-2.0" diff --git a/docs/vocs/vocs.config.ts b/docs/vocs/vocs.config.ts index cee55bc2d9f..0df7a4ceb86 100644 --- a/docs/vocs/vocs.config.ts +++ b/docs/vocs/vocs.config.ts @@ -18,7 +18,7 @@ export default defineConfig({ }, { text: 'GitHub', link: 'https://github.com/paradigmxyz/reth' }, { - text: 'v1.5.1', + text: 'v1.6.0', items: [ { text: 'Releases', From ca645b40eef400a341d6fd4f9c6b0f08410f7cbf Mon Sep 17 00:00:00 2001 From: nk_ysg Date: Tue, 22 Jul 2025 21:35:16 +0800 Subject: [PATCH 255/305] fix(exex): update batch threadshold calculate processed blocks (#17551) --- Cargo.lock | 87 ++++++++++++++-------------- crates/exex/exex/src/backfill/job.rs | 50 +++++++++++++++- 2 files changed, 92 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb64c281d7c..1daf6c368b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,9 +97,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.6" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4195a29a4b87137b2bb02105e746102873bc03561805cf45c0e510c961f160e6" +checksum = "5674914c2cfdb866c21cb0c09d82374ee39a1395cf512e7515f4c014083b3fff" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -107,7 +107,7 @@ dependencies = [ "num_enum", "proptest", "serde", - "strum 0.27.2", + "strum 0.27.1", ] [[package]] @@ -640,7 +640,7 @@ dependencies = [ "jsonwebtoken", "rand 0.8.5", "serde", - "strum 0.27.2", + "strum 0.27.1", ] [[package]] @@ -1922,9 +1922,9 @@ dependencies = [ [[package]] name = "bytemuck_derive" -version = "1.10.0" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4" +checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1" dependencies = [ "proc-macro2", "quote", @@ -2613,9 +2613,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.3" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +checksum = "373b7c5dbd637569a2cca66e8d66b8c446a1e7bf064ea321d265d7b3dfe7c97e" dependencies = [ "cfg-if", "cpufeatures", @@ -3715,9 +3715,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.9" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" +checksum = "64cd1e32ddd350061ae6edb1b082d7c54915b5c672c389143b9a63403a109f24" [[package]] name = "filetime" @@ -4389,7 +4389,7 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", - 
"webpki-roots 1.0.2", + "webpki-roots 1.0.1", ] [[package]] @@ -4793,9 +4793,9 @@ dependencies = [ [[package]] name = "instability" -version = "0.3.9" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "435d80800b936787d62688c927b6490e887c7ef5ff9ce922c6c6050fca75eb9a" +checksum = "0bf9fed6d91cfb734e7476a06bde8300a1b94e217e1b523b6f0cd1a01998c71d" dependencies = [ "darling", "indoc", @@ -5281,9 +5281,9 @@ dependencies = [ [[package]] name = "libredox" -version = "0.1.6" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4488594b9328dee448adb906d8b126d9b7deb7cf5c22161ee591610bb1be83c0" +checksum = "1580801010e535496706ba011c15f8532df6b42297d2e471fec38ceadd8c0638" dependencies = [ "bitflags 2.9.1", "libc", @@ -7139,7 +7139,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.2", + "webpki-roots 1.0.1", ] [[package]] @@ -7559,7 +7559,7 @@ dependencies = [ "rustc-hash 2.1.1", "serde", "serde_json", - "strum 0.27.2", + "strum 0.27.1", "sysinfo", "tempfile", "thiserror 2.0.12", @@ -8909,7 +8909,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "shellexpand", - "strum 0.27.2", + "strum 0.27.1", "thiserror 2.0.12", "tokio", "toml", @@ -9641,7 +9641,7 @@ dependencies = [ "revm-database", "revm-database-interface", "revm-state", - "strum 0.27.2", + "strum 0.27.1", "tempfile", "tokio", "tracing", @@ -9717,8 +9717,8 @@ dependencies = [ "reth-ress-protocol", "reth-storage-errors", "reth-tracing", - "strum 0.27.2", - "strum_macros 0.27.2", + "strum 0.27.1", + "strum_macros 0.27.1", "tokio", "tokio-stream", "tracing", @@ -10140,7 +10140,7 @@ dependencies = [ "reth-errors", "reth-network-api", "serde", - "strum 0.27.2", + "strum 0.27.1", ] [[package]] @@ -10308,7 +10308,7 @@ dependencies = [ "derive_more", "reth-nippy-jar", "serde", - "strum 0.27.2", + "strum 0.27.1", ] [[package]] @@ -11128,15 +11128,15 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.8" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" dependencies = [ "bitflags 2.9.1", "errno", "libc", "linux-raw-sys 0.9.4", - "windows-sys 0.60.2", + "windows-sys 0.59.0", ] [[package]] @@ -11458,9 +11458,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.141" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "indexmap 2.10.0", "itoa", @@ -11834,11 +11834,11 @@ dependencies = [ [[package]] name = "strum" -version = "0.27.2" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" +checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32" dependencies = [ - "strum_macros 0.27.2", + "strum_macros 0.27.1", ] [[package]] @@ -11856,13 +11856,14 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.27.2" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +checksum = "c77a8c5abcaf0f9ce05d62342b7d298c346515365c36b673df4ebe3ced01fde8" dependencies = [ 
"heck", "proc-macro2", "quote", + "rustversion", "syn 2.0.104", ] @@ -11983,7 +11984,7 @@ dependencies = [ "fastrand 2.3.0", "getrandom 0.3.3", "once_cell", - "rustix 1.0.8", + "rustix 1.0.7", "windows-sys 0.59.0", ] @@ -12626,9 +12627,9 @@ dependencies = [ [[package]] name = "tracy-client-sys" -version = "0.26.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "319c70195101a93f56db4c74733e272d720768e13471f400c78406a326b172b0" +checksum = "5f9612d9503675b07b244922ea6f6f3cdd88c43add1b3498084613fc88cdf69d" dependencies = [ "cc", "windows-targets 0.52.6", @@ -13119,14 +13120,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" dependencies = [ - "webpki-root-certs 1.0.2", + "webpki-root-certs 1.0.1", ] [[package]] name = "webpki-root-certs" -version = "1.0.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4ffd8df1c57e87c325000a3d6ef93db75279dc3a231125aac571650f22b12a" +checksum = "86138b15b2b7d561bc4469e77027b8dd005a43dc502e9031d1f5afc8ce1f280e" dependencies = [ "rustls-pki-types", ] @@ -13137,14 +13138,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.2", + "webpki-roots 1.0.1", ] [[package]] name = "webpki-roots" -version = "1.0.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" +checksum = "8782dd5a41a24eed3a4f40b606249b3e236ca61adf1f25ea4d45c73de122b502" dependencies = [ "rustls-pki-types", ] @@ -13782,7 +13783,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af3a19837351dc82ba89f8a125e22a3c475f05aba604acc023d62b2739ae2909" dependencies = [ "libc", - "rustix 1.0.8", + "rustix 1.0.7", ] [[package]] diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index bbfd6c2a894..1a294e50659 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -124,7 +124,7 @@ where blocks.push(block); // Check if we should commit now if self.thresholds.is_end_of_batch( - block_number - *self.range.start(), + block_number - *self.range.start() + 1, executor.size_hint() as u64, cumulative_gas, batch_start.elapsed(), @@ -243,7 +243,10 @@ impl From> for SingleBlockBackfillJob { #[cfg(test)] mod tests { use crate::{ - backfill::test_utils::{blocks_and_execution_outputs, chain_spec, to_execution_outcome}, + backfill::{ + job::ExecutionStageThresholds, + test_utils::{blocks_and_execution_outputs, chain_spec, to_execution_outcome}, + }, BackfillJobFactory, }; use reth_db_common::init::init_genesis; @@ -333,4 +336,47 @@ mod tests { Ok(()) } + + #[test] + fn test_backfill_with_batch_threshold() -> eyre::Result<()> { + reth_tracing::init_test_tracing(); + + // Create a key pair for the sender + let key_pair = generators::generate_key(&mut generators::rng()); + let address = public_key_to_address(key_pair.public_key()); + + let chain_spec = chain_spec(address); + + let executor = EthEvmConfig::ethereum(chain_spec.clone()); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + init_genesis(&provider_factory)?; + let blockchain_db = BlockchainProvider::new(provider_factory.clone())?; + + let 
blocks_and_execution_outputs = + blocks_and_execution_outputs(provider_factory, chain_spec, key_pair)?; + let (block1, output1) = blocks_and_execution_outputs[0].clone(); + let (block2, output2) = blocks_and_execution_outputs[1].clone(); + + // Backfill with max_blocks=1, expect two separate chains + let factory = BackfillJobFactory::new(executor, blockchain_db).with_thresholds( + ExecutionStageThresholds { max_blocks: Some(1), ..Default::default() }, + ); + let job = factory.backfill(1..=2); + let chains = job.collect::, _>>()?; + + // Assert two chains, each with one block + assert_eq!(chains.len(), 2); + + let mut chain1 = chains[0].clone(); + chain1.execution_outcome_mut().bundle.reverts.sort(); + assert_eq!(chain1.blocks(), &[(1, block1)].into()); + assert_eq!(chain1.execution_outcome(), &to_execution_outcome(1, &output1)); + + let mut chain2 = chains[1].clone(); + chain2.execution_outcome_mut().bundle.reverts.sort(); + assert_eq!(chain2.blocks(), &[(2, block2)].into()); + assert_eq!(chain2.execution_outcome(), &to_execution_outcome(2, &output2)); + + Ok(()) + } } From 2446c2fd42f2ed511322dd8d378586c4bf6846a5 Mon Sep 17 00:00:00 2001 From: Amidamaru Date: Tue, 22 Jul 2025 21:41:39 +0700 Subject: [PATCH 256/305] perf: process chunks in par for get logs in block range `eth_getLogs` (#16675) --- Cargo.lock | 1 + crates/rpc/rpc/Cargo.toml | 1 + crates/rpc/rpc/src/eth/filter.rs | 83 ++++++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 1daf6c368b4..9de2bbb5597 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9794,6 +9794,7 @@ dependencies = [ "http", "http-body", "hyper", + "itertools 0.14.0", "jsonrpsee", "jsonrpsee-types", "jsonwebtoken", diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index d7cf9839b03..12a7f42b263 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -89,6 +89,7 @@ serde.workspace = true sha2.workspace = true thiserror.workspace = true derive_more.workspace = true +itertools.workspace = true [dev-dependencies] reth-ethereum-primitives.workspace = true diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index e0f9bfddddb..ff5841c3747 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -8,6 +8,7 @@ use alloy_rpc_types_eth::{ }; use async_trait::async_trait; use futures::future::TryFutureExt; +use itertools::Itertools; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_errors::ProviderError; use reth_primitives_traits::{NodePrimitives, SealedHeader}; @@ -71,6 +72,12 @@ const BLOOM_ADJUSTMENT_MIN_BLOCKS: u64 = 100; /// The maximum number of headers we read at once when handling a range filter. const MAX_HEADERS_RANGE: u64 = 1_000; // with ~530bytes per header this is ~500kb +/// Threshold for enabling parallel processing in range mode +const PARALLEL_PROCESSING_THRESHOLD: u64 = 1000; + +/// Default concurrency for parallel processing +const DEFAULT_PARALLEL_CONCURRENCY: usize = 4; + /// `Eth` filter RPC implementation. /// /// This type handles `eth_` rpc requests related to filters (`eth_getLogs`). 
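A minimal, self-contained sketch of the fan-out/fan-in shape the new `process_large_range` path (below) uses. This is illustrative only — the `fetch_receipts` stand-in, the numeric worklist, and the `tokio`/`itertools` dependencies are assumptions, not code from this patch — but the chunking arithmetic and the spawn-then-join ordering mirror the implementation:

```rust
use itertools::Itertools;

const CONCURRENCY: usize = 4; // mirrors DEFAULT_PARALLEL_CONCURRENCY

/// Hypothetical stand-in for the synchronous `provider.receipts_by_block(..)` call.
fn fetch_receipts(block: u64) -> Vec<u64> {
    vec![block]
}

#[tokio::main]
async fn main() {
    let headers: Vec<u64> = (0..10_000).collect();

    // Same chunking as the patch: at least one item per chunk.
    let chunk_size = std::cmp::max(headers.len() / CONCURRENCY, 1);
    let chunks: Vec<Vec<u64>> = headers
        .into_iter()
        .chunks(chunk_size)
        .into_iter()
        .map(|chunk| chunk.collect())
        .collect();

    // Fan out: one blocking task per chunk, because the provider reads are sync.
    let tasks: Vec<_> = chunks
        .into_iter()
        .map(|chunk| {
            tokio::task::spawn_blocking(move || {
                chunk.into_iter().map(fetch_receipts).collect::<Vec<_>>()
            })
        })
        .collect();

    // Fan in: await in spawn order so results stay ordered by block range.
    let mut results = Vec::new();
    for task in tasks {
        results.extend(task.await.expect("blocking task panicked"));
    }
    assert_eq!(results.len(), 10_000);
}
```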
@@ -1039,6 +1046,20 @@ impl< range_headers.push(next_header); } + // Check if we should use parallel processing for large ranges + let remaining_headers = self.iter.len() + range_headers.len(); + if remaining_headers >= PARALLEL_PROCESSING_THRESHOLD as usize { + self.process_large_range(range_headers).await + } else { + self.process_small_range(range_headers).await + } + } + + /// Process small range headers + async fn process_small_range( + &mut self, + range_headers: Vec::Header>>, + ) -> Result>, EthFilterError> { // Process each header individually to avoid queuing for all receipts for header in range_headers { // First check if already cached to avoid unnecessary provider calls @@ -1070,6 +1091,68 @@ impl< Ok(self.next.pop_front()) } + + /// Process large range headers + async fn process_large_range( + &mut self, + range_headers: Vec::Header>>, + ) -> Result>, EthFilterError> { + // Split headers into chunks + let chunk_size = std::cmp::max(range_headers.len() / DEFAULT_PARALLEL_CONCURRENCY, 1); + let header_chunks = range_headers + .into_iter() + .chunks(chunk_size) + .into_iter() + .map(|chunk| chunk.collect::>()) + .collect::>(); + + // Process chunks in parallel + let mut tasks = Vec::new(); + for chunk_headers in header_chunks { + let filter_inner = self.filter_inner.clone(); + let task = tokio::task::spawn_blocking(move || { + let mut chunk_results = Vec::new(); + + for header in chunk_headers { + // Fetch directly from provider - RangeMode is used for older blocks unlikely to + // be cached + let receipts = + match filter_inner.provider().receipts_by_block(header.hash().into())? { + Some(receipts) => Arc::new(receipts), + None => continue, // No receipts found + }; + + if !receipts.is_empty() { + chunk_results.push(ReceiptBlockResult { + receipts, + recovered_block: None, + header, + }); + } + } + + Ok::>, EthFilterError>(chunk_results) + }); + tasks.push(task); + } + + let results = futures::future::join_all(tasks).await; + for result in results { + match result { + Ok(Ok(chunk_results)) => { + for result in chunk_results { + self.next.push_back(result); + } + } + Ok(Err(e)) => return Err(e), + Err(_join_err) => { + return Err(EthFilterError::InternalError); + } + } + } + + Ok(self.next.pop_front()) + } } #[cfg(test)] From c1bfa31444ef75e8b17915bf1e7031a49b7ca754 Mon Sep 17 00:00:00 2001 From: nk_ysg Date: Tue, 22 Jul 2025 22:50:07 +0800 Subject: [PATCH 257/305] chore: rm unused file (#17563) --- docs/design/codecs.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 docs/design/codecs.md diff --git a/docs/design/codecs.md b/docs/design/codecs.md deleted file mode 100644 index e69de29bb2d..00000000000 From c2098faea32a48bf19694f46678c15e502a9e495 Mon Sep 17 00:00:00 2001 From: 0xOsiris Date: Tue, 22 Jul 2025 07:50:18 -0700 Subject: [PATCH 258/305] feat: make basic block builder pub (#17476) --- crates/evm/evm/src/execute.rs | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/crates/evm/evm/src/execute.rs b/crates/evm/evm/src/execute.rs index 5e0c03592bc..2e797d429e5 100644 --- a/crates/evm/evm/src/execute.rs +++ b/crates/evm/evm/src/execute.rs @@ -344,15 +344,22 @@ pub trait BlockBuilder { fn into_executor(self) -> Self::Executor; } -pub(crate) struct BasicBlockBuilder<'a, F, Executor, Builder, N: NodePrimitives> +/// A type that constructs a block from transactions and execution results. 
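+///
+/// Now that the builder is public, downstream crates can drive block building
+/// directly. A hypothetical usage sketch (illustrative only — the
+/// builder-construction and `finish` APIs shown here are assumptions, not
+/// part of this patch):
+///
+/// ```ignore
+/// let mut builder = evm_config.builder_for_next_block(&mut db, &parent, attributes)?;
+/// builder.apply_pre_execution_changes()?;
+/// builder.execute_transaction(tx)?;
+/// let outcome = builder.finish(&state_provider)?;
+/// ```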
+#[derive(Debug)] +pub struct BasicBlockBuilder<'a, F, Executor, Builder, N: NodePrimitives> where F: BlockExecutorFactory, { - pub(crate) executor: Executor, - pub(crate) transactions: Vec>>, - pub(crate) ctx: F::ExecutionCtx<'a>, - pub(crate) parent: &'a SealedHeader>, - pub(crate) assembler: Builder, + /// The block executor used to execute transactions. + pub executor: Executor, + /// The transactions executed in this block. + pub transactions: Vec>>, + /// The parent block execution context. + pub ctx: F::ExecutionCtx<'a>, + /// The sealed parent block header. + pub parent: &'a SealedHeader>, + /// The assembler used to build the block. + pub assembler: Builder, } /// Conversions for executable transactions. From 868c421c5d783c3a58a8fca77406a902b118baa4 Mon Sep 17 00:00:00 2001 From: Hai | RISE <150876604+hai-rise@users.noreply.github.com> Date: Tue, 22 Jul 2025 21:51:03 +0700 Subject: [PATCH 259/305] feat(pool): return state of an added tx (#17442) --- crates/net/network/src/transactions/mod.rs | 11 ++++--- crates/net/network/tests/it/txgossip.rs | 17 +++++----- crates/optimism/rpc/src/eth/ext.rs | 6 ++-- crates/optimism/rpc/src/eth/transaction.rs | 6 ++-- .../rpc-eth-api/src/helpers/transaction.rs | 6 ++-- crates/rpc/rpc/src/eth/helpers/transaction.rs | 7 +++-- crates/transaction-pool/src/lib.rs | 9 +++--- crates/transaction-pool/src/noop.rs | 10 +++--- crates/transaction-pool/src/pool/mod.rs | 31 ++++++++++++++++--- crates/transaction-pool/src/traits.rs | 12 +++---- crates/transaction-pool/tests/it/blobs.rs | 4 +-- crates/transaction-pool/tests/it/evict.rs | 5 +-- crates/transaction-pool/tests/it/listeners.rs | 2 +- crates/transaction-pool/tests/it/pending.rs | 4 +-- examples/txpool-tracing/src/submit.rs | 7 +++-- 15 files changed, 88 insertions(+), 49 deletions(-) diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 18233700e25..05ab9ecbf71 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -61,8 +61,8 @@ use reth_primitives_traits::SignedTransaction; use reth_tokio_util::EventStream; use reth_transaction_pool::{ error::{PoolError, PoolResult}, - GetPooledTransactionLimit, PoolTransaction, PropagateKind, PropagatedTransactions, - TransactionPool, ValidPoolTransaction, + AddedTransactionOutcome, GetPooledTransactionLimit, PoolTransaction, PropagateKind, + PropagatedTransactions, TransactionPool, ValidPoolTransaction, }; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, @@ -81,7 +81,8 @@ use tracing::{debug, trace}; /// The future for importing transactions into the pool. /// /// Resolves with the result of each transaction import. -pub type PoolImportFuture = Pin>> + Send + 'static>>; +pub type PoolImportFuture = + Pin>> + Send + 'static>>; /// Api to interact with [`TransactionsManager`] task. /// @@ -561,10 +562,10 @@ impl TransactionsManager { /// Processes a batch import results. - fn on_batch_import_result(&mut self, batch_results: Vec>) { + fn on_batch_import_result(&mut self, batch_results: Vec>) { for res in batch_results { match res { - Ok(hash) => { + Ok(AddedTransactionOutcome { hash, .. 
}) => { self.on_good_import(hash); } Err(err) => { diff --git a/crates/net/network/tests/it/txgossip.rs b/crates/net/network/tests/it/txgossip.rs index 4014f41bfcb..ed1c2f925dd 100644 --- a/crates/net/network/tests/it/txgossip.rs +++ b/crates/net/network/tests/it/txgossip.rs @@ -10,7 +10,9 @@ use reth_network::{ }; use reth_network_api::{events::PeerEvent, PeerKind, PeersInfo}; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider}; -use reth_transaction_pool::{test_utils::TransactionGenerator, PoolTransaction, TransactionPool}; +use reth_transaction_pool::{ + test_utils::TransactionGenerator, AddedTransactionOutcome, PoolTransaction, TransactionPool, +}; use std::sync::Arc; use tokio::join; @@ -42,7 +44,8 @@ async fn test_tx_gossip() { provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000))); // insert pending tx in peer0's pool - let hash = peer0_pool.add_external_transaction(tx).await.unwrap(); + let AddedTransactionOutcome { hash, .. } = + peer0_pool.add_external_transaction(tx).await.unwrap(); let inserted = peer0_tx_listener.recv().await.unwrap(); assert_eq!(inserted, hash); @@ -81,10 +84,10 @@ async fn test_tx_propagation_policy_trusted_only() { provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000))); // insert the tx in peer0's pool - let hash_0 = peer_0_handle.pool().unwrap().add_external_transaction(tx).await.unwrap(); + let outcome_0 = peer_0_handle.pool().unwrap().add_external_transaction(tx).await.unwrap(); let inserted = peer0_tx_listener.recv().await.unwrap(); - assert_eq!(inserted, hash_0); + assert_eq!(inserted, outcome_0.hash); // ensure tx is not gossiped to peer1 peer1_tx_listener.try_recv().expect_err("Empty"); @@ -108,16 +111,16 @@ async fn test_tx_propagation_policy_trusted_only() { provider.add_account(sender, ExtendedAccount::new(0, U256::from(100_000_000))); // insert pending tx in peer0's pool - let hash_1 = peer_0_handle.pool().unwrap().add_external_transaction(tx).await.unwrap(); + let outcome_1 = peer_0_handle.pool().unwrap().add_external_transaction(tx).await.unwrap(); let inserted = peer0_tx_listener.recv().await.unwrap(); - assert_eq!(inserted, hash_1); + assert_eq!(inserted, outcome_1.hash); // ensure peer1 now receives the pending txs from peer0 let mut buff = Vec::with_capacity(2); buff.push(peer1_tx_listener.recv().await.unwrap()); buff.push(peer1_tx_listener.recv().await.unwrap()); - assert!(buff.contains(&hash_1)); + assert!(buff.contains(&outcome_1.hash)); } #[tokio::test(flavor = "multi_thread")] diff --git a/crates/optimism/rpc/src/eth/ext.rs b/crates/optimism/rpc/src/eth/ext.rs index 46008d0608b..6c4e1bc7cf1 100644 --- a/crates/optimism/rpc/src/eth/ext.rs +++ b/crates/optimism/rpc/src/eth/ext.rs @@ -10,7 +10,9 @@ use reth_optimism_txpool::conditional::MaybeConditionalTransaction; use reth_rpc_eth_api::L2EthApiExtServer; use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; -use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; +use reth_transaction_pool::{ + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, +}; use std::sync::Arc; use tokio::sync::Semaphore; @@ -157,7 +159,7 @@ where } else { // otherwise, add to pool with the appended conditional tx.set_conditional(condition); - let hash = + let AddedTransactionOutcome { hash, .. 
} = self.pool().add_transaction(TransactionOrigin::Private, tx).await.map_err(|e| { OpEthApiError::Eth(reth_rpc_eth_types::EthApiError::PoolError(e.into())) })?; diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 89c72613b9b..f8437c12623 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -12,7 +12,9 @@ use reth_rpc_eth_api::{ }; use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_storage_api::{errors::ProviderError, ReceiptProvider}; -use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; +use reth_transaction_pool::{ + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, +}; use std::fmt::{Debug, Formatter}; impl EthTransactions for OpEthApi @@ -55,7 +57,7 @@ where } // submit the transaction to the pool with a `Local` origin - let hash = self + let AddedTransactionOutcome { hash, .. } = self .pool() .add_transaction(TransactionOrigin::Local, pool_transaction) .await diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index 33cf0048e46..168653e7c60 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -29,7 +29,9 @@ use reth_storage_api::{ BlockNumReader, BlockReaderIdExt, ProviderBlock, ProviderReceipt, ProviderTx, ReceiptProvider, TransactionsProvider, }; -use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; +use reth_transaction_pool::{ + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, +}; use std::sync::Arc; /// Transaction related functions for the [`EthApiServer`](crate::EthApiServer) trait in @@ -417,7 +419,7 @@ pub trait EthTransactions: LoadTransaction { .map_err(|_| EthApiError::TransactionConversionError)?; // submit the transaction to the pool with a `Local` origin - let hash = self + let AddedTransactionOutcome { hash, .. } = self .pool() .add_transaction(TransactionOrigin::Local, pool_transaction) .await diff --git a/crates/rpc/rpc/src/eth/helpers/transaction.rs b/crates/rpc/rpc/src/eth/helpers/transaction.rs index 6f575bc9c61..f82886a9beb 100644 --- a/crates/rpc/rpc/src/eth/helpers/transaction.rs +++ b/crates/rpc/rpc/src/eth/helpers/transaction.rs @@ -8,7 +8,9 @@ use reth_rpc_eth_api::{ FromEvmError, RpcNodeCore, }; use reth_rpc_eth_types::{utils::recover_raw_transaction, EthApiError}; -use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; +use reth_transaction_pool::{ + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, +}; impl EthTransactions for EthApi where @@ -33,7 +35,8 @@ where let pool_transaction = ::Transaction::from_pooled(recovered); // submit the transaction to the pool with a `Local` origin - let hash = self.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; + let AddedTransactionOutcome { hash, .. 
} = + self.pool().add_transaction(TransactionOrigin::Local, pool_transaction).await?; Ok(hash) } diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 14c44056cc8..9d0cd255fc1 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -281,8 +281,9 @@ pub use crate::{ error::PoolResult, ordering::{CoinbaseTipOrdering, Priority, TransactionOrdering}, pool::{ - blob_tx_priority, fee_delta, state::SubPool, AllTransactionsEvents, FullTransactionEvent, - NewTransactionEvent, TransactionEvent, TransactionEvents, TransactionListenerKind, + blob_tx_priority, fee_delta, state::SubPool, AddedTransactionOutcome, + AllTransactionsEvents, FullTransactionEvent, NewTransactionEvent, TransactionEvent, + TransactionEvents, TransactionListenerKind, }, traits::*, validate::{ @@ -486,7 +487,7 @@ where &self, origin: TransactionOrigin, transaction: Self::Transaction, - ) -> PoolResult { + ) -> PoolResult { let (_, tx) = self.validate(origin, transaction).await; let mut results = self.pool.add_transactions(origin, std::iter::once(tx)); results.pop().expect("result length is the same as the input") @@ -496,7 +497,7 @@ where &self, origin: TransactionOrigin, transactions: Vec, - ) -> Vec> { + ) -> Vec> { if transactions.is_empty() { return Vec::new() } diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index bf4f55e57c4..45851f31f88 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -9,9 +9,9 @@ use crate::{ pool::TransactionListenerKind, traits::{BestTransactionsAttributes, GetPooledTransactionLimit, NewBlobSidecar}, validate::ValidTransaction, - AllPoolTransactions, AllTransactionsEvents, BestTransactions, BlockInfo, EthPoolTransaction, - EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, PoolTransaction, - PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, + AddedTransactionOutcome, AllPoolTransactions, AllTransactionsEvents, BestTransactions, + BlockInfo, EthPoolTransaction, EthPooledTransaction, NewTransactionEvent, PoolResult, PoolSize, + PoolTransaction, PropagatedTransactions, TransactionEvents, TransactionOrigin, TransactionPool, TransactionValidationOutcome, TransactionValidator, ValidPoolTransaction, }; use alloy_eips::{ @@ -79,7 +79,7 @@ impl TransactionPool for NoopTransactionPool { &self, _origin: TransactionOrigin, transaction: Self::Transaction, - ) -> PoolResult { + ) -> PoolResult { let hash = *transaction.hash(); Err(PoolError::other(hash, Box::new(NoopInsertError::new(transaction)))) } @@ -88,7 +88,7 @@ impl TransactionPool for NoopTransactionPool { &self, _origin: TransactionOrigin, transactions: Vec, - ) -> Vec> { + ) -> Vec> { transactions .into_iter() .map(|transaction| { diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 7df08a59528..2d007452064 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -458,7 +458,7 @@ where pool: &mut RwLockWriteGuard<'_, TxPool>, origin: TransactionOrigin, tx: TransactionValidationOutcome, - ) -> PoolResult { + ) -> PoolResult { match tx { TransactionValidationOutcome::Valid { balance, @@ -494,6 +494,10 @@ where let added = pool.add_transaction(tx, balance, state_nonce, bytecode_hash)?; let hash = *added.hash(); + let state = match added.subpool() { + SubPool::Pending => AddedTransactionState::Pending, + _ => AddedTransactionState::Queued, + }; // transaction was 
successfully inserted into the pool if let Some(sidecar) = maybe_sidecar { @@ -524,7 +528,7 @@ where // Notify listeners for _all_ transactions self.on_new_transaction(added.into_new_transaction_event()); - Ok(hash) + Ok(AddedTransactionOutcome { hash, state }) } TransactionValidationOutcome::Invalid(tx, err) => { let mut listener = self.event_listener.write(); @@ -563,7 +567,7 @@ where &self, origin: TransactionOrigin, transactions: impl IntoIterator>, - ) -> Vec> { + ) -> Vec> { // Add the transactions and enforce the pool size limits in one write lock let (mut added, discarded) = { let mut pool = self.pool.write(); @@ -599,7 +603,7 @@ where // A newly added transaction may be immediately discarded, so we need to // adjust the result here for res in &mut added { - if let Ok(hash) = res { + if let Ok(AddedTransactionOutcome { hash, .. }) = res { if discarded_hashes.contains(hash) { *res = Err(PoolError::new(*hash, PoolErrorKind::DiscardedOnInsert)) } @@ -1167,7 +1171,6 @@ impl AddedTransaction { } /// Returns the subpool this transaction was added to - #[cfg(test)] pub(crate) const fn subpool(&self) -> SubPool { match self { Self::Pending(_) => SubPool::Pending, @@ -1185,6 +1188,24 @@ impl AddedTransaction { } } +/// The state of a transaction when it was added to the pool +#[derive(Debug)] +pub enum AddedTransactionState { + /// Ready for execution + Pending, + /// Not ready for execution due to a nonce gap or insufficient balance + Queued, // TODO: Break it down to missing nonce, insufficient balance, etc. +} + +/// The outcome of a successful transaction addition +#[derive(Debug)] +pub struct AddedTransactionOutcome { + /// The hash of the transaction + pub hash: TxHash, + /// The state of the transaction + pub state: AddedTransactionState, +} + /// Contains all state changes after a [`CanonicalStateUpdate`] was processed #[derive(Debug)] pub(crate) struct OnNewCanonicalStateOutcome { diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index 10bac5afe9c..090f59169b0 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -58,7 +58,7 @@ use crate::{ TransactionListenerKind, }, validate::ValidPoolTransaction, - AllTransactionsEvents, + AddedTransactionOutcome, AllTransactionsEvents, }; use alloy_consensus::{error::ValueError, BlockHeader, Signed, Typed2718}; use alloy_eips::{ @@ -130,7 +130,7 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { fn add_external_transaction( &self, transaction: Self::Transaction, - ) -> impl Future> + Send { + ) -> impl Future> + Send { self.add_transaction(TransactionOrigin::External, transaction) } @@ -140,7 +140,7 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { fn add_external_transactions( &self, transactions: Vec, - ) -> impl Future>> + Send { + ) -> impl Future>> + Send { self.add_transactions(TransactionOrigin::External, transactions) } @@ -163,7 +163,7 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { &self, origin: TransactionOrigin, transaction: Self::Transaction, - ) -> impl Future> + Send; + ) -> impl Future> + Send; /// Adds the given _unvalidated_ transaction into the pool.
/// @@ -174,14 +174,14 @@ pub trait TransactionPool: Clone + Debug + Send + Sync { &self, origin: TransactionOrigin, transactions: Vec, - ) -> impl Future>> + Send; + ) -> impl Future>> + Send; /// Submit a consensus transaction directly to the pool fn add_consensus_transaction( &self, tx: Recovered<::Consensus>, origin: TransactionOrigin, - ) -> impl Future> + Send { + ) -> impl Future> + Send { async move { let tx_hash = *tx.tx_hash(); diff --git a/crates/transaction-pool/tests/it/blobs.rs b/crates/transaction-pool/tests/it/blobs.rs index 9417c62278b..9f7e224a235 100644 --- a/crates/transaction-pool/tests/it/blobs.rs +++ b/crates/transaction-pool/tests/it/blobs.rs @@ -3,7 +3,7 @@ use reth_transaction_pool::{ error::PoolErrorKind, test_utils::{MockTransaction, MockTransactionFactory, TestPoolBuilder}, - PoolTransaction, TransactionOrigin, TransactionPool, + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, }; #[tokio::test(flavor = "multi_thread")] @@ -12,7 +12,7 @@ async fn blobs_exclusive() { let mut mock_tx_factory = MockTransactionFactory::default(); let blob_tx = mock_tx_factory.create_eip4844(); - let hash = txpool + let AddedTransactionOutcome { hash, .. } = txpool .add_transaction(TransactionOrigin::External, blob_tx.transaction.clone()) .await .unwrap(); diff --git a/crates/transaction-pool/tests/it/evict.rs b/crates/transaction-pool/tests/it/evict.rs index 721988888b3..5a869702457 100644 --- a/crates/transaction-pool/tests/it/evict.rs +++ b/crates/transaction-pool/tests/it/evict.rs @@ -9,7 +9,8 @@ use reth_transaction_pool::{ test_utils::{ MockFeeRange, MockTransactionDistribution, MockTransactionRatio, TestPool, TestPoolBuilder, }, - BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin, TransactionPool, TransactionPoolExt, + AddedTransactionOutcome, BlockInfo, PoolConfig, SubPoolLimit, TransactionOrigin, + TransactionPool, TransactionPoolExt, }; #[tokio::test(flavor = "multi_thread")] @@ -97,7 +98,7 @@ async fn only_blobs_eviction() { let results = pool.add_transactions(TransactionOrigin::External, set).await; for (i, result) in results.iter().enumerate() { match result { - Ok(hash) => { + Ok(AddedTransactionOutcome { hash, .. 
}) => { println!("✅ Inserted tx into pool with hash: {hash}"); } Err(e) => { diff --git a/crates/transaction-pool/tests/it/listeners.rs b/crates/transaction-pool/tests/it/listeners.rs index 5eb296e8ae7..d0a9c9c5aa8 100644 --- a/crates/transaction-pool/tests/it/listeners.rs +++ b/crates/transaction-pool/tests/it/listeners.rs @@ -113,7 +113,7 @@ async fn txpool_listener_all() { let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(outcome) if outcome.hash == *transaction.transaction.get_hash()); assert_matches!( all_tx_events.next().await, diff --git a/crates/transaction-pool/tests/it/pending.rs b/crates/transaction-pool/tests/it/pending.rs index be559c71eec..095dcfe5085 100644 --- a/crates/transaction-pool/tests/it/pending.rs +++ b/crates/transaction-pool/tests/it/pending.rs @@ -12,7 +12,7 @@ async fn txpool_new_pending_txs() { let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(outcome) if outcome.hash == *transaction.transaction.get_hash()); let mut best_txns = txpool.best_transactions(); assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash()); @@ -20,6 +20,6 @@ async fn txpool_new_pending_txs() { let transaction = mock_tx_factory.create_eip1559(); let added_result = txpool.add_transaction(TransactionOrigin::External, transaction.transaction.clone()).await; - assert_matches!(added_result, Ok(hash) if hash == *transaction.transaction.get_hash()); + assert_matches!(added_result, Ok(outcome) if outcome.hash == *transaction.transaction.get_hash()); assert_matches!(best_txns.next(), Some(tx) if tx.transaction.get_hash() == transaction.transaction.get_hash()); } diff --git a/examples/txpool-tracing/src/submit.rs b/examples/txpool-tracing/src/submit.rs index eb2c7957e04..b59cefe2f21 100644 --- a/examples/txpool-tracing/src/submit.rs +++ b/examples/txpool-tracing/src/submit.rs @@ -7,7 +7,10 @@ use alloy_primitives::{Address, TxHash, U256}; use futures_util::StreamExt; use reth_ethereum::{ node::api::{FullNodeComponents, NodeTypes}, - pool::{PoolTransaction, TransactionEvent, TransactionOrigin, TransactionPool}, + pool::{ + AddedTransactionOutcome, PoolTransaction, TransactionEvent, TransactionOrigin, + TransactionPool, + }, primitives::SignerRecoverable, rpc::eth::primitives::TransactionRequest, EthPrimitives, TransactionSigned, @@ -93,7 +96,7 @@ pub async fn submit_eth_transfer( gas_limit: u64, max_priority_fee_per_gas: u128, max_fee_per_gas: u128, -) -> eyre::Result +) -> eyre::Result where FC: FullNodeComponents>, { From 8ce656f83406338ddc7ef81f90c24168a69d129a Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Tue, 22 Jul 2025 18:55:36 +0200 Subject: [PATCH 260/305] feat: add TreePayloadValidator (#17451) --- crates/engine/tree/src/tree/metrics.rs | 6 +- crates/engine/tree/src/tree/mod.rs | 2 + .../engine/tree/src/tree/payload_validator.rs | 967 ++++++++++++++++++ 3 files changed, 972 insertions(+), 3 deletions(-) create mode 100644 crates/engine/tree/src/tree/payload_validator.rs diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index d3478b6c3ff..4d5b58c6f04 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ 
b/crates/engine/tree/src/tree/metrics.rs @@ -7,7 +7,7 @@ use reth_trie::updates::TrieUpdates; /// Metrics for the `EngineApi`. #[derive(Debug, Default)] -pub(crate) struct EngineApiMetrics { +pub struct EngineApiMetrics { /// Engine API-specific metrics. pub(crate) engine: EngineMetrics, /// Block executor metrics. @@ -15,13 +15,13 @@ pub(crate) struct EngineApiMetrics { /// Metrics for block validation pub(crate) block_validation: BlockValidationMetrics, /// A copy of legacy blockchain tree metrics, to be replaced when we replace the old tree - pub(crate) tree: TreeMetrics, + pub tree: TreeMetrics, } /// Metrics for the entire blockchain tree #[derive(Metrics)] #[metrics(scope = "blockchain_tree")] -pub(super) struct TreeMetrics { +pub struct TreeMetrics { /// The highest block number in the canonical chain pub canonical_chain_height: Gauge, /// The number of reorgs diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index a029df3b3e4..9ec5d0b9d78 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -72,6 +72,7 @@ mod invalid_block_hook; mod invalid_headers; mod metrics; mod payload_processor; +pub mod payload_validator; mod persistence_state; pub mod precompile_cache; #[cfg(test)] @@ -85,6 +86,7 @@ pub use block_buffer::BlockBuffer; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use invalid_headers::InvalidHeaderCache; pub use payload_processor::*; +pub use payload_validator::TreePayloadValidator; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::TreeConfig; use reth_evm::execute::BlockExecutionOutput; diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs new file mode 100644 index 00000000000..1145aebfb6f --- /dev/null +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -0,0 +1,967 @@ +//! Concrete implementation of the `PayloadValidator` trait. 
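+//!
+//! At a high level, validation proceeds as consensus checks, then block
+//! execution, then post-execution checks, then state root computation. A
+//! sketch of how a caller might consume the result (illustrative only;
+//! `validator`, `block` and `ctx` are assumed to be in scope):
+//!
+//! ```ignore
+//! match validator.validate_block_with_state(block, ctx)? {
+//!     PayloadValidationOutcome::Valid { block, trie_updates } => {
+//!         // insert the block into the tree and keep the trie updates
+//!     }
+//!     PayloadValidationOutcome::Invalid { block, error } => {
+//!         // surface the error; the constructed block is kept for context
+//!     }
+//! }
+//! ```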
+ +use crate::tree::{ + cached_state::CachedStateProvider, + instrumented_state::InstrumentedStateProvider, + payload_processor::PayloadProcessor, + precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap}, + ConsistentDbView, EngineApiMetrics, EngineApiTreeState, InvalidHeaderCache, PersistingKind, + StateProviderDatabase, TreeConfig, +}; +use alloy_eips::BlockNumHash; +use alloy_evm::{block::BlockExecutor, Evm}; +use alloy_primitives::B256; +use reth_chain_state::CanonicalInMemoryState; +use reth_consensus::{ConsensusError, FullConsensus}; +use reth_engine_primitives::InvalidBlockHook; +use reth_evm::{ConfigureEvm, SpecFor}; +use reth_payload_primitives::NewPayloadError; +use reth_primitives_traits::{ + AlloyBlockHeader, Block, BlockBody, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, +}; +use reth_provider::{ + BlockExecutionOutput, BlockNumReader, BlockReader, DBProvider, DatabaseProviderFactory, + HashedPostStateProvider, HeaderProvider, ProviderError, StateCommitmentProvider, StateProvider, + StateProviderFactory, StateReader, +}; +use reth_revm::db::State; +use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; +use reth_trie_db::{DatabaseHashedPostState, StateCommitment}; +use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; +use std::{collections::HashMap, sync::Arc, time::Instant}; +use tracing::{debug, trace}; + +/// Outcome of validating a payload +#[derive(Debug)] +pub enum PayloadValidationOutcome { + /// Payload is valid and produced a block + Valid { + /// The block created from the payload + block: RecoveredBlock, + /// The trie updates from state root computation + trie_updates: reth_trie::updates::TrieUpdates, + }, + /// Payload is invalid but block construction succeeded + Invalid { + /// The block created from the payload + block: RecoveredBlock, + /// The validation error + error: NewPayloadError, + }, +} + +/// Information about the current persistence state for validation context +#[derive(Debug, Clone, Copy)] +pub struct PersistenceInfo { + /// The last persisted block + pub last_persisted_block: BlockNumHash, + /// The current persistence action, if any + pub current_action: Option, +} + +impl PersistenceInfo { + /// Creates a new persistence info with no current action + pub const fn new(last_persisted_block: BlockNumHash) -> Self { + Self { last_persisted_block, current_action: None } + } + + /// Creates persistence info with a saving blocks action + pub const fn with_saving_blocks( + last_persisted_block: BlockNumHash, + highest: BlockNumHash, + ) -> Self { + Self { + last_persisted_block, + current_action: Some(PersistenceAction::SavingBlocks { highest }), + } + } + + /// Creates persistence info with a removing blocks action + pub const fn with_removing_blocks( + last_persisted_block: BlockNumHash, + new_tip_num: u64, + ) -> Self { + Self { + last_persisted_block, + current_action: Some(PersistenceAction::RemovingBlocks { new_tip_num }), + } + } +} + +/// The type of persistence action currently in progress +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PersistenceAction { + /// Saving blocks to disk + SavingBlocks { + /// The highest block being saved + highest: BlockNumHash, + }, + /// Removing blocks from disk + RemovingBlocks { + /// The new tip after removal + new_tip_num: u64, + }, +} + +/// Context providing access to tree state during validation +pub struct TreeCtx<'a, N: NodePrimitives> { + /// The engine API tree state + state: &'a EngineApiTreeState, + /// Information 
about the current persistence state + persistence_info: PersistenceInfo, + /// Reference to the canonical in-memory state + canonical_in_memory_state: &'a CanonicalInMemoryState, +} + +impl<'a, N: NodePrimitives> std::fmt::Debug for TreeCtx<'a, N> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TreeCtx") + .field("state", &"EngineApiTreeState") + .field("persistence_info", &self.persistence_info) + .field("canonical_in_memory_state", &self.canonical_in_memory_state) + .finish() + } +} + +impl<'a, N: NodePrimitives> TreeCtx<'a, N> { + /// Creates a new tree context + pub const fn new( + state: &'a EngineApiTreeState, + persistence_info: PersistenceInfo, + canonical_in_memory_state: &'a CanonicalInMemoryState, + ) -> Self { + Self { state, persistence_info, canonical_in_memory_state } + } + + /// Returns a reference to the engine API tree state + pub const fn state(&self) -> &'a EngineApiTreeState { + self.state + } + + /// Returns a reference to the persistence info + pub const fn persistence_info(&self) -> &PersistenceInfo { + &self.persistence_info + } + + /// Returns a reference to the canonical in-memory state + pub const fn canonical_in_memory_state(&self) -> &'a CanonicalInMemoryState { + self.canonical_in_memory_state + } +} + +/// A helper type that provides reusable payload validation logic for network-specific validators. +/// +/// This type contains common validation, execution, and state root computation logic that can be +/// used by network-specific payload validators (e.g., Ethereum, Optimism). It is not meant to be +/// used as a standalone component, but rather as a building block for concrete implementations. +pub struct TreePayloadValidator +where + N: NodePrimitives, + P: DatabaseProviderFactory + + BlockReader + + BlockNumReader + + StateProviderFactory + + StateReader + + StateCommitmentProvider + + HashedPostStateProvider + + HeaderProvider
+ + Clone + + 'static, + C: ConfigureEvm + 'static, +{ + /// Provider for database access. + provider: P, + /// Consensus implementation for validation. + consensus: Arc>, + /// EVM configuration. + evm_config: C, + /// Configuration for the tree. + config: TreeConfig, + /// Payload processor for state root computation. + payload_processor: PayloadProcessor, + /// Precompile cache map. + precompile_cache_map: PrecompileCacheMap>, + /// Precompile cache metrics. + precompile_cache_metrics: HashMap, + /// Tracks invalid headers to prevent duplicate hook calls. + invalid_headers: InvalidHeaderCache, + /// Hook to call when invalid blocks are encountered. + invalid_block_hook: Box>, + /// Metrics for the engine api. + metrics: EngineApiMetrics, +} + +impl std::fmt::Debug for TreePayloadValidator +where + N: NodePrimitives, + P: DatabaseProviderFactory + + BlockReader + + BlockNumReader + + StateProviderFactory + + StateReader + + StateCommitmentProvider + + HashedPostStateProvider + + HeaderProvider
+ + Clone + + std::fmt::Debug + + 'static, + C: ConfigureEvm + 'static, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TreePayloadValidator") + .field("provider", &self.provider) + .field("consensus", &"Arc") + .field("evm_config", &self.evm_config) + .field("config", &self.config) + .field("payload_processor", &self.payload_processor) + .field("precompile_cache_map", &self.precompile_cache_map) + .field("precompile_cache_metrics", &self.precompile_cache_metrics) + .field("invalid_headers", &self.invalid_headers) + .field("invalid_block_hook", &"Box") + .field("metrics", &self.metrics) + .finish() + } +} + +impl TreePayloadValidator +where + N: NodePrimitives, + P: DatabaseProviderFactory + + BlockReader + + BlockNumReader + + StateProviderFactory + + StateReader + + StateCommitmentProvider + + HashedPostStateProvider + + HeaderProvider
+ + Clone + + 'static, + C: ConfigureEvm + 'static, +{ + /// Creates a new `TreePayloadValidator`. + #[allow(clippy::too_many_arguments)] + pub fn new( + provider: P, + consensus: Arc>, + evm_config: C, + config: TreeConfig, + payload_processor: PayloadProcessor, + precompile_cache_map: PrecompileCacheMap>, + invalid_headers_cache_size: u32, + invalid_block_hook: Box>, + metrics: EngineApiMetrics, + ) -> Self { + Self { + provider, + consensus, + evm_config, + config, + payload_processor, + precompile_cache_map, + precompile_cache_metrics: HashMap::new(), + invalid_headers: InvalidHeaderCache::new(invalid_headers_cache_size), + invalid_block_hook, + metrics, + } + } + + /// Validates a block that has already been converted from a payload. + /// + /// This method performs: + /// - Consensus validation + /// - Block execution + /// - State root computation + /// - Fork detection + pub fn validate_block_with_state( + &mut self, + block: RecoveredBlock, + ctx: TreeCtx<'_, N>, + ) -> Result, NewPayloadError> + where + N::Block: Block>, + { + // Helper macro to preserve block context when returning errors + macro_rules! ensure_ok { + ($expr:expr) => { + match $expr { + Ok(val) => val, + Err(e) => { + let error = NewPayloadError::Other(Box::new(e)); + return Ok(PayloadValidationOutcome::Invalid { block, error }); + } + } + }; + } + + // Extract references we need before moving ctx + let tree_state = ctx.state(); + let persistence_info = *ctx.persistence_info(); + + // Then validate the block using the validate_block method + if let Err(consensus_error) = self.validate_block(&block, ctx) { + trace!(target: "engine::tree", block=?block.num_hash(), ?consensus_error, "Block validation failed"); + let payload_error = NewPayloadError::Other(Box::new(consensus_error)); + return Ok(PayloadValidationOutcome::Invalid { block, error: payload_error }); + } + + // Get the parent block's state to execute against + let parent_hash = block.parent_hash(); + + // Get parent header for error context + let parent_header = ensure_ok!(self.get_parent_header(parent_hash, tree_state)); + + // Create StateProviderBuilder + let provider_builder = match self.create_state_provider_builder(parent_hash, tree_state) { + Ok(builder) => builder, + Err(e) => { + let error = NewPayloadError::Other(Box::new(e)); + return Ok(PayloadValidationOutcome::Invalid { block, error }); + } + }; + + // Determine persisting kind and state root task decision early for handle creation + let persisting_kind = + self.persisting_kind_for(block.header(), &persistence_info, tree_state); + let run_parallel_state_root = + persisting_kind.can_run_parallel_state_root() && !self.config.state_root_fallback(); + let has_ancestors_with_missing_trie_updates = + self.has_ancestors_with_missing_trie_updates(block.sealed_header(), tree_state); + let use_state_root_task = run_parallel_state_root && + self.config.use_state_root_task() && + !has_ancestors_with_missing_trie_updates; + + // Build the state provider + let state_provider = ensure_ok!(provider_builder.build()); + + // Create a PayloadHandle for state hook support + let (mut handle, use_state_root_task) = self.spawn_payload_tasks( + &block, + provider_builder, + use_state_root_task, + tree_state, + &persistence_info, + ); + + // Execute the block with proper state provider wrapping + let (output, execution_time) = match self.execute_block_with_state_provider( + state_provider, + &block, + &handle, + ) { + Ok(result) => result, + Err(error) => { + trace!(target: "engine::tree", block=?block.num_hash(), 
?error, "Block execution failed"); + return Ok(PayloadValidationOutcome::Invalid { block, error }); + } + }; + + debug!(target: "engine::tree", block=?block.num_hash(), ?execution_time, "Block executed"); + + // Stop prewarming after execution + handle.stop_prewarming_execution(); + + // Perform post-execution validation + if let Err(consensus_error) = self.consensus.validate_block_post_execution(&block, &output) + { + trace!(target: "engine::tree", block=?block.num_hash(), ?consensus_error, "Post-execution validation failed"); + let error = NewPayloadError::Other(Box::new(consensus_error)); + return Ok(PayloadValidationOutcome::Invalid { block, error }); + } + + // Compute hashed post state + let hashed_state = self.provider.hashed_post_state(&output.state); + + debug!(target: "engine::tree", block=?block.num_hash(), "Calculating block state root"); + + debug!( + target: "engine::tree", + block=?block.num_hash(), + ?persisting_kind, + run_parallel_state_root, + has_ancestors_with_missing_trie_updates, + use_state_root_task, + config_allows_state_root_task=self.config.use_state_root_task(), + "Deciding which state root algorithm to run" + ); + + let state_root_start = Instant::now(); + let (state_root, trie_updates) = match self.compute_state_root_with_strategy( + &block, + &hashed_state, + tree_state, + persisting_kind, + run_parallel_state_root, + use_state_root_task, + &mut handle, + execution_time, + ) { + Ok(result) => result, + Err(error) => return Ok(PayloadValidationOutcome::Invalid { block, error }), + }; + + let state_root_elapsed = state_root_start.elapsed(); + self.metrics + .block_validation + .record_state_root(&trie_updates, state_root_elapsed.as_secs_f64()); + + debug!(target: "engine::tree", ?state_root, ?state_root_elapsed, block=?block.num_hash(), "Calculated state root"); + + // Ensure state root matches + if state_root != block.header().state_root() { + // call post-block hook + self.on_invalid_block( + &parent_header, + &block, + &output, + Some((&trie_updates, state_root)), + ); + let error = NewPayloadError::Other(Box::new(ConsensusError::BodyStateRootDiff( + GotExpected { got: state_root, expected: block.header().state_root() }.into(), + ))); + return Ok(PayloadValidationOutcome::Invalid { block, error }); + } + + Ok(PayloadValidationOutcome::Valid { block, trie_updates }) + } + + /// Validates a block according to consensus rules. + /// + /// This method performs: + /// - Header validation + /// - Pre-execution validation + /// - Parent header validation + /// + /// This method is intended to be used by network-specific validators as part of their + /// block validation flow. 
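+    ///
+    /// Illustrative call shape (not from this patch; assumes a `validator`
+    /// instance and a prepared `TreeCtx` are in scope):
+    ///
+    /// ```ignore
+    /// validator.validate_block(&block, ctx)?;
+    /// ```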
+ pub fn validate_block( + &self, + block: &RecoveredBlock, + ctx: TreeCtx<'_, N>, + ) -> Result<(), ConsensusError> + where + N::Block: Block, + { + let block_num_hash = block.num_hash(); + debug!(target: "engine::tree", block=?block_num_hash, parent = ?block.header().parent_hash(), "Validating downloaded block"); + + // Validate block consensus rules + trace!(target: "engine::tree", block=?block_num_hash, "Validating block header"); + self.consensus.validate_header(block.sealed_header())?; + + trace!(target: "engine::tree", block=?block_num_hash, "Validating block pre-execution"); + self.consensus.validate_block_pre_execution(block)?; + + // Get parent header for validation + let parent_hash = block.header().parent_hash(); + let parent_header = self + .get_parent_header(parent_hash, ctx.state()) + .map_err(|e| ConsensusError::Other(e.to_string()))?; + + // Validate against parent + trace!(target: "engine::tree", block=?block_num_hash, "Validating block against parent"); + self.consensus.validate_header_against_parent(block.sealed_header(), &parent_header)?; + + debug!(target: "engine::tree", block=?block_num_hash, "Block validation complete"); + Ok(()) + } + + /// Executes the given block using the provided state provider. + fn execute_block( + &mut self, + state_provider: &S, + block: &RecoveredBlock, + handle: &crate::tree::PayloadHandle, + ) -> Result<(BlockExecutionOutput, Instant), NewPayloadError> + where + S: StateProvider, + { + trace!(target: "engine::tree", block = ?block.num_hash(), "Executing block"); + + // Create state database + let mut db = State::builder() + .with_database(StateProviderDatabase::new(state_provider)) + .with_bundle_update() + .without_state_clear() + .build(); + + // Configure executor for the block + let mut executor = self.evm_config.executor_for_block(&mut db, block); + + // Configure precompile caching if enabled + if !self.config.precompile_cache_disabled() { + // Get the spec id before the closure + let spec_id = *self.evm_config.evm_env(block.header()).spec_id(); + + executor.evm_mut().precompiles_mut().map_precompiles(|address, precompile| { + let metrics = self + .precompile_cache_metrics + .entry(*address) + .or_insert_with(|| CachedPrecompileMetrics::new_with_address(*address)) + .clone(); + let cache = self.precompile_cache_map.cache_for_address(*address); + CachedPrecompile::wrap(precompile, cache, spec_id, Some(metrics)) + }); + } + + // Execute the block + let start = Instant::now(); + let output = self + .metrics + .executor + .execute_metered(executor, block, Box::new(handle.state_hook())) + .map_err(|e| NewPayloadError::Other(Box::new(e)))?; + + Ok((output, start)) + } + + /// Executes a block with proper state provider wrapping and optional instrumentation. + /// + /// This method wraps the base state provider with: + /// 1. `CachedStateProvider` for cache support + /// 2. 
`InstrumentedStateProvider` for metrics (if enabled) + fn execute_block_with_state_provider( + &mut self, + state_provider: S, + block: &RecoveredBlock, + handle: &crate::tree::PayloadHandle, + ) -> Result<(BlockExecutionOutput, Instant), NewPayloadError> + where + S: StateProvider, + { + // Wrap state provider with cached state provider for execution + let cached_state_provider = CachedStateProvider::new_with_caches( + state_provider, + handle.caches(), + handle.cache_metrics(), + ); + + // Execute the block with optional instrumentation + if self.config.state_provider_metrics() { + let instrumented_provider = + InstrumentedStateProvider::from_state_provider(&cached_state_provider); + let result = self.execute_block(&instrumented_provider, block, handle); + instrumented_provider.record_total_latency(); + result + } else { + self.execute_block(&cached_state_provider, block, handle) + } + } + + /// Computes the state root for the given block. + /// + /// This method attempts to compute the state root in parallel if configured and conditions + /// allow, otherwise falls back to synchronous computation. + fn compute_state_root( + &self, + parent_hash: B256, + hashed_state: &HashedPostState, + ) -> Result<(B256, TrieUpdates), NewPayloadError> { + // Get the state provider for the parent block + let state_provider = self + .provider + .history_by_block_hash(parent_hash) + .map_err(|e| NewPayloadError::Other(Box::new(e)))?; + + // Compute the state root with trie updates + let (state_root, trie_updates) = state_provider + .state_root_with_updates(hashed_state.clone()) + .map_err(|e| NewPayloadError::Other(Box::new(e)))?; + + Ok((state_root, trie_updates)) + } + + /// Attempts to get the state root from the background task. + fn try_state_root_from_task( + &self, + handle: &mut crate::tree::PayloadHandle, + block: &RecoveredBlock, + execution_time: Instant, + ) -> Option<(B256, TrieUpdates)> { + match handle.state_root() { + Ok(crate::tree::payload_processor::sparse_trie::StateRootComputeOutcome { + state_root, + trie_updates, + }) => { + let elapsed = execution_time.elapsed(); + debug!(target: "engine::tree", ?state_root, ?elapsed, "State root task finished"); + + // Double check the state root matches what we expect + if state_root == block.header().state_root() { + Some((state_root, trie_updates)) + } else { + debug!( + target: "engine::tree", + ?state_root, + block_state_root = ?block.header().state_root(), + "State root task returned incorrect state root" + ); + None + } + } + Err(error) => { + debug!(target: "engine::tree", %error, "Background state root computation failed"); + None + } + } + } + + /// Computes state root with appropriate strategy based on configuration. 
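+    ///
+    /// The fallback ladder implemented below is:
+    ///
+    /// ```text
+    /// background state-root task -> parallel state root -> synchronous state root
+    /// ```
+    ///
+    /// where each arrow is taken only if the previous strategy is disabled,
+    /// fails, or produces a root that does not match the block header.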
+ #[allow(clippy::too_many_arguments)] + fn compute_state_root_with_strategy( + &self, + block: &RecoveredBlock, + hashed_state: &HashedPostState, + tree_state: &EngineApiTreeState, + persisting_kind: PersistingKind, + run_parallel_state_root: bool, + use_state_root_task: bool, + handle: &mut crate::tree::PayloadHandle, + execution_time: Instant, + ) -> Result<(B256, TrieUpdates), NewPayloadError> { + let parent_hash = block.parent_hash(); + + if !run_parallel_state_root { + // Use synchronous computation + return self.compute_state_root(parent_hash, hashed_state); + } + + // Parallel state root is enabled + if use_state_root_task { + debug!(target: "engine::tree", block=?block.num_hash(), "Using sparse trie state root algorithm"); + + // Try to get state root from background task first + if let Some((state_root, trie_updates)) = + self.try_state_root_from_task(handle, block, execution_time) + { + return Ok((state_root, trie_updates)); + } + + // Background task failed or returned incorrect root, fall back to parallel + debug!(target: "engine::tree", "Falling back to parallel state root computation"); + } else { + debug!(target: "engine::tree", block=?block.num_hash(), "Using parallel state root algorithm"); + } + + // Try parallel computation + match self.compute_state_root_parallel( + parent_hash, + hashed_state, + tree_state, + persisting_kind, + ) { + Ok(result) => Ok(result), + Err(ParallelStateRootError::Provider(ProviderError::ConsistentView(error))) => { + debug!(target: "engine::tree", %error, "Parallel state root computation failed consistency check, falling back to synchronous"); + self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); + self.compute_state_root(parent_hash, hashed_state) + } + Err(error) => Err(NewPayloadError::Other(Box::new(error))), + } + } + + /// Computes state root in parallel. + /// + /// # Returns + /// + /// Returns `Ok(_)` if computed successfully. + /// Returns `Err(_)` if error was encountered during computation. + /// `Err(ProviderError::ConsistentView(_))` can be safely ignored and fallback computation + /// should be used instead. + fn compute_state_root_parallel( + &self, + parent_hash: B256, + hashed_state: &HashedPostState, + tree_state: &EngineApiTreeState, + persisting_kind: PersistingKind, + ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { + let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; + + // Compute trie input using the tree state + let mut input = self.compute_trie_input( + consistent_view.provider_ro()?, + parent_hash, + tree_state, + persisting_kind, + )?; + + // Extend with block we are validating root for + input.append_ref(hashed_state); + + ParallelStateRoot::new(consistent_view, input).incremental_root_with_updates() + } + + /// Check if the given block has any ancestors with missing trie updates. + /// + /// This walks back through the chain starting from the parent of the target block + /// and checks if any ancestor blocks are missing trie updates. 
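+    ///
+    /// A `true` result disables the background state-root task for the block
+    /// being validated (see `validate_block_with_state`), since that task
+    /// builds on the trie updates of in-memory ancestors.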
+ fn has_ancestors_with_missing_trie_updates( + &self, + target_header: &SealedHeader, + tree_state: &EngineApiTreeState, + ) -> bool { + // Walk back through the chain starting from the parent of the target block + let mut current_hash = target_header.parent_hash(); + while let Some(block) = tree_state.tree_state.executed_block_by_hash(current_hash) { + // Check if this block is missing trie updates + if block.trie.is_missing() { + return true; + } + + // Move to the parent block + current_hash = block.block.recovered_block.parent_hash(); + } + + false + } + + /// Determines the persisting kind for the given block based on persistence info. + /// + /// This is adapted from the `persisting_kind_for` method in `EngineApiTreeHandler`. + fn persisting_kind_for( + &self, + block: &N::BlockHeader, + persistence_info: &PersistenceInfo, + tree_state: &EngineApiTreeState, + ) -> PersistingKind { + // Check that we're currently persisting + let Some(action) = &persistence_info.current_action else { + return PersistingKind::NotPersisting; + }; + + // Check that the persistence action is saving blocks, not removing them + let PersistenceAction::SavingBlocks { highest } = action else { + return PersistingKind::PersistingNotDescendant; + }; + + // The block being validated can only be a descendant if its number is higher than + // the highest block persisting. Otherwise, it's likely a fork of a lower block. + if block.number() > highest.number && tree_state.tree_state.is_descendant(*highest, block) { + PersistingKind::PersistingDescendant + } else { + PersistingKind::PersistingNotDescendant + } + } + + /// Creates a payload handle for the given block. + /// + /// This method decides whether to use full spawn (with background state root tasks) + /// or cache-only spawn based on the current conditions. + /// + /// Returns a tuple of (`PayloadHandle`, `use_state_root_task`) where `use_state_root_task` + /// indicates whether the state root task was actually enabled (it may be disabled + /// if prefix sets are non-empty). 
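+    ///
+    /// Every failure point degrades gracefully to a cache-only spawn:
+    ///
+    /// ```text
+    /// consistent view -> read-only provider -> trie input -> empty prefix sets
+    /// ```
+    ///
+    /// Only when all four steps succeed is the full spawn, with background
+    /// state root computation, used.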
+ fn spawn_payload_tasks( + &mut self, + block: &RecoveredBlock, + provider_builder: crate::tree::StateProviderBuilder, + use_state_root_task: bool, + tree_state: &EngineApiTreeState, + persistence_info: &PersistenceInfo, + ) -> (crate::tree::PayloadHandle, bool) { + let header = block.clone_sealed_header(); + let txs = block.clone_transactions_recovered().collect(); + + if !use_state_root_task { + // Use cache-only spawn when state root tasks are not needed + let handle = + self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder); + return (handle, false); + } + + // Try to use full spawn with background state root computation support + let Ok(consistent_view) = ConsistentDbView::new_with_latest_tip(self.provider.clone()) + else { + // Fall back to cache-only spawn if consistent view fails + let handle = + self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder); + return (handle, false); + }; + + let Ok(provider_ro) = consistent_view.provider_ro() else { + // Fall back to cache-only spawn if provider creation fails + let handle = + self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder); + return (handle, false); + }; + + // For the handle creation, we need to determine persisting kind again + // This could be optimized by passing it from validate_payload + let persisting_kind = + self.persisting_kind_for(block.header(), persistence_info, tree_state); + + let trie_input_start = Instant::now(); + let Ok(trie_input) = + self.compute_trie_input(provider_ro, block.parent_hash(), tree_state, persisting_kind) + else { + // Fall back to cache-only spawn if trie input computation fails + let handle = + self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder); + return (handle, false); + }; + let trie_input_elapsed = trie_input_start.elapsed(); + self.metrics.block_validation.trie_input_duration.record(trie_input_elapsed.as_secs_f64()); + + // Use state root task only if prefix sets are empty, otherwise proof generation is too + // expensive because it requires walking over the paths in the prefix set in every + // proof. + if trie_input.prefix_sets.is_empty() { + let handle = self.payload_processor.spawn( + header, + txs, + provider_builder, + consistent_view, + trie_input, + &self.config, + ); + (handle, true) + } else { + debug!(target: "engine::tree", block=?block.num_hash(), "Disabling state root task due to non-empty prefix sets"); + let handle = + self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder); + (handle, false) + } + } + + /// Retrieves the parent header from tree state or database. + fn get_parent_header( + &self, + parent_hash: B256, + tree_state: &EngineApiTreeState, + ) -> Result, ProviderError> { + // First try to get from tree state + if let Some(parent_block) = tree_state.tree_state.executed_block_by_hash(parent_hash) { + Ok(parent_block.block.recovered_block.sealed_header().clone()) + } else { + // Fallback to database + let header = self + .provider + .header(&parent_hash)? + .ok_or_else(|| ProviderError::HeaderNotFound(parent_hash.into()))?; + Ok(SealedHeader::seal_slow(header)) + } + } + + /// Creates a `StateProviderBuilder` for the given parent hash. + /// + /// This method checks if the parent is in the tree state (in-memory) or persisted to disk, + /// and creates the appropriate provider builder. 
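+    ///
+    /// Concretely (as implemented below): an in-memory parent yields a
+    /// builder with the in-memory blocks as a state overlay, a persisted
+    /// parent yields a plain historical builder, and an unknown parent
+    /// surfaces `ProviderError::HeaderNotFound`.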
+    fn create_state_provider_builder(
+        &self,
+        parent_hash: B256,
+        tree_state: &EngineApiTreeState,
+    ) -> Result, ProviderError> {
+        if let Some((historical, blocks)) = tree_state.tree_state.blocks_by_hash(parent_hash) {
+            // Parent is in memory, create builder with overlay
+            Ok(crate::tree::StateProviderBuilder::new(
+                self.provider.clone(),
+                historical,
+                Some(blocks),
+            ))
+        } else {
+            // Parent is not in memory, check if it's persisted
+            self.provider
+                .header(&parent_hash)?
+                .ok_or_else(|| ProviderError::HeaderNotFound(parent_hash.into()))?;
+            // Parent is persisted, create builder without overlay
+            Ok(crate::tree::StateProviderBuilder::new(self.provider.clone(), parent_hash, None))
+        }
+    }
+
+    /// Called when an invalid block is encountered during validation.
+    fn on_invalid_block(
+        &mut self,
+        parent_header: &SealedHeader,
+        block: &RecoveredBlock,
+        output: &BlockExecutionOutput,
+        trie_updates: Option<(&TrieUpdates, B256)>,
+    ) {
+        if self.invalid_headers.get(&block.hash()).is_some() {
+            // we already marked this block as invalid
+            return;
+        }
+        self.invalid_block_hook.on_invalid_block(parent_header, block, output, trie_updates);
+    }
+
+    /// Computes the trie input at the provided parent hash.
+    fn compute_trie_input(
+        &self,
+        provider: TP,
+        parent_hash: B256,
+        tree_state: &EngineApiTreeState,
+        persisting_kind: PersistingKind,
+    ) -> Result
+    where
+        TP: DBProvider + BlockNumReader,
+    {
+        let mut input = TrieInput::default();
+
+        let best_block_number =
+            provider.best_block_number().map_err(ParallelStateRootError::Provider)?;
+
+        // Get blocks from tree state
+        let (historical, mut blocks) = tree_state
+            .tree_state
+            .blocks_by_hash(parent_hash)
+            .map_or_else(|| (parent_hash.into(), vec![]), |(hash, blocks)| (hash.into(), blocks));
+
+        // Filter blocks based on persisting kind
+        if matches!(persisting_kind, PersistingKind::PersistingDescendant) {
+            // If we are persisting a descendant, filter out blocks up to the last persisted block
+            let last_persisted_block_number = provider
+                .convert_hash_or_number(historical)
+                .map_err(ParallelStateRootError::Provider)?
+                .ok_or_else(|| {
+                    ParallelStateRootError::Provider(ProviderError::BlockHashNotFound(
+                        historical.as_hash().unwrap(),
+                    ))
+                })?;
+
+            blocks.retain(|b| b.recovered_block().number() > last_persisted_block_number);
+        }
+
+        if blocks.is_empty() {
+            debug!(target: "engine::tree", %parent_hash, "Parent found on disk");
+        } else {
+            debug!(target: "engine::tree", %parent_hash, %historical, blocks = blocks.len(), "Parent found in memory");
+        }
+
+        // Convert the historical block to the block number
+        let block_number = provider
+            .convert_hash_or_number(historical)
+            .map_err(ParallelStateRootError::Provider)?
+ .ok_or_else(|| { + ParallelStateRootError::Provider(ProviderError::BlockHashNotFound( + historical.as_hash().unwrap(), + )) + })?; + + // Retrieve revert state for historical block + let revert_state = if block_number == best_block_number { + // No revert state needed if we're at the best block + debug!(target: "engine::tree", block_number, best_block_number, "Empty revert state"); + HashedPostState::default() + } else { + let revert_state = HashedPostState::from_reverts::< + ::KeyHasher, + >(provider.tx_ref(), block_number + 1) + .map_err(|e| ParallelStateRootError::Provider(ProviderError::from(e)))?; + debug!( + target: "engine::tree", + block_number, + best_block_number, + accounts = revert_state.accounts.len(), + storages = revert_state.storages.len(), + "Non-empty revert state" + ); + revert_state + }; + input.append(revert_state); + + // Extend with contents of parent in-memory blocks + input.extend_with_blocks( + blocks.iter().rev().map(|block| (block.hashed_state(), block.trie_updates())), + ); + + Ok(input) + } +} From a1a4f2df7aa67d16b2acd8f63d364d3c597f7ecd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Tue, 22 Jul 2025 19:19:12 +0200 Subject: [PATCH 261/305] refactor: use alloy `Log::collect_for_receipt` instead of macro to collect logs (#17569) --- crates/rpc/rpc-eth-types/src/receipt.rs | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 9b162ca8b93..37700ffcd1d 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -33,27 +33,11 @@ where let cumulative_gas_used = receipt.cumulative_gas_used(); let logs_bloom = receipt.bloom(); - macro_rules! 
build_rpc_logs { - ($logs:expr) => { - $logs - .enumerate() - .map(|(tx_log_idx, log)| Log { - inner: log, - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - block_timestamp: Some(meta.timestamp), - transaction_hash: Some(meta.tx_hash), - transaction_index: Some(meta.index), - log_index: Some((next_log_index + tx_log_idx) as u64), - removed: false, - }) - .collect() - }; - } - let logs = match receipt { - Cow::Borrowed(r) => build_rpc_logs!(r.logs().iter().cloned()), - Cow::Owned(r) => build_rpc_logs!(r.into_logs().into_iter()), + Cow::Borrowed(r) => { + Log::collect_for_receipt(*next_log_index, *meta, r.logs().iter().cloned()) + } + Cow::Owned(r) => Log::collect_for_receipt(*next_log_index, *meta, r.into_logs()), }; let rpc_receipt = alloy_rpc_types_eth::Receipt { status, cumulative_gas_used, logs }; From 58235419bb855728d9f2afbc8bd59cf8690e8804 Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin <5773434+shekhirin@users.noreply.github.com> Date: Tue, 22 Jul 2025 19:51:11 +0100 Subject: [PATCH 262/305] feat(reth-bench): add gas throughput chart to python script (#17572) Co-authored-by: Claude --- .gitignore | 5 + .../scripts/compare_newpayload_latency.py | 251 ++++++++++++++++-- 2 files changed, 235 insertions(+), 21 deletions(-) diff --git a/.gitignore b/.gitignore index 58813003cfb..3a38da5cb1e 100644 --- a/.gitignore +++ b/.gitignore @@ -63,3 +63,8 @@ recipe.json _ # broken links report links-report.json + +# Python cache +__pycache__/ +*.py[cod] +*$py.class diff --git a/bin/reth-bench/scripts/compare_newpayload_latency.py b/bin/reth-bench/scripts/compare_newpayload_latency.py index d0b914b6963..f434d034b9a 100755 --- a/bin/reth-bench/scripts/compare_newpayload_latency.py +++ b/bin/reth-bench/scripts/compare_newpayload_latency.py @@ -16,6 +16,8 @@ # # - A simple line graph plotting the latencies of the two files against each # other. +# +# - A gas per second (gas/s) chart showing throughput over time. 
import argparse @@ -23,25 +25,80 @@ import matplotlib.pyplot as plt import numpy as np import sys +import os +from matplotlib.ticker import FuncFormatter + +def get_output_filename(base_path, suffix=None): + """Generate output filename with optional suffix.""" + if suffix is None: + return base_path + + # Split the base path into directory, name, and extension + dir_name = os.path.dirname(base_path) + base_name = os.path.basename(base_path) + name, ext = os.path.splitext(base_name) + + # Create new filename with suffix + new_name = f"{name}_{suffix}{ext}" + return os.path.join(dir_name, new_name) if dir_name else new_name + +def format_gas_units(value, pos): + """Format gas values with appropriate units (gas, Kgas, Mgas, Ggas, Tgas).""" + if value == 0: + return '0' + + # Define unit thresholds and labels + units = [ + (1e12, 'Tgas'), # Teragas + (1e9, 'Ggas'), # Gigagas + (1e6, 'Mgas'), # Megagas + (1e3, 'Kgas'), # Kilogas + (1, 'gas') # gas + ] + + abs_value = abs(value) + for threshold, unit in units: + if abs_value >= threshold: + scaled_value = value / threshold + # Format with appropriate precision + if scaled_value >= 100: + return f'{scaled_value:.0f}{unit}/s' + elif scaled_value >= 10: + return f'{scaled_value:.1f}{unit}/s' + else: + return f'{scaled_value:.2f}{unit}/s' + + return f'{value:.0f}gas/s' + +def moving_average(data, window_size): + """Calculate moving average with given window size.""" + if window_size <= 1: + return data + + # Use pandas for efficient rolling mean calculation + series = pd.Series(data) + return series.rolling(window=window_size, center=True, min_periods=1).mean().values def main(): parser = argparse.ArgumentParser(description='Generate histogram of total_latency percent differences between two CSV files') parser.add_argument('baseline_csv', help='First CSV file, used as the baseline/control') parser.add_argument('comparison_csv', help='Second CSV file, which is being compared to the baseline') parser.add_argument('-o', '--output', default='latency.png', help='Output image file (default: latency.png)') - parser.add_argument('--graphs', default='all', help='Comma-separated list of graphs to plot: histogram, line, all (default: all)') + parser.add_argument('--graphs', default='all', help='Comma-separated list of graphs to plot: histogram, line, gas, all (default: all)') + parser.add_argument('--average', type=int, metavar='N', help='Apply moving average over N blocks to smooth line and gas charts') + parser.add_argument('--separate', action='store_true', help='Output each chart as a separate file') args = parser.parse_args() # Parse graph selection if args.graphs.lower() == 'all': - selected_graphs = {'histogram', 'line'} + selected_graphs = {'histogram', 'line', 'gas'} else: selected_graphs = set(graph.strip().lower() for graph in args.graphs.split(',')) - valid_graphs = {'histogram', 'line'} + valid_graphs = {'histogram', 'line', 'gas'} invalid_graphs = selected_graphs - valid_graphs if invalid_graphs: - print(f"Error: Invalid graph types: {', '.join(invalid_graphs)}. Valid options are: histogram, line, all", file=sys.stderr) + print(f"Error: Invalid graph types: {', '.join(invalid_graphs)}. 
Valid options are: histogram, line, gas, all", file=sys.stderr) sys.exit(1) try: @@ -62,6 +119,15 @@ def main(): print(f"Error: 'total_latency' column not found in {args.comparison_csv}", file=sys.stderr) sys.exit(1) + # Check for gas_used column if gas graph is selected + if 'gas' in selected_graphs: + if 'gas_used' not in df1.columns: + print(f"Error: 'gas_used' column not found in {args.baseline_csv} (required for gas graph)", file=sys.stderr) + sys.exit(1) + if 'gas_used' not in df2.columns: + print(f"Error: 'gas_used' column not found in {args.comparison_csv} (required for gas graph)", file=sys.stderr) + sys.exit(1) + if len(df1) != len(df2): print("Warning: CSV files have different number of rows. Using minimum length.", file=sys.stderr) min_len = min(len(df1), len(df2)) @@ -93,23 +159,35 @@ def main(): print("Error: No valid graphs selected", file=sys.stderr) sys.exit(1) - if num_plots == 1: - fig, ax = plt.subplots(1, 1, figsize=(12, 6)) - axes = [ax] + # Store output filenames + output_files = [] + + if args.separate: + # We'll create individual figures for each graph + pass else: - fig, axes = plt.subplots(num_plots, 1, figsize=(12, 6 * num_plots)) + # Create combined figure + if num_plots == 1: + fig, ax = plt.subplots(1, 1, figsize=(12, 6)) + axes = [ax] + else: + fig, axes = plt.subplots(num_plots, 1, figsize=(12, 6 * num_plots)) plot_idx = 0 # Plot histogram if selected if 'histogram' in selected_graphs: + if args.separate: + fig, ax = plt.subplots(1, 1, figsize=(12, 6)) + else: + ax = axes[plot_idx] + min_diff = np.floor(percent_diff.min()) max_diff = np.ceil(percent_diff.max()) # Create histogram with 1% buckets bins = np.arange(min_diff, max_diff + 1, 1) - ax = axes[plot_idx] ax.hist(percent_diff, bins=bins, edgecolor='black', alpha=0.7) ax.set_xlabel('Percent Difference (%)') ax.set_ylabel('Number of Blocks') @@ -120,38 +198,151 @@ def main(): ax.axvline(mean_diff, color='red', linestyle='--', label=f'Mean: {mean_diff:.2f}%') ax.axvline(median_diff, color='orange', linestyle='--', label=f'Median: {median_diff:.2f}%') ax.legend() - plot_idx += 1 + + if args.separate: + plt.tight_layout() + output_file = get_output_filename(args.output, 'histogram') + plt.savefig(output_file, dpi=300, bbox_inches='tight') + output_files.append(output_file) + plt.close(fig) + else: + plot_idx += 1 # Plot line graph if selected if 'line' in selected_graphs: + if args.separate: + fig, ax = plt.subplots(1, 1, figsize=(12, 6)) + else: + ax = axes[plot_idx] + # Determine comparison color based on median change. The median being # negative means processing time got faster, so that becomes green. 
comparison_color = 'green' if median_diff < 0 else 'red' - ax = axes[plot_idx] + # Apply moving average if requested + plot_latency1 = latency1[:len(percent_diff)] + plot_latency2 = latency2[:len(percent_diff)] + + if args.average: + plot_latency1 = moving_average(plot_latency1, args.average) + plot_latency2 = moving_average(plot_latency2, args.average) if 'block_number' in df1.columns and 'block_number' in df2.columns: block_numbers = df1['block_number'].values[:len(percent_diff)] - ax.plot(block_numbers, latency1[:len(percent_diff)], 'orange', alpha=0.7, label=f'Baseline ({args.baseline_csv})') - ax.plot(block_numbers, latency2[:len(percent_diff)], comparison_color, alpha=0.7, label=f'Comparison ({args.comparison_csv})') + ax.plot(block_numbers, plot_latency1, 'orange', alpha=0.7, label=f'Baseline ({args.baseline_csv})') + ax.plot(block_numbers, plot_latency2, comparison_color, alpha=0.7, label=f'Comparison ({args.comparison_csv})') ax.set_xlabel('Block Number') ax.set_ylabel('Total Latency (ms)') - ax.set_title('Total Latency vs Block Number') + title = 'Total Latency vs Block Number' + if args.average: + title += f' ({args.average}-block moving average)' + ax.set_title(title) ax.grid(True, alpha=0.3) ax.legend() else: # If no block_number column, use index indices = np.arange(len(percent_diff)) - ax.plot(indices, latency1[:len(percent_diff)], 'orange', alpha=0.7, label=f'Baseline ({args.baseline_csv})') - ax.plot(indices, latency2[:len(percent_diff)], comparison_color, alpha=0.7, label=f'Comparison ({args.comparison_csv})') + ax.plot(indices, plot_latency1, 'orange', alpha=0.7, label=f'Baseline ({args.baseline_csv})') + ax.plot(indices, plot_latency2, comparison_color, alpha=0.7, label=f'Comparison ({args.comparison_csv})') ax.set_xlabel('Block Index') ax.set_ylabel('Total Latency (ms)') - ax.set_title('Total Latency vs Block Index') + title = 'Total Latency vs Block Index' + if args.average: + title += f' ({args.average}-block moving average)' + ax.set_title(title) ax.grid(True, alpha=0.3) ax.legend() - plot_idx += 1 + + if args.separate: + plt.tight_layout() + output_file = get_output_filename(args.output, 'line') + plt.savefig(output_file, dpi=300, bbox_inches='tight') + output_files.append(output_file) + plt.close(fig) + else: + plot_idx += 1 - plt.tight_layout() - plt.savefig(args.output, dpi=300, bbox_inches='tight') + # Plot gas/s graph if selected + if 'gas' in selected_graphs: + if args.separate: + fig, ax = plt.subplots(1, 1, figsize=(12, 6)) + else: + ax = axes[plot_idx] + + # Calculate gas per second (gas/s) + # latency is in microseconds, so convert to seconds for gas/s calculation + gas1 = df1['gas_used'].values[:len(percent_diff)] + gas2 = df2['gas_used'].values[:len(percent_diff)] + + # Convert latency from microseconds to seconds + latency1_sec = df1['total_latency'].values[:len(percent_diff)] / 1_000_000.0 + latency2_sec = df2['total_latency'].values[:len(percent_diff)] / 1_000_000.0 + + # Calculate gas per second + gas_per_sec1 = gas1 / latency1_sec + gas_per_sec2 = gas2 / latency2_sec + + # Store original values for statistics before averaging + original_gas_per_sec1 = gas_per_sec1.copy() + original_gas_per_sec2 = gas_per_sec2.copy() + + # Apply moving average if requested + if args.average: + gas_per_sec1 = moving_average(gas_per_sec1, args.average) + gas_per_sec2 = moving_average(gas_per_sec2, args.average) + + # Calculate median gas/s for color determination (use original values) + median_gas_per_sec1 = np.median(original_gas_per_sec1) + median_gas_per_sec2 = 
np.median(original_gas_per_sec2) + comparison_color = 'green' if median_gas_per_sec2 > median_gas_per_sec1 else 'red' + + if 'block_number' in df1.columns and 'block_number' in df2.columns: + block_numbers = df1['block_number'].values[:len(percent_diff)] + ax.plot(block_numbers, gas_per_sec1, 'orange', alpha=0.7, label=f'Baseline ({args.baseline_csv})') + ax.plot(block_numbers, gas_per_sec2, comparison_color, alpha=0.7, label=f'Comparison ({args.comparison_csv})') + ax.set_xlabel('Block Number') + ax.set_ylabel('Gas Throughput') + title = 'Gas Throughput vs Block Number' + if args.average: + title += f' ({args.average}-block moving average)' + ax.set_title(title) + ax.grid(True, alpha=0.3) + ax.legend() + + # Format Y-axis with gas units + formatter = FuncFormatter(format_gas_units) + ax.yaxis.set_major_formatter(formatter) + else: + # If no block_number column, use index + indices = np.arange(len(percent_diff)) + ax.plot(indices, gas_per_sec1, 'orange', alpha=0.7, label=f'Baseline ({args.baseline_csv})') + ax.plot(indices, gas_per_sec2, comparison_color, alpha=0.7, label=f'Comparison ({args.comparison_csv})') + ax.set_xlabel('Block Index') + ax.set_ylabel('Gas Throughput') + title = 'Gas Throughput vs Block Index' + if args.average: + title += f' ({args.average}-block moving average)' + ax.set_title(title) + ax.grid(True, alpha=0.3) + ax.legend() + + # Format Y-axis with gas units + formatter = FuncFormatter(format_gas_units) + ax.yaxis.set_major_formatter(formatter) + + if args.separate: + plt.tight_layout() + output_file = get_output_filename(args.output, 'gas') + plt.savefig(output_file, dpi=300, bbox_inches='tight') + output_files.append(output_file) + plt.close(fig) + else: + plot_idx += 1 + + # Save combined figure if not using separate files + if not args.separate: + plt.tight_layout() + plt.savefig(args.output, dpi=300, bbox_inches='tight') + output_files.append(args.output) # Create graph type description for output message graph_types = [] @@ -159,8 +350,17 @@ def main(): graph_types.append('histogram') if 'line' in selected_graphs: graph_types.append('latency graph') + if 'gas' in selected_graphs: + graph_types.append('gas/s graph') graph_desc = ' and '.join(graph_types) - print(f"{graph_desc.capitalize()} saved to {args.output}") + + # Print output file(s) information + if args.separate: + print(f"Saved {len(output_files)} separate files:") + for output_file in output_files: + print(f" - {output_file}") + else: + print(f"{graph_desc.capitalize()} saved to {args.output}") # Always print statistics print(f"\nStatistics:") @@ -170,6 +370,15 @@ def main(): print(f"Min: {percent_diff.min():.2f}%") print(f"Max: {percent_diff.max():.2f}%") print(f"Total blocks analyzed: {len(percent_diff)}") + + # Print gas/s statistics if gas data is available + if 'gas' in selected_graphs: + # Use original values for statistics (not averaged) + print(f"\nGas/s Statistics:") + print(f"Baseline median gas/s: {median_gas_per_sec1:,.0f}") + print(f"Comparison median gas/s: {median_gas_per_sec2:,.0f}") + gas_diff_percent = ((median_gas_per_sec2 - median_gas_per_sec1) / median_gas_per_sec1) * 100 + print(f"Gas/s percent change: {gas_diff_percent:+.2f}%") if __name__ == '__main__': main() From 752637a5d7c2adbca3c061d9631516074bd44bcf Mon Sep 17 00:00:00 2001 From: Rez Date: Wed, 23 Jul 2025 18:10:14 +1000 Subject: [PATCH 263/305] feat: make CompactEnvelope trait public for external crate usage (#17576) --- crates/storage/codecs/src/alloy/transaction/ethereum.rs | 3 ++- 
 crates/storage/codecs/src/alloy/transaction/mod.rs      | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/crates/storage/codecs/src/alloy/transaction/ethereum.rs b/crates/storage/codecs/src/alloy/transaction/ethereum.rs
index 14d51b866fb..7824f60301a 100644
--- a/crates/storage/codecs/src/alloy/transaction/ethereum.rs
+++ b/crates/storage/codecs/src/alloy/transaction/ethereum.rs
@@ -112,7 +112,8 @@ impl Envelope
     }
 }
 
-pub(super) trait CompactEnvelope: Sized {
+/// Compact serialization for transaction envelopes with compression and bitfield packing.
+pub trait CompactEnvelope: Sized {
     /// Takes a buffer which can be written to. *Ideally*, it returns the length written to.
     fn to_compact(&self, buf: &mut B) -> usize
     where
diff --git a/crates/storage/codecs/src/alloy/transaction/mod.rs b/crates/storage/codecs/src/alloy/transaction/mod.rs
index 47881b6f87a..f841ff24f17 100644
--- a/crates/storage/codecs/src/alloy/transaction/mod.rs
+++ b/crates/storage/codecs/src/alloy/transaction/mod.rs
@@ -56,7 +56,7 @@ where
 cond_mod!(eip1559, eip2930, eip4844, eip7702, legacy, txtype);
 
 mod ethereum;
-pub use ethereum::{Envelope, FromTxCompact, ToTxCompact};
+pub use ethereum::{CompactEnvelope, Envelope, FromTxCompact, ToTxCompact};
 
 #[cfg(all(feature = "test-utils", feature = "op"))]
 pub mod optimism;

From 81e0cb038573cab6b4f3f41b6a345ebfdebd6dfb Mon Sep 17 00:00:00 2001
From: Federico Gimenez
Date: Wed, 23 Jul 2025 12:01:52 +0200
Subject: [PATCH 264/305] feat(ci): add ignored tests management to hive workflow (#17577)

---
 .github/assets/hive/ignored_tests.yaml | 17 ++++++++++++++
 .github/assets/hive/parse.py           | 31 ++++++++++++++++++++++++++
 .github/workflows/hive.yml             |  2 +-
 3 files changed, 49 insertions(+), 1 deletion(-)
 create mode 100644 .github/assets/hive/ignored_tests.yaml

diff --git a/.github/assets/hive/ignored_tests.yaml b/.github/assets/hive/ignored_tests.yaml
new file mode 100644
index 00000000000..43021de8420
--- /dev/null
+++ b/.github/assets/hive/ignored_tests.yaml
@@ -0,0 +1,17 @@
+# Ignored Tests Configuration
+#
+# This file contains tests that should be ignored for various reasons (flaky, known issues, etc.).
+# These tests will be IGNORED in the CI results - they won't cause the build to fail
+# regardless of whether they pass or fail.
+#
+# Format:
+# test_suite:
+#   - "test name 1"
+#   - "test name 2"
+#
+# When a test should no longer be ignored, remove it from this list.
+ +engine-withdrawals: + # flaky + - Withdrawals Fork on Block 1 - 8 Block Re-Org NewPayload (Paris) (reth) + diff --git a/.github/assets/hive/parse.py b/.github/assets/hive/parse.py index c408a4d1336..11a30ae095b 100644 --- a/.github/assets/hive/parse.py +++ b/.github/assets/hive/parse.py @@ -7,6 +7,7 @@ parser = argparse.ArgumentParser(description="Check for unexpected test results based on an exclusion list.") parser.add_argument("report_json", help="Path to the hive report JSON file.") parser.add_argument("--exclusion", required=True, help="Path to the exclusion YAML file.") +parser.add_argument("--ignored", required=True, help="Path to the ignored tests YAML file.") args = parser.parse_args() # Load hive JSON @@ -18,13 +19,30 @@ exclusion_data = yaml.safe_load(file) exclusions = exclusion_data.get(report['name'], []) +# Load ignored tests YAML +with open(args.ignored, 'r') as file: + ignored_data = yaml.safe_load(file) + ignored_tests = ignored_data.get(report['name'], []) + # Collect unexpected failures and passes unexpected_failures = [] unexpected_passes = [] +ignored_results = {'passed': [], 'failed': []} for test in report['testCases'].values(): test_name = test['name'] test_pass = test['summaryResult']['pass'] + + # Check if this is an ignored test + if test_name in ignored_tests: + # Track ignored test results for informational purposes + if test_pass: + ignored_results['passed'].append(test_name) + else: + ignored_results['failed'].append(test_name) + continue # Skip this test - don't count it as unexpected + + # Check against expected failures if test_name in exclusions: if test_pass: unexpected_passes.append(test_name) @@ -32,6 +50,19 @@ if not test_pass: unexpected_failures.append(test_name) +# Print summary of ignored tests if any were ignored +if ignored_results['passed'] or ignored_results['failed']: + print("Ignored Tests:") + if ignored_results['passed']: + print(f" Passed ({len(ignored_results['passed'])} tests):") + for test in ignored_results['passed']: + print(f" {test}") + if ignored_results['failed']: + print(f" Failed ({len(ignored_results['failed'])} tests):") + for test in ignored_results['failed']: + print(f" {test}") + print() + # Check if there are any unexpected failures or passes and exit with error if unexpected_failures or unexpected_passes: if unexpected_failures: diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml index b9a927500ec..d219376bef8 100644 --- a/.github/workflows/hive.yml +++ b/.github/workflows/hive.yml @@ -200,7 +200,7 @@ jobs: - name: Parse hive output run: | - find hivetests/workspace/logs -type f -name "*.json" ! -name "hive.json" | xargs -I {} python .github/assets/hive/parse.py {} --exclusion .github/assets/hive/expected_failures.yaml + find hivetests/workspace/logs -type f -name "*.json" ! 
-name "hive.json" | xargs -I {} python .github/assets/hive/parse.py {} --exclusion .github/assets/hive/expected_failures.yaml --ignored .github/assets/hive/ignored_tests.yaml - name: Print simulator output if: ${{ failure() }} From 42c1947c8a7b704c36db572fe0fa445b006feb84 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 23 Jul 2025 12:10:23 +0200 Subject: [PATCH 265/305] chore(hive): update expected failures (#17580) --- .github/assets/hive/expected_failures.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index c5dda276186..a4dd3376efd 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -59,7 +59,6 @@ engine-auth: # worth re-visiting when more of these related tests are passing eest/consume-engine: - tests/prague/eip7702_set_code_tx/test_set_code_txs.py::test_set_code_to_non_empty_storage[fork_Prague-blockchain_test_engine-zero_nonce]-reth - - tests/prague/eip7251_consolidations/test_modified_consolidation_contract.py::test_system_contract_errors[fork_Prague-blockchain_test_engine-system_contract_reaches_gas_limit-system_contract_0x0000bbddc7ce488642fb579f8b00f3a590007251]-reth - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-nonzero_balance]-reth - tests/prague/eip7251_consolidations/test_contract_deployment.py::test_system_contract_deployment[fork_CancunToPragueAtTime15k-blockchain_test_engine-deploy_after_fork-zero_balance]-reth - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_amount_offset-value_zero]-reth @@ -67,7 +66,6 @@ eest/consume-engine: - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_index_offset-value_zero]-reth - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_index_size-value_zero]-reth - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_pubkey_offset-value_zero]-reth - - tests/prague/eip7002_el_triggerable_withdrawals/test_modified_withdrawal_contract.py::test_system_contract_errors[fork_Prague-blockchain_test_engine-system_contract_reaches_gas_limit-system_contract_0x00000961ef480eb55e80d19ad83579a64c007002]-reth - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_pubkey_size-value_zero]-reth - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_signature_offset-value_zero]-reth - tests/prague/eip6110_deposits/test_modified_contract.py::test_invalid_layout[fork_Prague-blockchain_test_engine-log_argument_signature_size-value_zero]-reth From ed8eacfc5b54f4be67b309cd1b696f59c550869c Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 23 Jul 2025 13:25:58 +0200 Subject: [PATCH 266/305] refactor: move EngineValidator trait to reth-engine-tree (#17559) --- Cargo.lock | 7 ++ crates/engine/primitives/src/lib.rs | 50 +------------- crates/engine/primitives/src/message.rs | 5 +- crates/engine/service/src/service.rs | 4 +- crates/engine/tree/src/tree/mod.rs | 6 +- .../engine/tree/src/tree/payload_validator.rs | 68 ++++++++++++++++--- 
crates/engine/tree/src/tree/tests.rs | 55 +++++++++++++-- crates/ethereum/node/Cargo.toml | 2 + crates/ethereum/node/src/engine.rs | 3 +- crates/node/builder/src/rpc.rs | 5 +- crates/optimism/node/Cargo.toml | 2 + crates/optimism/node/src/engine.rs | 6 +- crates/optimism/rpc/Cargo.toml | 1 + crates/optimism/rpc/src/engine.rs | 3 +- crates/rpc/rpc-builder/Cargo.toml | 1 + crates/rpc/rpc-engine-api/Cargo.toml | 1 + crates/rpc/rpc-engine-api/src/engine_api.rs | 3 +- examples/custom-engine-types/Cargo.toml | 1 + examples/custom-engine-types/src/main.rs | 7 +- examples/custom-node/Cargo.toml | 1 + examples/custom-node/src/engine.rs | 3 +- 21 files changed, 152 insertions(+), 82 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9de2bbb5597..c70093e6288 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3336,6 +3336,7 @@ dependencies = [ "alloy-rpc-types", "eyre", "reth-basic-payload-builder", + "reth-engine-tree", "reth-ethereum", "reth-ethereum-payload-builder", "reth-payload-builder", @@ -3399,6 +3400,7 @@ dependencies = [ "reth-chain-state", "reth-codecs", "reth-db-api", + "reth-engine-tree", "reth-ethereum", "reth-network-peers", "reth-node-builder", @@ -8943,6 +8945,7 @@ dependencies = [ "reth-e2e-test-utils", "reth-engine-local", "reth-engine-primitives", + "reth-engine-tree", "reth-ethereum-consensus", "reth-ethereum-engine-primitives", "reth-ethereum-payload-builder", @@ -9258,6 +9261,7 @@ dependencies = [ "reth-db", "reth-e2e-test-utils", "reth-engine-local", + "reth-engine-tree", "reth-evm", "reth-network", "reth-node-api", @@ -9387,6 +9391,7 @@ dependencies = [ "op-revm", "reqwest", "reth-chainspec", + "reth-engine-tree", "reth-evm", "reth-metrics", "reth-node-api", @@ -9910,6 +9915,7 @@ dependencies = [ "reth-chainspec", "reth-consensus", "reth-engine-primitives", + "reth-engine-tree", "reth-ethereum-engine-primitives", "reth-ethereum-primitives", "reth-evm", @@ -10005,6 +10011,7 @@ dependencies = [ "parking_lot", "reth-chainspec", "reth-engine-primitives", + "reth-engine-tree", "reth-ethereum-engine-primitives", "reth-ethereum-primitives", "reth-metrics", diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs index b9ac213e5d9..45e087526ea 100644 --- a/crates/engine/primitives/src/lib.rs +++ b/crates/engine/primitives/src/lib.rs @@ -11,12 +11,8 @@ extern crate alloc; -use alloy_consensus::BlockHeader; use reth_errors::ConsensusError; -use reth_payload_primitives::{ - EngineApiMessageVersion, EngineObjectValidationError, InvalidPayloadAttributesError, - NewPayloadError, PayloadAttributes, PayloadOrAttributes, PayloadTypes, -}; +use reth_payload_primitives::{NewPayloadError, PayloadTypes}; use reth_primitives_traits::{Block, RecoveredBlock}; use reth_trie_common::HashedPostState; use serde::{de::DeserializeOwned, Serialize}; @@ -136,47 +132,3 @@ pub trait PayloadValidator: Send + Sync + Unpin + 'static { Ok(()) } } - -/// Type that validates the payloads processed by the engine. -pub trait EngineValidator: - PayloadValidator -{ - /// Validates the presence or exclusion of fork-specific fields based on the payload attributes - /// and the message version. - fn validate_version_specific_fields( - &self, - version: EngineApiMessageVersion, - payload_or_attrs: PayloadOrAttributes< - '_, - Types::ExecutionData, - ::PayloadAttributes, - >, - ) -> Result<(), EngineObjectValidationError>; - - /// Ensures that the payload attributes are valid for the given [`EngineApiMessageVersion`]. 
- fn ensure_well_formed_attributes( - &self, - version: EngineApiMessageVersion, - attributes: &::PayloadAttributes, - ) -> Result<(), EngineObjectValidationError>; - - /// Validates the payload attributes with respect to the header. - /// - /// By default, this enforces that the payload attributes timestamp is greater than the - /// timestamp according to: - /// > 7. Client software MUST ensure that payloadAttributes.timestamp is greater than - /// > timestamp - /// > of a block referenced by forkchoiceState.headBlockHash. - /// - /// See also [engine api spec](https://github.com/ethereum/execution-apis/tree/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine) - fn validate_payload_attributes_against_header( - &self, - attr: &::PayloadAttributes, - header: &::Header, - ) -> Result<(), InvalidPayloadAttributesError> { - if attr.timestamp() <= header.timestamp() { - return Err(InvalidPayloadAttributesError::InvalidTimestamp); - } - Ok(()) - } -} diff --git a/crates/engine/primitives/src/message.rs b/crates/engine/primitives/src/message.rs index 283f6a4135b..6f67d59d8f0 100644 --- a/crates/engine/primitives/src/message.rs +++ b/crates/engine/primitives/src/message.rs @@ -1,6 +1,5 @@ use crate::{ - error::BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, EngineApiMessageVersion, - ExecutionPayload, ForkchoiceStatus, + error::BeaconForkChoiceUpdateError, BeaconOnNewPayloadError, ExecutionPayload, ForkchoiceStatus, }; use alloy_rpc_types_engine::{ ForkChoiceUpdateResult, ForkchoiceState, ForkchoiceUpdateError, ForkchoiceUpdated, PayloadId, @@ -15,7 +14,7 @@ use core::{ use futures::{future::Either, FutureExt, TryFutureExt}; use reth_errors::RethResult; use reth_payload_builder_primitives::PayloadBuilderError; -use reth_payload_primitives::PayloadTypes; +use reth_payload_primitives::{EngineApiMessageVersion, PayloadTypes}; use tokio::sync::{mpsc::UnboundedSender, oneshot}; /// Represents the outcome of forkchoice update. 
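For downstream code the move is an import-path change only; the trait's methods,
bounds, and default implementations carry over unchanged (see the
`payload_validator.rs` hunks below). A minimal sketch of the adjustment, assuming
the consuming crate declares the `reth-engine-tree` dependency that this patch
adds to the affected manifests:

    // Before this patch the trait was reachable via reth-engine-primitives
    // (and re-exported through reth-node-api):
    // use reth_node_api::EngineValidator;
    // After it, the import points at the trait's new home:
    use reth_engine_tree::tree::EngineValidator;

Existing `impl EngineValidator` blocks compile as-is once the import is updated,
as the `engine.rs` changes for the Ethereum and Optimism nodes below show.
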
diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index f634d2a3264..63a85300fa1 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -2,13 +2,13 @@ use futures::{Stream, StreamExt}; use pin_project::pin_project; use reth_chainspec::EthChainSpec; use reth_consensus::{ConsensusError, FullConsensus}; -use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconEngineMessage, EngineValidator}; +use reth_engine_primitives::{BeaconConsensusEngineEvent, BeaconEngineMessage}; use reth_engine_tree::{ backfill::PipelineSync, download::BasicBlockDownloader, engine::{EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineHandler}, persistence::PersistenceHandle, - tree::{EngineApiTreeHandler, InvalidBlockHook, TreeConfig}, + tree::{EngineApiTreeHandler, EngineValidator, InvalidBlockHook, TreeConfig}, }; pub use reth_engine_tree::{ chain::{ChainEvent, ChainOrchestrator}, diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 9ec5d0b9d78..cda5e5365a6 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -26,8 +26,8 @@ use reth_chain_state::{ use reth_consensus::{Consensus, FullConsensus}; pub use reth_engine_primitives::InvalidBlockHook; use reth_engine_primitives::{ - BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconOnNewPayloadError, EngineValidator, - ExecutionPayload, ForkchoiceStateTracker, OnForkChoiceUpdated, + BeaconConsensusEngineEvent, BeaconEngineMessage, BeaconOnNewPayloadError, ExecutionPayload, + ForkchoiceStateTracker, OnForkChoiceUpdated, }; use reth_errors::{ConsensusError, ProviderResult}; use reth_evm::{ConfigureEvm, Evm, SpecFor}; @@ -86,7 +86,7 @@ pub use block_buffer::BlockBuffer; pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use invalid_headers::InvalidHeaderCache; pub use payload_processor::*; -pub use payload_validator::TreePayloadValidator; +pub use payload_validator::{EngineValidator, TreePayloadValidator}; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::TreeConfig; use reth_evm::execute::BlockExecutionOutput; diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 1145aebfb6f..c4a756da9c6 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -13,9 +13,12 @@ use alloy_evm::{block::BlockExecutor, Evm}; use alloy_primitives::B256; use reth_chain_state::CanonicalInMemoryState; use reth_consensus::{ConsensusError, FullConsensus}; -use reth_engine_primitives::InvalidBlockHook; +use reth_engine_primitives::{InvalidBlockHook, PayloadValidator}; use reth_evm::{ConfigureEvm, SpecFor}; -use reth_payload_primitives::NewPayloadError; +use reth_payload_primitives::{ + EngineApiMessageVersion, EngineObjectValidationError, InvalidPayloadAttributesError, + NewPayloadError, PayloadAttributes, PayloadOrAttributes, PayloadTypes, +}; use reth_primitives_traits::{ AlloyBlockHeader, Block, BlockBody, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, }; @@ -305,7 +308,7 @@ where } // Get the parent block's state to execute against - let parent_hash = block.parent_hash(); + let parent_hash = block.header().parent_hash(); // Get parent header for error context let parent_header = ensure_ok!(self.get_parent_header(parent_hash, tree_state)); @@ -618,7 +621,7 @@ where handle: &mut crate::tree::PayloadHandle, execution_time: Instant, ) -> 
Result<(B256, TrieUpdates), NewPayloadError> { - let parent_hash = block.parent_hash(); + let parent_hash = block.header().parent_hash(); if !run_parallel_state_root { // Use synchronous computation @@ -708,7 +711,7 @@ where } // Move to the parent block - current_hash = block.block.recovered_block.parent_hash(); + current_hash = block.block.recovered_block.header().parent_hash(); } false @@ -790,9 +793,12 @@ where self.persisting_kind_for(block.header(), persistence_info, tree_state); let trie_input_start = Instant::now(); - let Ok(trie_input) = - self.compute_trie_input(provider_ro, block.parent_hash(), tree_state, persisting_kind) - else { + let Ok(trie_input) = self.compute_trie_input( + provider_ro, + block.header().parent_hash(), + tree_state, + persisting_kind, + ) else { // Fall back to cache-only spawn if trie input computation fails let handle = self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder); @@ -916,7 +922,7 @@ where )) })?; - blocks.retain(|b| b.recovered_block().number() > last_persisted_block_number); + blocks.retain(|b| b.recovered_block().header().number() > last_persisted_block_number); } if blocks.is_empty() { @@ -965,3 +971,47 @@ where Ok(input) } } + +/// Type that validates the payloads processed by the engine. +pub trait EngineValidator: + PayloadValidator +{ + /// Validates the presence or exclusion of fork-specific fields based on the payload attributes + /// and the message version. + fn validate_version_specific_fields( + &self, + version: EngineApiMessageVersion, + payload_or_attrs: PayloadOrAttributes< + '_, + Types::ExecutionData, + ::PayloadAttributes, + >, + ) -> Result<(), EngineObjectValidationError>; + + /// Ensures that the payload attributes are valid for the given [`EngineApiMessageVersion`]. + fn ensure_well_formed_attributes( + &self, + version: EngineApiMessageVersion, + attributes: &::PayloadAttributes, + ) -> Result<(), EngineObjectValidationError>; + + /// Validates the payload attributes with respect to the header. + /// + /// By default, this enforces that the payload attributes timestamp is greater than the + /// timestamp according to: + /// > 7. Client software MUST ensure that payloadAttributes.timestamp is greater than + /// > timestamp + /// > of a block referenced by forkchoiceState.headBlockHash. 
+    ///
+    /// See also: <https://github.com/ethereum/execution-apis/tree/fe8e13c288c592ec154ce25c534e26cb7ce0530d/src/engine>
+    fn validate_payload_attributes_against_header(
+        &self,
+        attr: &::PayloadAttributes,
+        header: &::Header,
+    ) -> Result<(), InvalidPayloadAttributesError> {
+        if attr.timestamp() <= header.timestamp() {
+            return Err(InvalidPayloadAttributesError::InvalidTimestamp);
+        }
+        Ok(())
+    }
+}
diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs
index 9922d29ff1d..d6e4babfeaf 100644
--- a/crates/engine/tree/src/tree/tests.rs
+++ b/crates/engine/tree/src/tree/tests.rs
@@ -1,5 +1,5 @@
 use super::*;
-use crate::persistence::PersistenceAction;
+use crate::{persistence::PersistenceAction, tree::EngineValidator};
 use alloy_consensus::Header;
 use alloy_primitives::{
     map::{HashMap, HashSet},
@@ -15,7 +15,6 @@
 use reth_ethereum_consensus::EthBeaconConsensus;
 use reth_ethereum_engine_primitives::EthEngineTypes;
 use reth_ethereum_primitives::{Block, EthPrimitives};
 use reth_evm_ethereum::MockEvmConfig;
-use reth_node_ethereum::EthereumEngineValidator;
 use reth_primitives_traits::Block as _;
 use reth_provider::test_utils::MockEthProvider;
 use reth_trie::HashedPostState;
@@ -25,6 +24,54 @@
     sync::mpsc::{channel, Sender},
 };
 
+/// Mock engine validator for tests
+#[derive(Debug, Clone)]
+struct MockEngineValidator;
+
+impl reth_engine_primitives::PayloadValidator for MockEngineValidator {
+    type Block = Block;
+    type ExecutionData = alloy_rpc_types_engine::ExecutionData;
+
+    fn ensure_well_formed_payload(
+        &self,
+        payload: Self::ExecutionData,
+    ) -> Result<
+        reth_primitives_traits::RecoveredBlock,
+        reth_payload_primitives::NewPayloadError,
+    > {
+        // For tests, convert the execution payload to a block
+        let block = reth_ethereum_primitives::Block::try_from(payload.payload).map_err(|e| {
+            reth_payload_primitives::NewPayloadError::Other(format!("{e:?}").into())
+        })?;
+        let sealed = block.seal_slow();
+        sealed.try_recover().map_err(|e| reth_payload_primitives::NewPayloadError::Other(e.into()))
+    }
+}
+
+impl EngineValidator for MockEngineValidator {
+    fn validate_version_specific_fields(
+        &self,
+        _version: reth_payload_primitives::EngineApiMessageVersion,
+        _payload_or_attrs: reth_payload_primitives::PayloadOrAttributes<
+            '_,
+            alloy_rpc_types_engine::ExecutionData,
+            alloy_rpc_types_engine::PayloadAttributes,
+        >,
+    ) -> Result<(), reth_payload_primitives::EngineObjectValidationError> {
+        // Mock implementation - always valid
+        Ok(())
+    }
+
+    fn ensure_well_formed_attributes(
+        &self,
+        _version: reth_payload_primitives::EngineApiMessageVersion,
+        _attributes: &alloy_rpc_types_engine::PayloadAttributes,
+    ) -> Result<(), reth_payload_primitives::EngineObjectValidationError> {
+        // Mock implementation - always valid
+        Ok(())
+    }
+}
+
 /// This is a test channel that allows you to `release` any value that is in the channel.
 ///
 /// If nothing has been sent, then the next value will be immediately sent.
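Note the strict inequality in the default `validate_payload_attributes_against_header`
added above: attributes whose timestamp merely equals the head block's timestamp are
rejected, per point 7 of the Engine API spec quoted in its doc comment. A standalone
sketch of that rule, using plain `u64` values in place of the real attribute and
header types (the helper below is made up for illustration):

    /// Mirrors the default check: `attr.timestamp() <= header.timestamp()` is an error.
    fn attributes_timestamp_ok(attr_timestamp: u64, head_timestamp: u64) -> bool {
        attr_timestamp > head_timestamp
    }

    fn main() {
        assert!(attributes_timestamp_ok(1_700_000_012, 1_700_000_000));
        // equal timestamps surface as InvalidPayloadAttributesError::InvalidTimestamp
        assert!(!attributes_timestamp_ok(1_700_000_000, 1_700_000_000));
    }
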
@@ -83,7 +130,7 @@ struct TestHarness { EthPrimitives, MockEthProvider, EthEngineTypes, - EthereumEngineValidator, + MockEngineValidator, MockEvmConfig, >, to_tree_tx: Sender, Block>>, @@ -117,7 +164,7 @@ impl TestHarness { let provider = MockEthProvider::default(); - let payload_validator = EthereumEngineValidator::new(chain_spec.clone()); + let payload_validator = MockEngineValidator; let (from_tree_tx, from_tree_rx) = unbounded_channel(); diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 2605efbf6bd..7c3613c46ea 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -32,6 +32,7 @@ reth-rpc-builder.workspace = true reth-rpc-server-types.workspace = true reth-node-api.workspace = true reth-chainspec.workspace = true +reth-engine-tree.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-trie-db.workspace = true reth-rpc-eth-types.workspace = true @@ -100,4 +101,5 @@ test-utils = [ "reth-evm/test-utils", "reth-primitives-traits/test-utils", "reth-evm-ethereum/test-utils", + "reth-engine-tree/test-utils", ] diff --git a/crates/ethereum/node/src/engine.rs b/crates/ethereum/node/src/engine.rs index 14e1f4eff2a..1c4ea2ce404 100644 --- a/crates/ethereum/node/src/engine.rs +++ b/crates/ethereum/node/src/engine.rs @@ -6,7 +6,8 @@ pub use alloy_rpc_types_engine::{ ExecutionPayloadV1, PayloadAttributes as EthPayloadAttributes, }; use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_engine_primitives::{EngineValidator, PayloadValidator}; +use reth_engine_primitives::PayloadValidator; +use reth_engine_tree::tree::EngineValidator; use reth_ethereum_payload_builder::EthereumExecutionPayloadValidator; use reth_ethereum_primitives::Block; use reth_node_api::PayloadTypes; diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 0a5c31f7ab1..021e30b6dcb 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -9,9 +9,10 @@ use alloy_rpc_types_engine::ExecutionData; use jsonrpsee::{core::middleware::layer::Either, RpcModule}; use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; +use reth_engine_tree::tree::EngineValidator; use reth_node_api::{ - AddOnsContext, BlockTy, EngineTypes, EngineValidator, FullNodeComponents, FullNodeTypes, - NodeAddOns, NodeTypes, PayloadTypes, PrimitivesTy, + AddOnsContext, BlockTy, EngineTypes, FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, + PayloadTypes, PrimitivesTy, }; use reth_node_core::{ node_config::NodeConfig, diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 539828f265e..6ce9aff49b9 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -30,6 +30,7 @@ reth-tasks = { workspace = true, optional = true } reth-trie-common.workspace = true reth-node-core.workspace = true reth-rpc-engine-api.workspace = true +reth-engine-tree.workspace = true reth-engine-local = { workspace = true, features = ["op"] } reth-rpc-api.workspace = true @@ -116,6 +117,7 @@ test-utils = [ "reth-optimism-primitives/arbitrary", "reth-primitives-traits/test-utils", "reth-trie-common/test-utils", + "reth-engine-tree/test-utils", ] reth-codec = ["reth-optimism-primitives/reth-codec"] diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index bba734ae8fd..8e6e466d037 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -6,14 +6,14 @@ use 
op_alloy_rpc_types_engine::{ OpPayloadAttributes, }; use reth_consensus::ConsensusError; +use reth_engine_tree::tree::EngineValidator; use reth_node_api::{ payload::{ validate_parent_beacon_block_root_presence, EngineApiMessageVersion, EngineObjectValidationError, MessageValidationKind, NewPayloadError, PayloadOrAttributes, PayloadTypes, VersionSpecificValidationError, }, - validate_version_specific_fields, BuiltPayload, EngineTypes, EngineValidator, NodePrimitives, - PayloadValidator, + validate_version_specific_fields, BuiltPayload, EngineTypes, NodePrimitives, PayloadValidator, }; use reth_optimism_consensus::isthmus; use reth_optimism_forks::OpHardforks; @@ -290,7 +290,7 @@ mod test { use alloy_primitives::{b64, Address, B256, B64}; use alloy_rpc_types_engine::PayloadAttributes; use reth_chainspec::ChainSpec; - use reth_node_builder::EngineValidator; + use reth_engine_tree::tree::EngineValidator; use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA}; use reth_provider::noop::NoopProvider; use reth_trie_common::KeccakKeyHasher; diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 97f598628ef..233f3dd134c 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -27,6 +27,7 @@ reth-node-api.workspace = true reth-node-builder.workspace = true reth-chainspec.workspace = true reth-rpc-engine-api.workspace = true +reth-engine-tree.workspace = true # op-reth reth-optimism-evm.workspace = true diff --git a/crates/optimism/rpc/src/engine.rs b/crates/optimism/rpc/src/engine.rs index ac2cb7fcb2c..523f997e002 100644 --- a/crates/optimism/rpc/src/engine.rs +++ b/crates/optimism/rpc/src/engine.rs @@ -14,7 +14,8 @@ use op_alloy_rpc_types_engine::{ SuperchainSignal, }; use reth_chainspec::EthereumHardforks; -use reth_node_api::{EngineTypes, EngineValidator}; +use reth_engine_tree::tree::EngineValidator; +use reth_node_api::EngineTypes; use reth_rpc_api::IntoEngineApiRpcModule; use reth_rpc_engine_api::EngineApi; use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 281b32ef568..50b284698ed 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -67,6 +67,7 @@ reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-rpc-convert.workspace = true reth-engine-primitives.workspace = true +reth-engine-tree.workspace = true reth-node-ethereum.workspace = true alloy-primitives.workspace = true diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 4304f17f707..da119de5b2c 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -21,6 +21,7 @@ reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-engine-primitives.workspace = true +reth-engine-tree.workspace = true reth-transaction-pool.workspace = true reth-primitives-traits.workspace = true diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 8738e94abe9..9ed34c5a1e6 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -18,7 +18,8 @@ use async_trait::async_trait; use jsonrpsee_core::{server::RpcModule, RpcResult}; use parking_lot::Mutex; use reth_chainspec::EthereumHardforks; -use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineTypes, 
EngineValidator}; +use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineTypes}; +use reth_engine_tree::tree::EngineValidator; use reth_payload_builder::PayloadStore; use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, ExecutionPayload, diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml index 50bd58620e3..1c1144d1bb9 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -10,6 +10,7 @@ reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true reth-ethereum = { workspace = true, features = ["test-utils", "node", "node-api", "pool"] } +reth-engine-tree.workspace = true reth-tracing.workspace = true reth-trie-db.workspace = true alloy-genesis.workspace = true diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index ad370ef0042..d339019d167 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -29,14 +29,15 @@ use alloy_rpc_types::{ Withdrawal, }; use reth_basic_payload_builder::{BuildArguments, BuildOutcome, PayloadBuilder, PayloadConfig}; +use reth_engine_tree::tree::EngineValidator; use reth_ethereum::{ chainspec::{Chain, ChainSpec, ChainSpecProvider}, node::{ api::{ payload::{EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes}, - validate_version_specific_fields, AddOnsContext, EngineTypes, EngineValidator, - FullNodeComponents, FullNodeTypes, InvalidPayloadAttributesError, NewPayloadError, - NodeTypes, PayloadAttributes, PayloadBuilderAttributes, PayloadTypes, PayloadValidator, + validate_version_specific_fields, AddOnsContext, EngineTypes, FullNodeComponents, + FullNodeTypes, InvalidPayloadAttributesError, NewPayloadError, NodeTypes, + PayloadAttributes, PayloadBuilderAttributes, PayloadTypes, PayloadValidator, }, builder::{ components::{BasicPayloadServiceBuilder, ComponentsBuilder, PayloadBuilderBuilder}, diff --git a/examples/custom-node/Cargo.toml b/examples/custom-node/Cargo.toml index 787e4db5e51..9722919f7d8 100644 --- a/examples/custom-node/Cargo.toml +++ b/examples/custom-node/Cargo.toml @@ -17,6 +17,7 @@ reth-op = { workspace = true, features = ["node", "pool", "rpc"] } reth-payload-builder.workspace = true reth-rpc-api.workspace = true reth-rpc-engine-api.workspace = true +reth-engine-tree.workspace = true reth-ethereum = { workspace = true, features = ["node-api", "network", "evm", "pool", "trie", "storage-api"] } # revm diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index bf82747c133..d441b94afa5 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -5,10 +5,11 @@ use crate::{ }; use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload}; use reth_chain_state::ExecutedBlockWithTrieUpdates; +use reth_engine_tree::tree::EngineValidator; use reth_ethereum::{ node::api::{ validate_version_specific_fields, AddOnsContext, BuiltPayload, EngineApiMessageVersion, - EngineObjectValidationError, EngineValidator, ExecutionPayload, FullNodeComponents, + EngineObjectValidationError, ExecutionPayload, FullNodeComponents, InvalidPayloadAttributesError, NewPayloadError, NodePrimitives, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, PayloadTypes, PayloadValidator, }, From 2c5a967898aeb6018d3f3a9a67a206b192289b7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= 
<78718413+lean-apple@users.noreply.github.com> Date: Wed, 23 Jul 2025 13:28:17 +0200 Subject: [PATCH 267/305] feat(era): add era types (#17477) --- crates/era-utils/src/export.rs | 9 +- crates/era-utils/src/history.rs | 3 +- crates/era/src/consensus_types.rs | 235 ++++++++++++++++++++++++ crates/era/src/e2s_types.rs | 93 ++++++++++ crates/era/src/era1_file.rs | 15 +- crates/era/src/era1_types.rs | 87 ++------- crates/era/src/era_types.rs | 286 ++++++++++++++++++++++++++++++ crates/era/src/execution_types.rs | 11 +- crates/era/src/lib.rs | 26 ++- crates/era/tests/it/dd.rs | 5 +- crates/era/tests/it/genesis.rs | 11 +- crates/era/tests/it/roundtrip.rs | 3 +- 12 files changed, 682 insertions(+), 102 deletions(-) create mode 100644 crates/era/src/consensus_types.rs create mode 100644 crates/era/src/era_types.rs diff --git a/crates/era-utils/src/export.rs b/crates/era-utils/src/export.rs index f76b3f82a12..49909d80958 100644 --- a/crates/era-utils/src/export.rs +++ b/crates/era-utils/src/export.rs @@ -5,6 +5,7 @@ use alloy_consensus::BlockHeader; use alloy_primitives::{BlockNumber, B256, U256}; use eyre::{eyre, Result}; use reth_era::{ + e2s_types::IndexEntry, era1_file::Era1Writer, era1_types::{BlockIndex, Era1Id}, execution_types::{ @@ -151,8 +152,8 @@ where let mut writer = Era1Writer::new(file); writer.write_version()?; - let mut offsets = Vec::with_capacity(block_count); - let mut position = VERSION_ENTRY_SIZE as i64; + let mut offsets = Vec::::with_capacity(block_count); + let mut position = VERSION_ENTRY_SIZE as u64; let mut blocks_written = 0; let mut final_header_data = Vec::new(); @@ -177,7 +178,7 @@ where let body_size = compressed_body.data.len() + ENTRY_HEADER_SIZE; let receipts_size = compressed_receipts.data.len() + ENTRY_HEADER_SIZE; let difficulty_size = 32 + ENTRY_HEADER_SIZE; // U256 is 32 + 8 bytes header overhead - let total_size = header_size + body_size + receipts_size + difficulty_size; + let total_size = (header_size + body_size + receipts_size + difficulty_size) as u64; let block_tuple = BlockTuple::new( compressed_header, @@ -187,7 +188,7 @@ where ); offsets.push(position); - position += total_size as i64; + position += total_size; writer.write_block(&block_tuple)?; blocks_written += 1; diff --git a/crates/era-utils/src/history.rs b/crates/era-utils/src/history.rs index 75eaa4591cf..5d212c1694c 100644 --- a/crates/era-utils/src/history.rs +++ b/crates/era-utils/src/history.rs @@ -10,7 +10,8 @@ use reth_db_api::{ use reth_era::{ e2s_types::E2sError, era1_file::{BlockTupleIterator, Era1Reader}, - execution_types::{BlockTuple, DecodeCompressed}, + execution_types::BlockTuple, + DecodeCompressed, }; use reth_era_downloader::EraMeta; use reth_etl::Collector; diff --git a/crates/era/src/consensus_types.rs b/crates/era/src/consensus_types.rs new file mode 100644 index 00000000000..cdcc77ce57a --- /dev/null +++ b/crates/era/src/consensus_types.rs @@ -0,0 +1,235 @@ +//! Consensus types for Era post-merge history files + +use crate::{ + e2s_types::{E2sError, Entry}, + DecodeCompressedSsz, +}; +use snap::{read::FrameDecoder, write::FrameEncoder}; +use ssz::Decode; +use std::io::{Read, Write}; + +/// `CompressedSignedBeaconBlock` record type: [0x01, 0x00] +pub const COMPRESSED_SIGNED_BEACON_BLOCK: [u8; 2] = [0x01, 0x00]; + +/// `CompressedBeaconState` record type: [0x02, 0x00] +pub const COMPRESSED_BEACON_STATE: [u8; 2] = [0x02, 0x00]; + +/// Compressed signed beacon block +/// +/// See also . 
+#[derive(Debug, Clone)] +pub struct CompressedSignedBeaconBlock { + /// Snappy-compressed ssz-encoded `SignedBeaconBlock` + pub data: Vec, +} + +impl CompressedSignedBeaconBlock { + /// Create a new [`CompressedSignedBeaconBlock`] from compressed data + pub const fn new(data: Vec) -> Self { + Self { data } + } + + /// Create from ssz-encoded block by compressing it with snappy + pub fn from_ssz(ssz_data: &[u8]) -> Result { + let mut compressed = Vec::new(); + { + let mut encoder = FrameEncoder::new(&mut compressed); + + Write::write_all(&mut encoder, ssz_data).map_err(|e| { + E2sError::SnappyCompression(format!("Failed to compress signed beacon block: {e}")) + })?; + + encoder.flush().map_err(|e| { + E2sError::SnappyCompression(format!("Failed to flush encoder: {e}")) + })?; + } + Ok(Self { data: compressed }) + } + + /// Decompress to get the original ssz-encoded signed beacon block + pub fn decompress(&self) -> Result, E2sError> { + let mut decoder = FrameDecoder::new(self.data.as_slice()); + let mut decompressed = Vec::new(); + Read::read_to_end(&mut decoder, &mut decompressed).map_err(|e| { + E2sError::SnappyDecompression(format!("Failed to decompress signed beacon block: {e}")) + })?; + + Ok(decompressed) + } + + /// Convert to an [`Entry`] + pub fn to_entry(&self) -> Entry { + Entry::new(COMPRESSED_SIGNED_BEACON_BLOCK, self.data.clone()) + } + + /// Create from an [`Entry`] + pub fn from_entry(entry: &Entry) -> Result { + if entry.entry_type != COMPRESSED_SIGNED_BEACON_BLOCK { + return Err(E2sError::Ssz(format!( + "Invalid entry type for CompressedSignedBeaconBlock: expected {:02x}{:02x}, got {:02x}{:02x}", + COMPRESSED_SIGNED_BEACON_BLOCK[0], + COMPRESSED_SIGNED_BEACON_BLOCK[1], + entry.entry_type[0], + entry.entry_type[1] + ))); + } + + Ok(Self { data: entry.data.clone() }) + } + + /// Decode the compressed signed beacon block into ssz bytes + pub fn decode_to_ssz(&self) -> Result, E2sError> { + self.decompress() + } +} + +impl DecodeCompressedSsz for CompressedSignedBeaconBlock { + fn decode(&self) -> Result { + let ssz_bytes = self.decompress()?; + T::from_ssz_bytes(&ssz_bytes).map_err(|e| { + E2sError::Ssz(format!("Failed to decode SSZ data into target type: {e:?}")) + }) + } +} + +/// Compressed beacon state +/// +/// See also . 
+/// Compressed beacon state
+///
+/// See also .
+#[derive(Debug, Clone)]
+pub struct CompressedBeaconState {
+    /// Snappy-compressed ssz-encoded `BeaconState`
+    pub data: Vec<u8>,
+}
+
+impl CompressedBeaconState {
+    /// Create a new [`CompressedBeaconState`] from compressed data
+    pub const fn new(data: Vec<u8>) -> Self {
+        Self { data }
+    }
+
+    /// Compress with snappy from ssz-encoded state
+    pub fn from_ssz(ssz_data: &[u8]) -> Result<Self, E2sError> {
+        let mut compressed = Vec::new();
+        {
+            let mut encoder = FrameEncoder::new(&mut compressed);
+
+            Write::write_all(&mut encoder, ssz_data).map_err(|e| {
+                E2sError::SnappyCompression(format!("Failed to compress beacon state: {e}"))
+            })?;
+
+            encoder.flush().map_err(|e| {
+                E2sError::SnappyCompression(format!("Failed to flush encoder: {e}"))
+            })?;
+        }
+        Ok(Self { data: compressed })
+    }
+
+    /// Decompress to get the original ssz-encoded beacon state
+    pub fn decompress(&self) -> Result<Vec<u8>, E2sError> {
+        let mut decoder = FrameDecoder::new(self.data.as_slice());
+        let mut decompressed = Vec::new();
+        Read::read_to_end(&mut decoder, &mut decompressed).map_err(|e| {
+            E2sError::SnappyDecompression(format!("Failed to decompress beacon state: {e}"))
+        })?;
+
+        Ok(decompressed)
+    }
+
+    /// Convert to an [`Entry`]
+    pub fn to_entry(&self) -> Entry {
+        Entry::new(COMPRESSED_BEACON_STATE, self.data.clone())
+    }
+
+    /// Create from an [`Entry`]
+    pub fn from_entry(entry: &Entry) -> Result<Self, E2sError> {
+        if entry.entry_type != COMPRESSED_BEACON_STATE {
+            return Err(E2sError::Ssz(format!(
+                "Invalid entry type for CompressedBeaconState: expected {:02x}{:02x}, got {:02x}{:02x}",
+                COMPRESSED_BEACON_STATE[0],
+                COMPRESSED_BEACON_STATE[1],
+                entry.entry_type[0],
+                entry.entry_type[1]
+            )));
+        }
+
+        Ok(Self { data: entry.data.clone() })
+    }
+
+    /// Decode the compressed beacon state into ssz bytes
+    pub fn decode_to_ssz(&self) -> Result<Vec<u8>, E2sError> {
+        self.decompress()
+    }
+}
+
+impl DecodeCompressedSsz for CompressedBeaconState {
+    fn decode<T: Decode>(&self) -> Result<T, E2sError> {
+        let ssz_bytes = self.decompress()?;
+        T::from_ssz_bytes(&ssz_bytes).map_err(|e| {
+            E2sError::Ssz(format!("Failed to decode SSZ data into target type: {e:?}"))
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_signed_beacon_block_compression_roundtrip() {
+        let ssz_data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+        let compressed_block = CompressedSignedBeaconBlock::from_ssz(&ssz_data).unwrap();
+        let decompressed = compressed_block.decompress().unwrap();
+
+        assert_eq!(decompressed, ssz_data);
+    }
+
+    #[test]
+    fn test_beacon_state_compression_roundtrip() {
+        let ssz_data = vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 1];
+
+        let compressed_state = CompressedBeaconState::from_ssz(&ssz_data).unwrap();
+        let decompressed = compressed_state.decompress().unwrap();
+
+        assert_eq!(decompressed, ssz_data);
+    }
+
+    #[test]
+    fn test_entry_conversion_signed_beacon_block() {
+        let ssz_data = vec![1, 2, 3, 4, 5];
+        let compressed_block = CompressedSignedBeaconBlock::from_ssz(&ssz_data).unwrap();
+
+        let entry = compressed_block.to_entry();
+        assert_eq!(entry.entry_type, COMPRESSED_SIGNED_BEACON_BLOCK);
+
+        let recovered = CompressedSignedBeaconBlock::from_entry(&entry).unwrap();
+        let recovered_ssz = recovered.decode_to_ssz().unwrap();
+
+        assert_eq!(recovered_ssz, ssz_data);
+    }
+
+    #[test]
+    fn test_entry_conversion_beacon_state() {
+        let ssz_data = vec![5, 4, 3, 2, 1];
+        let compressed_state = CompressedBeaconState::from_ssz(&ssz_data).unwrap();
+
+        let entry = compressed_state.to_entry();
+        assert_eq!(entry.entry_type, COMPRESSED_BEACON_STATE);
+
+        let recovered =
CompressedBeaconState::from_entry(&entry).unwrap(); + let recovered_ssz = recovered.decode_to_ssz().unwrap(); + + assert_eq!(recovered_ssz, ssz_data); + } + + #[test] + fn test_invalid_entry_type() { + let invalid_entry = Entry::new([0xFF, 0xFF], vec![1, 2, 3]); + + let result = CompressedSignedBeaconBlock::from_entry(&invalid_entry); + assert!(result.is_err()); + + let result = CompressedBeaconState::from_entry(&invalid_entry); + assert!(result.is_err()); + } +} diff --git a/crates/era/src/e2s_types.rs b/crates/era/src/e2s_types.rs index c2d4734c2e7..3e5681eb119 100644 --- a/crates/era/src/e2s_types.rs +++ b/crates/era/src/e2s_types.rs @@ -165,3 +165,96 @@ impl Entry { self.entry_type == SLOT_INDEX } } + +/// Serialize and deserialize index entries with format: +/// `starting-number | offsets... | count` +pub trait IndexEntry: Sized { + /// Get the entry type identifier for this index + fn entry_type() -> [u8; 2]; + + /// Create a new instance with starting number and offsets + fn new(starting_number: u64, offsets: Vec) -> Self; + + /// Get the starting number - can be starting slot or block number for example + fn starting_number(&self) -> u64; + + /// Get the offsets vector + fn offsets(&self) -> &[u64]; + + /// Convert to an [`Entry`] for storage in an e2store file + /// Format: starting-number | offset1 | offset2 | ... | count + fn to_entry(&self) -> Entry { + let mut data = Vec::with_capacity(8 + self.offsets().len() * 8 + 8); + + // Add starting number + data.extend_from_slice(&self.starting_number().to_le_bytes()); + + // Add all offsets + data.extend(self.offsets().iter().flat_map(|offset| offset.to_le_bytes())); + + // Encode count - 8 bytes again + let count = self.offsets().len() as u64; + data.extend_from_slice(&count.to_le_bytes()); + + Entry::new(Self::entry_type(), data) + } + + /// Create from an [`Entry`] + fn from_entry(entry: &Entry) -> Result { + let expected_type = Self::entry_type(); + + if entry.entry_type != expected_type { + return Err(E2sError::Ssz(format!( + "Invalid entry type: expected {:02x}{:02x}, got {:02x}{:02x}", + expected_type[0], expected_type[1], entry.entry_type[0], entry.entry_type[1] + ))); + } + + if entry.data.len() < 16 { + return Err(E2sError::Ssz( + "Index entry too short: need at least 16 bytes for starting_number and count" + .to_string(), + )); + } + + // Extract count from last 8 bytes + let count_bytes = &entry.data[entry.data.len() - 8..]; + let count = u64::from_le_bytes( + count_bytes + .try_into() + .map_err(|_| E2sError::Ssz("Failed to read count bytes".to_string()))?, + ) as usize; + + // Verify entry has correct size + let expected_len = 8 + count * 8 + 8; + if entry.data.len() != expected_len { + return Err(E2sError::Ssz(format!( + "Index entry has incorrect length: expected {expected_len}, got {}", + entry.data.len() + ))); + } + + // Extract starting number from first 8 bytes + let starting_number = u64::from_le_bytes( + entry.data[0..8] + .try_into() + .map_err(|_| E2sError::Ssz("Failed to read starting_number bytes".to_string()))?, + ); + + // Extract all offsets + let mut offsets = Vec::with_capacity(count); + for i in 0..count { + let start = 8 + i * 8; + let end = start + 8; + let offset_bytes = &entry.data[start..end]; + let offset = u64::from_le_bytes( + offset_bytes + .try_into() + .map_err(|_| E2sError::Ssz(format!("Failed to read offset {i} bytes")))?, + ); + offsets.push(offset); + } + + Ok(Self::new(starting_number, offsets)) + } +} diff --git a/crates/era/src/era1_file.rs b/crates/era/src/era1_file.rs index 
547d770f06d..b665b481766 100644 --- a/crates/era/src/era1_file.rs +++ b/crates/era/src/era1_file.rs @@ -3,11 +3,11 @@ //! The structure of an Era1 file follows the specification: //! `Version | block-tuple* | other-entries* | Accumulator | BlockIndex` //! -//! See also +//! See also . use crate::{ e2s_file::{E2StoreReader, E2StoreWriter}, - e2s_types::{E2sError, Entry, Version}, + e2s_types::{E2sError, Entry, IndexEntry, Version}, era1_types::{BlockIndex, Era1Group, Era1Id, BLOCK_INDEX}, execution_types::{ self, Accumulator, BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, @@ -43,13 +43,13 @@ impl Era1File { /// Get a block by its number, if present in this file pub fn get_block_by_number(&self, number: BlockNumber) -> Option<&BlockTuple> { - let index = (number - self.group.block_index.starting_number) as usize; + let index = (number - self.group.block_index.starting_number()) as usize; (index < self.group.blocks.len()).then(|| &self.group.blocks[index]) } /// Get the range of block numbers contained in this file pub fn block_range(&self) -> std::ops::RangeInclusive { - let start = self.group.block_index.starting_number; + let start = self.group.block_index.starting_number(); let end = start + (self.group.blocks.len() as u64) - 1; start..=end } @@ -59,6 +59,7 @@ impl Era1File { self.block_range().contains(&number) } } + /// Reader for Era1 files that builds on top of [`E2StoreReader`] #[derive(Debug)] pub struct Era1Reader { @@ -215,8 +216,8 @@ impl Era1Reader { let id = Era1Id::new( network_name, - block_index.starting_number, - block_index.offsets.len() as u32, + block_index.starting_number(), + block_index.offsets().len() as u32, ); Ok(Era1File::new(group, id)) @@ -445,7 +446,7 @@ mod tests { let mut offsets = Vec::with_capacity(block_count); for i in 0..block_count { - offsets.push(i as i64 * 100); + offsets.push(i as u64 * 100); } let block_index = BlockIndex::new(start_block, offsets); let group = Era1Group::new(blocks, accumulator, block_index); diff --git a/crates/era/src/era1_types.rs b/crates/era/src/era1_types.rs index 3078f952979..48a5486bd5b 100644 --- a/crates/era/src/era1_types.rs +++ b/crates/era/src/era1_types.rs @@ -3,7 +3,7 @@ //! 
See also use crate::{ - e2s_types::{E2sError, Entry}, + e2s_types::{Entry, IndexEntry}, execution_types::{Accumulator, BlockTuple, MAX_BLOCKS_PER_ERA1}, }; use alloy_primitives::BlockNumber; @@ -38,6 +38,7 @@ impl Era1Group { ) -> Self { Self { blocks, accumulator, block_index, other_entries: Vec::new() } } + /// Add another entry to this group pub fn add_entry(&mut self, entry: Entry) { self.other_entries.push(entry); @@ -52,20 +53,15 @@ impl Era1Group { #[derive(Debug, Clone)] pub struct BlockIndex { /// Starting block number - pub starting_number: BlockNumber, + starting_number: BlockNumber, /// Offsets to data at each block number - pub offsets: Vec, + offsets: Vec, } impl BlockIndex { - /// Create a new [`BlockIndex`] - pub const fn new(starting_number: BlockNumber, offsets: Vec) -> Self { - Self { starting_number, offsets } - } - /// Get the offset for a specific block number - pub fn offset_for_block(&self, block_number: BlockNumber) -> Option { + pub fn offset_for_block(&self, block_number: BlockNumber) -> Option { if block_number < self.starting_number { return None; } @@ -73,72 +69,23 @@ impl BlockIndex { let index = (block_number - self.starting_number) as usize; self.offsets.get(index).copied() } +} - /// Convert to an [`Entry`] for storage in an e2store file - pub fn to_entry(&self) -> Entry { - // Format: starting-(block)-number | index | index | index ... | count - let mut data = Vec::with_capacity(8 + self.offsets.len() * 8 + 8); - - // Add starting block number - data.extend_from_slice(&self.starting_number.to_le_bytes()); - - // Add all offsets - for offset in &self.offsets { - data.extend_from_slice(&offset.to_le_bytes()); - } - - // Add count - data.extend_from_slice(&(self.offsets.len() as i64).to_le_bytes()); - - Entry::new(BLOCK_INDEX, data) +impl IndexEntry for BlockIndex { + fn new(starting_number: u64, offsets: Vec) -> Self { + Self { starting_number, offsets } } - /// Create from an [`Entry`] - pub fn from_entry(entry: &Entry) -> Result { - if entry.entry_type != BLOCK_INDEX { - return Err(E2sError::Ssz(format!( - "Invalid entry type for BlockIndex: expected {:02x}{:02x}, got {:02x}{:02x}", - BLOCK_INDEX[0], BLOCK_INDEX[1], entry.entry_type[0], entry.entry_type[1] - ))); - } - - if entry.data.len() < 16 { - return Err(E2sError::Ssz(String::from( - "BlockIndex entry too short to contain starting block number and count", - ))); - } - - // Extract starting block number = first 8 bytes - let mut starting_number_bytes = [0u8; 8]; - starting_number_bytes.copy_from_slice(&entry.data[0..8]); - let starting_number = u64::from_le_bytes(starting_number_bytes); - - // Extract count = last 8 bytes - let mut count_bytes = [0u8; 8]; - count_bytes.copy_from_slice(&entry.data[entry.data.len() - 8..]); - let count = u64::from_le_bytes(count_bytes) as usize; - - // Verify that the entry has the correct size - let expected_size = 8 + count * 8 + 8; - if entry.data.len() != expected_size { - return Err(E2sError::Ssz(format!( - "BlockIndex entry has incorrect size: expected {}, got {}", - expected_size, - entry.data.len() - ))); - } + fn entry_type() -> [u8; 2] { + BLOCK_INDEX + } - // Extract all offsets - let mut offsets = Vec::with_capacity(count); - for i in 0..count { - let start = 8 + i * 8; - let end = start + 8; - let mut offset_bytes = [0u8; 8]; - offset_bytes.copy_from_slice(&entry.data[start..end]); - offsets.push(i64::from_le_bytes(offset_bytes)); - } + fn starting_number(&self) -> u64 { + self.starting_number + } - Ok(Self { starting_number, offsets }) + fn offsets(&self) 
-> &[u64] { + &self.offsets } } diff --git a/crates/era/src/era_types.rs b/crates/era/src/era_types.rs new file mode 100644 index 00000000000..d145a08daa7 --- /dev/null +++ b/crates/era/src/era_types.rs @@ -0,0 +1,286 @@ +//! Era types for `.era` files +//! +//! See also + +use crate::{ + consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock}, + e2s_types::{Entry, IndexEntry, SLOT_INDEX}, +}; + +/// Era file content group +/// +/// Format: `Version | block* | era-state | other-entries* | slot-index(block)? | slot-index(state)` +/// See also +#[derive(Debug)] +pub struct EraGroup { + /// Group including all blocks leading up to the era transition in slot order + pub blocks: Vec, + + /// State in the era transition slot + pub era_state: CompressedBeaconState, + + /// Other entries that don't fit into standard categories + pub other_entries: Vec, + + /// Block slot index, omitted for genesis era + pub slot_index: Option, + + /// State slot index + pub state_slot_index: SlotIndex, +} + +impl EraGroup { + /// Create a new era group + pub const fn new( + blocks: Vec, + era_state: CompressedBeaconState, + state_slot_index: SlotIndex, + ) -> Self { + Self { blocks, era_state, other_entries: Vec::new(), slot_index: None, state_slot_index } + } + + /// Create a new era group with block slot index + pub const fn with_block_index( + blocks: Vec, + era_state: CompressedBeaconState, + slot_index: SlotIndex, + state_slot_index: SlotIndex, + ) -> Self { + Self { + blocks, + era_state, + other_entries: Vec::new(), + slot_index: Some(slot_index), + state_slot_index, + } + } + + /// Check if this is a genesis era - no blocks yet + pub fn is_genesis(&self) -> bool { + self.blocks.is_empty() && self.slot_index.is_none() + } + + /// Add another entry to this group + pub fn add_entry(&mut self, entry: Entry) { + self.other_entries.push(entry); + } +} + +/// [`SlotIndex`] records store offsets to data at specific slots +/// from the beginning of the index record to the beginning of the corresponding data. +/// +/// Format: `starting-slot | index | index | index ... | count` +/// +/// See also . 
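For reference, the wire layout named in the doc comment above is shared by `BlockIndex` and the `SlotIndex` defined next: `starting-number | offset... | count`, every field a little-endian `u64`. A minimal standalone sketch of the encoding, mirroring `IndexEntry::to_entry` from `e2s_types.rs` (the function name here is illustrative):

```rust
// Encodes an index body as: starting-number | offset_0 .. offset_{n-1} | count,
// all little-endian u64, matching the `IndexEntry` format above.
fn encode_index_body(starting_number: u64, offsets: &[u64]) -> Vec<u8> {
    let mut data = Vec::with_capacity(8 + offsets.len() * 8 + 8);
    data.extend_from_slice(&starting_number.to_le_bytes());
    for offset in offsets {
        data.extend_from_slice(&offset.to_le_bytes());
    }
    data.extend_from_slice(&(offsets.len() as u64).to_le_bytes());
    data
}

fn main() {
    let body = encode_index_body(1000, &[100, 200, 300]);
    // 8 bytes starting number + 3 * 8 bytes offsets + 8 bytes trailing count
    assert_eq!(body.len(), 8 + 3 * 8 + 8);
    assert_eq!(&body[body.len() - 8..], &3u64.to_le_bytes());
}
```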
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct SlotIndex { + /// Starting slot number + pub starting_slot: u64, + + /// Offsets to data at each slot + /// 0 indicates no data for that slot + pub offsets: Vec, +} + +impl SlotIndex { + /// Create a new slot index + pub const fn new(starting_slot: u64, offsets: Vec) -> Self { + Self { starting_slot, offsets } + } + + /// Get the number of slots covered by this index + pub fn slot_count(&self) -> usize { + self.offsets.len() + } + + /// Get the offset for a specific slot + pub fn get_offset(&self, slot_index: usize) -> Option { + self.offsets.get(slot_index).copied() + } + + /// Check if a slot has data - non-zero offset + pub fn has_data_at_slot(&self, slot_index: usize) -> bool { + self.get_offset(slot_index).is_some_and(|offset| offset != 0) + } +} + +impl IndexEntry for SlotIndex { + fn new(starting_number: u64, offsets: Vec) -> Self { + Self { starting_slot: starting_number, offsets } + } + + fn entry_type() -> [u8; 2] { + SLOT_INDEX + } + + fn starting_number(&self) -> u64 { + self.starting_slot + } + + fn offsets(&self) -> &[u64] { + &self.offsets + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock}, + e2s_types::{Entry, IndexEntry}, + }; + + /// Helper function to create a simple beacon block + fn create_beacon_block(data_size: usize) -> CompressedSignedBeaconBlock { + let block_data = vec![0xAA; data_size]; + CompressedSignedBeaconBlock::new(block_data) + } + + /// Helper function to create a simple beacon state + fn create_beacon_state(data_size: usize) -> CompressedBeaconState { + let state_data = vec![0xBB; data_size]; + CompressedBeaconState::new(state_data) + } + + #[test] + fn test_slot_index_roundtrip() { + let starting_slot = 1000; + let offsets = vec![100, 200, 300, 400, 500]; + + let slot_index = SlotIndex::new(starting_slot, offsets.clone()); + + let entry = slot_index.to_entry(); + + // Validate entry type + assert_eq!(entry.entry_type, SLOT_INDEX); + + // Convert back to slot index + let recovered = SlotIndex::from_entry(&entry).unwrap(); + + // Verify fields match + assert_eq!(recovered.starting_slot, starting_slot); + assert_eq!(recovered.offsets, offsets); + } + #[test] + fn test_slot_index_basic_operations() { + let starting_slot = 2000; + let offsets = vec![100, 200, 300]; + + let slot_index = SlotIndex::new(starting_slot, offsets); + + assert_eq!(slot_index.slot_count(), 3); + assert_eq!(slot_index.starting_slot, 2000); + } + + #[test] + fn test_slot_index_empty_slots() { + let starting_slot = 1000; + let offsets = vec![100, 0, 300, 0, 500]; + + let slot_index = SlotIndex::new(starting_slot, offsets); + + // Test that empty slots return false for has_data_at_slot + // slot 1000: offset 100 + assert!(slot_index.has_data_at_slot(0)); + // slot 1001: offset 0 - empty + assert!(!slot_index.has_data_at_slot(1)); + // slot 1002: offset 300 + assert!(slot_index.has_data_at_slot(2)); + // slot 1003: offset 0 - empty + assert!(!slot_index.has_data_at_slot(3)); + // slot 1004: offset 500 + assert!(slot_index.has_data_at_slot(4)); + } + + #[test] + fn test_era_group_basic_construction() { + let blocks = + vec![create_beacon_block(10), create_beacon_block(15), create_beacon_block(20)]; + let era_state = create_beacon_state(50); + let state_slot_index = SlotIndex::new(1000, vec![100, 200, 300]); + + let era_group = EraGroup::new(blocks, era_state, state_slot_index); + + // Verify initial state + assert_eq!(era_group.blocks.len(), 3); + 
assert_eq!(era_group.other_entries.len(), 0); + assert_eq!(era_group.slot_index, None); + assert_eq!(era_group.state_slot_index.starting_slot, 1000); + assert_eq!(era_group.state_slot_index.offsets, vec![100, 200, 300]); + } + #[test] + fn test_era_group_with_block_index() { + let blocks = vec![create_beacon_block(10), create_beacon_block(15)]; + let era_state = create_beacon_state(50); + let block_slot_index = SlotIndex::new(500, vec![50, 100]); + let state_slot_index = SlotIndex::new(1000, vec![200, 300]); + + let era_group = + EraGroup::with_block_index(blocks, era_state, block_slot_index, state_slot_index); + + // Verify state with block index + assert_eq!(era_group.blocks.len(), 2); + assert_eq!(era_group.other_entries.len(), 0); + assert!(era_group.slot_index.is_some()); + + let block_index = era_group.slot_index.as_ref().unwrap(); + assert_eq!(block_index.starting_slot, 500); + assert_eq!(block_index.offsets, vec![50, 100]); + + assert_eq!(era_group.state_slot_index.starting_slot, 1000); + assert_eq!(era_group.state_slot_index.offsets, vec![200, 300]); + } + + #[test] + fn test_era_group_genesis_check() { + // Genesis era - no blocks, no block slot index + let era_state = create_beacon_state(50); + let state_slot_index = SlotIndex::new(0, vec![100]); + + let genesis_era = EraGroup::new(vec![], era_state, state_slot_index); + assert!(genesis_era.is_genesis()); + + // Non-genesis era - has blocks + let blocks = vec![create_beacon_block(10)]; + let era_state = create_beacon_state(50); + let state_slot_index = SlotIndex::new(1000, vec![100]); + + let normal_era = EraGroup::new(blocks, era_state, state_slot_index); + assert!(!normal_era.is_genesis()); + + // Non-genesis era - has block slot index + let era_state = create_beacon_state(50); + let block_slot_index = SlotIndex::new(500, vec![50]); + let state_slot_index = SlotIndex::new(1000, vec![100]); + + let era_with_index = + EraGroup::with_block_index(vec![], era_state, block_slot_index, state_slot_index); + assert!(!era_with_index.is_genesis()); + } + + #[test] + fn test_era_group_add_entries() { + let blocks = vec![create_beacon_block(10)]; + let era_state = create_beacon_state(50); + let state_slot_index = SlotIndex::new(1000, vec![100]); + + // Create and verify group + let mut era_group = EraGroup::new(blocks, era_state, state_slot_index); + assert_eq!(era_group.other_entries.len(), 0); + + // Create custom entries with different types + let entry1 = Entry::new([0x01, 0x01], vec![1, 2, 3, 4]); + let entry2 = Entry::new([0x02, 0x02], vec![5, 6, 7, 8]); + + // Add those entries + era_group.add_entry(entry1); + era_group.add_entry(entry2); + + // Verify entries were added correctly + assert_eq!(era_group.other_entries.len(), 2); + assert_eq!(era_group.other_entries[0].entry_type, [0x01, 0x01]); + assert_eq!(era_group.other_entries[0].data, vec![1, 2, 3, 4]); + assert_eq!(era_group.other_entries[1].entry_type, [0x02, 0x02]); + assert_eq!(era_group.other_entries[1].data, vec![5, 6, 7, 8]); + } +} diff --git a/crates/era/src/execution_types.rs b/crates/era/src/execution_types.rs index 34b953b8359..4a1e33df533 100644 --- a/crates/era/src/execution_types.rs +++ b/crates/era/src/execution_types.rs @@ -10,7 +10,10 @@ //! //! 
See also -use crate::e2s_types::{E2sError, Entry}; +use crate::{ + e2s_types::{E2sError, Entry}, + DecodeCompressed, +}; use alloy_consensus::{Block, BlockBody, Header}; use alloy_primitives::{B256, U256}; use alloy_rlp::{Decodable, Encodable}; @@ -96,12 +99,6 @@ pub struct CompressedHeader { pub data: Vec, } -/// Extension trait for generic decoding from compressed data -pub trait DecodeCompressed { - /// Decompress and decode the data into the given type - fn decode(&self) -> Result; -} - impl CompressedHeader { /// Create a new [`CompressedHeader`] from compressed data pub const fn new(data: Vec) -> Self { diff --git a/crates/era/src/lib.rs b/crates/era/src/lib.rs index 6007da18738..97ffa8b26c0 100644 --- a/crates/era/src/lib.rs +++ b/crates/era/src/lib.rs @@ -1,19 +1,37 @@ //! Era and Era1 files support for Ethereum history expiry. //! -//! -//! Era files are special instances of .e2s files with a strict content format -//! optimized for reading and long-term storage and distribution. -//! //! Era1 files use the same e2store foundation but are specialized for //! execution layer block history, following the format: //! Version | block-tuple* | other-entries* | Accumulator | `BlockIndex` //! +//! Era files are special instances of `.e2s` files with a strict content format +//! optimized for reading and long-term storage and distribution. +//! //! See also: //! - E2store format: +//! - Era format: //! - Era1 format: +pub mod consensus_types; pub mod e2s_file; pub mod e2s_types; pub mod era1_file; pub mod era1_types; +pub mod era_types; pub mod execution_types; + +use crate::e2s_types::E2sError; +use alloy_rlp::Decodable; +use ssz::Decode; + +/// Extension trait for generic decoding from compressed data +pub trait DecodeCompressed { + /// Decompress and decode the data into the given type + fn decode(&self) -> Result; +} + +/// Extension trait for generic decoding from compressed ssz data +pub trait DecodeCompressedSsz { + /// Decompress and decode the SSZ data into the given type + fn decode(&self) -> Result; +} diff --git a/crates/era/tests/it/dd.rs b/crates/era/tests/it/dd.rs index 7aa0afb6e20..0c656a512f9 100644 --- a/crates/era/tests/it/dd.rs +++ b/crates/era/tests/it/dd.rs @@ -4,6 +4,7 @@ use alloy_consensus::{BlockBody, Header}; use alloy_primitives::U256; use reth_era::{ + e2s_types::IndexEntry, era1_file::{Era1Reader, Era1Writer}, execution_types::CompressedBody, }; @@ -30,7 +31,7 @@ async fn test_mainnet_era1_only_file_decompression_and_decoding() -> eyre::Resul for &block_idx in &test_block_indices { let block = &file.group.blocks[block_idx]; - let block_number = file.group.block_index.starting_number + block_idx as u64; + let block_number = file.group.block_index.starting_number() + block_idx as u64; println!( "\n Testing block {}, compressed body size: {} bytes", @@ -110,7 +111,7 @@ async fn test_mainnet_era1_only_file_decompression_and_decoding() -> eyre::Resul for &idx in &test_block_indices { let original_block = &file.group.blocks[idx]; let read_back_block = &read_back_file.group.blocks[idx]; - let block_number = file.group.block_index.starting_number + idx as u64; + let block_number = file.group.block_index.starting_number() + idx as u64; println!("Block {block_number} details:"); println!(" Header size: {} bytes", original_block.header.data.len()); diff --git a/crates/era/tests/it/genesis.rs b/crates/era/tests/it/genesis.rs index 1812a77798a..80869f97fa0 100644 --- a/crates/era/tests/it/genesis.rs +++ b/crates/era/tests/it/genesis.rs @@ -3,13 +3,12 @@ //! 
These tests verify proper decompression and decoding of genesis blocks //! from different networks. -use alloy_consensus::{BlockBody, Header}; -use reth_era::execution_types::CompressedBody; -use reth_ethereum_primitives::TransactionSigned; - use crate::{ Era1TestDownloader, ERA1_MAINNET_FILES_NAMES, ERA1_SEPOLIA_FILES_NAMES, MAINNET, SEPOLIA, }; +use alloy_consensus::{BlockBody, Header}; +use reth_era::{e2s_types::IndexEntry, execution_types::CompressedBody}; +use reth_ethereum_primitives::TransactionSigned; #[tokio::test(flavor = "multi_thread")] #[ignore = "download intensive"] @@ -23,7 +22,7 @@ async fn test_mainnet_genesis_block_decompression() -> eyre::Result<()> { for &block_idx in &test_blocks { let block = &file.group.blocks[block_idx]; - let block_number = file.group.block_index.starting_number + block_idx as u64; + let block_number = file.group.block_index.starting_number() + block_idx as u64; println!( "Testing block {}, compressed body size: {} bytes", @@ -75,7 +74,7 @@ async fn test_sepolia_genesis_block_decompression() -> eyre::Result<()> { for &block_idx in &test_blocks { let block = &file.group.blocks[block_idx]; - let block_number = file.group.block_index.starting_number + block_idx as u64; + let block_number = file.group.block_index.starting_number() + block_idx as u64; println!( "Testing block {}, compressed body size: {} bytes", diff --git a/crates/era/tests/it/roundtrip.rs b/crates/era/tests/it/roundtrip.rs index a444fe9c570..2397094646a 100644 --- a/crates/era/tests/it/roundtrip.rs +++ b/crates/era/tests/it/roundtrip.rs @@ -10,6 +10,7 @@ use alloy_consensus::{BlockBody, BlockHeader, Header}; use rand::{prelude::IndexedRandom, rng}; use reth_era::{ + e2s_types::IndexEntry, era1_file::{Era1File, Era1Reader, Era1Writer}, era1_types::{Era1Group, Era1Id}, execution_types::{BlockTuple, CompressedBody, CompressedHeader, TotalDifficulty}, @@ -71,7 +72,7 @@ async fn test_file_roundtrip( for &block_id in &test_block_indices { let original_block = &original_file.group.blocks[block_id]; let roundtrip_block = &roundtrip_file.group.blocks[block_id]; - let block_number = original_file.group.block_index.starting_number + block_id as u64; + let block_number = original_file.group.block_index.starting_number() + block_id as u64; println!("Testing roundtrip for block {block_number}"); From ff76f66cd791cc2d1b1e4cc7e42f5d971084b293 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Wed, 23 Jul 2025 14:39:38 +0300 Subject: [PATCH 268/305] feat: abstraction for attributes -> NextBlockEnv conversion (#17570) --- crates/optimism/node/src/node.rs | 63 ++++++--- crates/optimism/payload/src/builder.rs | 176 +++++++++++++----------- crates/optimism/payload/src/payload.rs | 48 ++++++- crates/optimism/payload/src/traits.rs | 29 +++- crates/optimism/rpc/src/witness.rs | 43 +++--- crates/payload/primitives/src/lib.rs | 3 +- crates/payload/primitives/src/traits.rs | 22 ++- 7 files changed, 260 insertions(+), 124 deletions(-) diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index 4b2f713bcec..ed9e9b08f16 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -7,7 +7,7 @@ use crate::{ OpEngineApiBuilder, OpEngineTypes, }; use op_alloy_consensus::{interop::SafetyLevel, OpPooledTransaction}; -use op_alloy_rpc_types_engine::{OpExecutionData, OpPayloadAttributes}; +use op_alloy_rpc_types_engine::OpExecutionData; use reth_chainspec::{ChainSpecProvider, EthChainSpec, Hardforks}; use reth_engine_local::LocalPayloadAttributesBuilder; use 
reth_evm::ConfigureEvm; @@ -16,8 +16,8 @@ use reth_network::{ PeersInfo, }; use reth_node_api::{ - AddOnsContext, EngineTypes, FullNodeComponents, KeyHasherTy, NodeAddOns, NodePrimitives, - PayloadAttributesBuilder, PayloadTypes, PrimitivesTy, TxTy, + AddOnsContext, BuildNextEnv, EngineTypes, FullNodeComponents, HeaderTy, KeyHasherTy, + NodeAddOns, NodePrimitives, PayloadAttributesBuilder, PayloadTypes, PrimitivesTy, TxTy, }; use reth_node_builder::{ components::{ @@ -34,12 +34,12 @@ use reth_node_builder::{ }; use reth_optimism_chainspec::{OpChainSpec, OpHardfork}; use reth_optimism_consensus::OpBeaconConsensus; -use reth_optimism_evm::{OpEvmConfig, OpNextBlockEnvAttributes, OpRethReceiptBuilder}; +use reth_optimism_evm::{OpEvmConfig, OpRethReceiptBuilder}; use reth_optimism_forks::OpHardforks; use reth_optimism_payload_builder::{ builder::OpPayloadTransactions, config::{OpBuilderConfig, OpDAConfig}, - OpBuiltPayload, OpPayloadBuilderAttributes, OpPayloadPrimitives, + OpAttributes, OpBuiltPayload, OpPayloadPrimitives, }; use reth_optimism_primitives::{DepositReceipt, OpPrimitives}; use reth_optimism_rpc::{ @@ -63,6 +63,7 @@ use reth_transaction_pool::{ TransactionPool, TransactionValidationTaskExecutor, }; use reth_trie_db::MerklePatriciaTrie; +use serde::de::DeserializeOwned; use std::{marker::PhantomData, sync::Arc}; /// Marker trait for Optimism node types with standard engine, chain spec, and primitives. @@ -429,17 +430,29 @@ where } } -impl NodeAddOns for OpAddOns +impl NodeAddOns + for OpAddOns where N: FullNodeComponents< - Types: NodeTypes, - Evm: ConfigureEvm, + Types: NodeTypes< + ChainSpec: OpHardforks, + Primitives: OpPayloadPrimitives, + Payload: PayloadTypes, + >, + Evm: ConfigureEvm< + NextBlockEnvCtx: BuildNextEnv< + Attrs, + HeaderTy, + ::ChainSpec, + >, + >, Pool: TransactionPool, >, EthB: EthApiBuilder, EV: EngineValidatorBuilder, EB: EngineApiBuilder, RpcMiddleware: RethRpcMiddleware, + Attrs: OpAttributes, RpcPayloadAttributes: DeserializeOwned>, { type Handle = RpcHandle; @@ -485,7 +498,7 @@ where ctx.node.evm_config().clone(), ); // install additional OP specific rpc methods - let debug_ext = OpDebugWitnessApi::new( + let debug_ext = OpDebugWitnessApi::<_, _, _, Attrs>::new( ctx.node.provider().clone(), Box::new(ctx.node.task_executor().clone()), builder, @@ -544,17 +557,29 @@ where } } -impl RethRpcAddOns for OpAddOns +impl RethRpcAddOns + for OpAddOns where N: FullNodeComponents< - Types: OpFullNodeTypes, - Evm: ConfigureEvm, + Types: NodeTypes< + ChainSpec: OpHardforks, + Primitives: OpPayloadPrimitives, + Payload: PayloadTypes, + >, + Evm: ConfigureEvm< + NextBlockEnvCtx: BuildNextEnv< + Attrs, + HeaderTy, + ::ChainSpec, + >, + >, >, <::Pool as TransactionPool>::Transaction: OpPooledTx, EthB: EthApiBuilder, EV: EngineValidatorBuilder, EB: EngineApiBuilder, RpcMiddleware: RethRpcMiddleware, + Attrs: OpAttributes, RpcPayloadAttributes: DeserializeOwned>, { type EthApi = EthB::EthApi; @@ -941,7 +966,7 @@ impl OpPayloadBuilder { } } -impl PayloadBuilderBuilder for OpPayloadBuilder +impl PayloadBuilderBuilder for OpPayloadBuilder where Node: FullNodeTypes< Provider: ChainSpecProvider, @@ -949,20 +974,24 @@ where Primitives: OpPayloadPrimitives, Payload: PayloadTypes< BuiltPayload = OpBuiltPayload>, - PayloadAttributes = OpPayloadAttributes, - PayloadBuilderAttributes = OpPayloadBuilderAttributes>, + PayloadBuilderAttributes = Attrs, >, >, >, Evm: ConfigureEvm< Primitives = PrimitivesTy, - NextBlockEnvCtx = OpNextBlockEnvAttributes, + NextBlockEnvCtx: BuildNextEnv< + 
Attrs, + HeaderTy, + ::ChainSpec, + >, > + 'static, Pool: TransactionPool>> + Unpin + 'static, Txs: OpPayloadTransactions, + Attrs: OpAttributes>, { type PayloadBuilder = - reth_optimism_payload_builder::OpPayloadBuilder; + reth_optimism_payload_builder::OpPayloadBuilder; async fn build_payload_builder( self, diff --git a/crates/optimism/payload/src/builder.rs b/crates/optimism/payload/src/builder.rs index d5a3260420d..d511b17392f 100644 --- a/crates/optimism/payload/src/builder.rs +++ b/crates/optimism/payload/src/builder.rs @@ -3,14 +3,13 @@ use crate::{ config::{OpBuilderConfig, OpDAConfig}, error::OpPayloadBuilderError, - payload::{OpBuiltPayload, OpPayloadBuilderAttributes}, - OpPayloadPrimitives, + payload::OpBuiltPayload, + OpAttributes, OpPayloadBuilderAttributes, OpPayloadPrimitives, }; use alloy_consensus::{BlockHeader, Transaction, Typed2718}; -use alloy_primitives::{Bytes, B256, U256}; +use alloy_primitives::{B256, U256}; use alloy_rpc_types_debug::ExecutionWitness; use alloy_rpc_types_engine::PayloadId; -use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_basic_payload_builder::*; use reth_chain_state::{ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates}; use reth_chainspec::{ChainSpecProvider, EthChainSpec}; @@ -21,7 +20,6 @@ use reth_evm::{ ConfigureEvm, Database, Evm, }; use reth_execution_types::ExecutionOutcome; -use reth_optimism_evm::OpNextBlockEnvAttributes; use reth_optimism_forks::OpHardforks; use reth_optimism_primitives::{transaction::OpTransaction, ADDRESS_L2_TO_L1_MESSAGE_PASSER}; use reth_optimism_txpool::{ @@ -30,7 +28,7 @@ use reth_optimism_txpool::{ OpPooledTx, }; use reth_payload_builder_primitives::PayloadBuilderError; -use reth_payload_primitives::PayloadBuilderAttributes; +use reth_payload_primitives::{BuildNextEnv, PayloadBuilderAttributes}; use reth_payload_util::{BestPayloadTransactions, NoopPayloadTransactions, PayloadTransactions}; use reth_primitives_traits::{ HeaderTy, NodePrimitives, SealedHeader, SealedHeaderFor, SignedTransaction, TxTy, @@ -42,12 +40,18 @@ use reth_revm::{ use reth_storage_api::{errors::ProviderError, StateProvider, StateProviderFactory}; use reth_transaction_pool::{BestTransactionsAttributes, PoolTransaction, TransactionPool}; use revm::context::{Block, BlockEnv}; -use std::sync::Arc; +use std::{marker::PhantomData, sync::Arc}; use tracing::{debug, trace, warn}; /// Optimism's payload builder -#[derive(Debug, Clone)] -pub struct OpPayloadBuilder { +#[derive(Debug)] +pub struct OpPayloadBuilder< + Pool, + Client, + Evm, + Txs = (), + Attrs = OpPayloadBuilderAttributes::Primitives>>, +> { /// The rollup's compute pending block configuration option. // TODO(clabby): Implement this feature. pub compute_pending_block: bool, @@ -62,9 +66,31 @@ pub struct OpPayloadBuilder { /// The type responsible for yielding the best transactions for the payload if mempool /// transactions are allowed. pub best_transactions: Txs, + /// Marker for the payload attributes type. + _pd: PhantomData, +} + +impl Clone for OpPayloadBuilder +where + Pool: Clone, + Client: Clone, + Evm: ConfigureEvm, + Txs: Clone, +{ + fn clone(&self) -> Self { + Self { + evm_config: self.evm_config.clone(), + pool: self.pool.clone(), + client: self.client.clone(), + config: self.config.clone(), + best_transactions: self.best_transactions.clone(), + compute_pending_block: self.compute_pending_block, + _pd: PhantomData, + } + } } -impl OpPayloadBuilder { +impl OpPayloadBuilder { /// `OpPayloadBuilder` constructor. 
/// /// Configures the builder with the default settings. @@ -86,11 +112,12 @@ impl OpPayloadBuilder { evm_config, config, best_transactions: (), + _pd: PhantomData, } } } -impl OpPayloadBuilder { +impl OpPayloadBuilder { /// Sets the rollup's compute pending block configuration option. pub const fn set_compute_pending_block(mut self, compute_pending_block: bool) -> Self { self.compute_pending_block = compute_pending_block; @@ -102,7 +129,7 @@ impl OpPayloadBuilder { pub fn with_transactions( self, best_transactions: T, - ) -> OpPayloadBuilder { + ) -> OpPayloadBuilder { let Self { pool, client, compute_pending_block, evm_config, config, .. } = self; OpPayloadBuilder { pool, @@ -111,6 +138,7 @@ impl OpPayloadBuilder { evm_config, best_transactions, config, + _pd: PhantomData, } } @@ -125,12 +153,16 @@ impl OpPayloadBuilder { } } -impl OpPayloadBuilder +impl OpPayloadBuilder where Pool: TransactionPool>, Client: StateProviderFactory + ChainSpecProvider, N: OpPayloadPrimitives, - Evm: ConfigureEvm, + Evm: ConfigureEvm< + Primitives = N, + NextBlockEnvCtx: BuildNextEnv, + >, + Attrs: OpAttributes>, { /// Constructs an Optimism payload from the transactions sent via the /// Payload attributes by the sequencer. If the `no_tx_pool` argument is passed in @@ -142,7 +174,7 @@ where /// a result indicating success with the payload or an error in case of failure. fn build_payload<'a, Txs>( &self, - args: BuildArguments, OpBuiltPayload>, + args: BuildArguments>, best: impl FnOnce(BestTransactionsAttributes) -> Txs + Send + Sync + 'a, ) -> Result>, PayloadBuilderError> where @@ -165,7 +197,7 @@ where let state_provider = self.client.state_by_block_hash(ctx.parent().hash())?; let state = StateProviderDatabase::new(&state_provider); - if ctx.attributes().no_tx_pool { + if ctx.attributes().no_tx_pool() { builder.build(state, &state_provider, ctx) } else { // sequencer mode we can reuse cachedreads from previous runs @@ -178,10 +210,13 @@ where pub fn payload_witness( &self, parent: SealedHeader, - attributes: OpPayloadAttributes, - ) -> Result { - let attributes = OpPayloadBuilderAttributes::try_new(parent.hash(), attributes, 3) - .map_err(PayloadBuilderError::other)?; + attributes: Attrs::RpcPayloadAttributes, + ) -> Result + where + Attrs: PayloadBuilderAttributes, + { + let attributes = + Attrs::try_new(parent.hash(), attributes, 3).map_err(PayloadBuilderError::other)?; let config = PayloadConfig { parent_header: Arc::new(parent), attributes }; let ctx = OpPayloadBuilderCtx { @@ -201,15 +236,20 @@ where } /// Implementation of the [`PayloadBuilder`] trait for [`OpPayloadBuilder`]. -impl PayloadBuilder for OpPayloadBuilder +impl PayloadBuilder + for OpPayloadBuilder where N: OpPayloadPrimitives, Client: StateProviderFactory + ChainSpecProvider + Clone, Pool: TransactionPool>, - Evm: ConfigureEvm, + Evm: ConfigureEvm< + Primitives = N, + NextBlockEnvCtx: BuildNextEnv, + >, Txs: OpPayloadTransactions, + Attrs: OpAttributes, { - type Attributes = OpPayloadBuilderAttributes; + type Attributes = Attrs; type BuiltPayload = OpBuiltPayload; fn try_build( @@ -278,18 +318,22 @@ impl<'a, Txs> OpBuilder<'a, Txs> { impl OpBuilder<'_, Txs> { /// Builds the payload on top of the state. 
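An aside on `with_transactions` above: it swaps a single type parameter while moving every other field across, which is also why the struct can no longer derive `Clone` and gets the manual impl shown earlier. A reduced sketch of that generic-swap builder pattern, with illustrative names:

```rust
struct Builder<Txs> {
    best_transactions: Txs,
    compute_pending_block: bool,
}

impl<Txs> Builder<Txs> {
    // Taking `self` by value lets one field change type while the rest carry over.
    fn with_transactions<T>(self, best_transactions: T) -> Builder<T> {
        let Self { compute_pending_block, .. } = self;
        Builder { best_transactions, compute_pending_block }
    }
}

fn main() {
    let builder = Builder { best_transactions: (), compute_pending_block: false };
    let builder = builder.with_transactions("priority-fee-ordering");
    assert_eq!(builder.best_transactions, "priority-fee-ordering");
    assert!(!builder.compute_pending_block);
}
```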
- pub fn build( + pub fn build( self, db: impl Database, state_provider: impl StateProvider, - ctx: OpPayloadBuilderCtx, + ctx: OpPayloadBuilderCtx, ) -> Result>, PayloadBuilderError> where - EvmConfig: ConfigureEvm, + Evm: ConfigureEvm< + Primitives = N, + NextBlockEnvCtx: BuildNextEnv, + >, ChainSpec: EthChainSpec + OpHardforks, N: OpPayloadPrimitives, Txs: PayloadTransactions + OpPooledTx>, + Attrs: OpAttributes, { let Self { best } = self; debug!(target: "payload_builder", id=%ctx.payload_id(), parent_header = ?ctx.parent().hash(), parent_number = ctx.parent().number(), "building new payload"); @@ -308,7 +352,7 @@ impl OpBuilder<'_, Txs> { let mut info = ctx.execute_sequencer_transactions(&mut builder)?; // 3. if mem pool transactions are requested we execute them - if !ctx.attributes().no_tx_pool { + if !ctx.attributes().no_tx_pool() { let best_txs = best(ctx.best_transaction_attributes(builder.evm_mut().block())); if ctx.execute_best_transactions(&mut info, &mut builder, best_txs)?.is_some() { return Ok(BuildOutcomeKind::Cancelled) @@ -344,7 +388,7 @@ impl OpBuilder<'_, Txs> { trie: ExecutedTrieUpdates::Present(Arc::new(trie_updates)), }; - let no_tx_pool = ctx.attributes().no_tx_pool; + let no_tx_pool = ctx.attributes().no_tx_pool(); let payload = OpBuiltPayload::new(ctx.payload_id(), sealed_block, info.total_fees, Some(executed)); @@ -360,16 +404,20 @@ impl OpBuilder<'_, Txs> { } /// Builds the payload and returns its [`ExecutionWitness`] based on the state after execution. - pub fn witness( + pub fn witness( self, state_provider: impl StateProvider, - ctx: &OpPayloadBuilderCtx, + ctx: &OpPayloadBuilderCtx, ) -> Result where - Evm: ConfigureEvm, + Evm: ConfigureEvm< + Primitives = N, + NextBlockEnvCtx: BuildNextEnv, + >, ChainSpec: EthChainSpec + OpHardforks, N: OpPayloadPrimitives, Txs: PayloadTransactions>, + Attrs: OpAttributes, { let mut db = State::builder() .with_database(StateProviderDatabase::new(&state_provider)) @@ -480,7 +528,11 @@ impl ExecutionInfo { /// Container type that holds all necessities to build a new payload. #[derive(derive_more::Debug)] -pub struct OpPayloadBuilderCtx { +pub struct OpPayloadBuilderCtx< + Evm: ConfigureEvm, + ChainSpec, + Attrs = OpPayloadBuilderAttributes::Primitives>>, +> { /// The type that knows how to perform system calls and configure the evm. pub evm_config: Evm, /// The DA config for the payload builder @@ -488,18 +540,21 @@ pub struct OpPayloadBuilderCtx { /// The chainspec pub chain_spec: Arc, /// How to build the payload. - pub config: - PayloadConfig>, HeaderTy>, + pub config: PayloadConfig>, /// Marker to check whether the job has been cancelled. pub cancel: CancelOnDrop, /// The currently best payload. pub best_payload: Option>, } -impl OpPayloadBuilderCtx +impl OpPayloadBuilderCtx where - Evm: ConfigureEvm, + Evm: ConfigureEvm< + Primitives: OpPayloadPrimitives, + NextBlockEnvCtx: BuildNextEnv, ChainSpec>, + >, ChainSpec: EthChainSpec + OpHardforks, + Attrs: OpAttributes>, { /// Returns the parent block the payload will be build on. pub fn parent(&self) -> &SealedHeaderFor { @@ -507,27 +562,10 @@ where } /// Returns the builder attributes. - pub const fn attributes(&self) -> &OpPayloadBuilderAttributes> { + pub const fn attributes(&self) -> &Attrs { &self.config.attributes } - /// Returns the extra data for the block. 
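The `build` flow above fills the payload in two phases: sequencer transactions from the attributes always execute first, and mempool transactions are only pulled when `no_tx_pool()` is false. A toy illustration of that gating, with plain integers standing in for real transactions:

```rust
// Phase 1 always includes the attributes-supplied sequencer transactions;
// phase 2 adds the best mempool transactions unless `no_tx_pool` is set.
fn fill_payload(sequencer_txs: &[u32], pool_txs: &[u32], no_tx_pool: bool) -> Vec<u32> {
    let mut included = Vec::new();
    included.extend_from_slice(sequencer_txs);
    if !no_tx_pool {
        included.extend_from_slice(pool_txs);
    }
    included
}

fn main() {
    assert_eq!(fill_payload(&[1, 2], &[3], true), vec![1, 2]);
    assert_eq!(fill_payload(&[1, 2], &[3], false), vec![1, 2, 3]);
}
```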
- /// - /// After holocene this extracts the extra data from the payload - pub fn extra_data(&self) -> Result { - if self.is_holocene_active() { - self.attributes() - .get_holocene_extra_data( - self.chain_spec.base_fee_params_at_timestamp( - self.attributes().payload_attributes.timestamp, - ), - ) - .map_err(PayloadBuilderError::other) - } else { - Ok(Default::default()) - } - } - /// Returns the current fee settings for transactions from the mempool pub fn best_transaction_attributes(&self, block_env: &BlockEnv) -> BestTransactionsAttributes { BestTransactionsAttributes::new( @@ -541,11 +579,6 @@ where self.attributes().payload_id() } - /// Returns true if holocene is active for the payload. - pub fn is_holocene_active(&self) -> bool { - self.chain_spec.is_holocene_active_at_timestamp(self.attributes().timestamp()) - } - /// Returns true if the fees are higher than the previous payload. pub fn is_better_payload(&self, total_fees: U256) -> bool { is_better_payload(self.best_payload.as_ref(), total_fees) @@ -560,27 +593,16 @@ where .builder_for_next_block( db, self.parent(), - OpNextBlockEnvAttributes { - timestamp: self.attributes().timestamp(), - suggested_fee_recipient: self.attributes().suggested_fee_recipient(), - prev_randao: self.attributes().prev_randao(), - gas_limit: self - .attributes() - .gas_limit - .unwrap_or_else(|| self.parent().gas_limit()), - parent_beacon_block_root: self.attributes().parent_beacon_block_root(), - extra_data: self.extra_data()?, - }, + Evm::NextBlockEnvCtx::build_next_env( + self.attributes(), + self.parent(), + self.chain_spec.as_ref(), + ) + .map_err(PayloadBuilderError::other)?, ) .map_err(PayloadBuilderError::other) } -} -impl OpPayloadBuilderCtx -where - Evm: ConfigureEvm, - ChainSpec: EthChainSpec + OpHardforks, -{ /// Executes all sequencer transactions that are included in the payload attributes. pub fn execute_sequencer_transactions( &self, @@ -588,7 +610,7 @@ where ) -> Result { let mut info = ExecutionInfo::new(); - for sequencer_tx in &self.attributes().transactions { + for sequencer_tx in self.attributes().sequencer_transactions() { // A sequencer's block should never contain blob transactions. 
if sequencer_tx.value().is_eip4844() { return Err(PayloadBuilderError::other( diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 0416cf68bab..c84e9c70ec7 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -2,7 +2,7 @@ use std::{fmt::Debug, sync::Arc}; -use alloy_consensus::Block; +use alloy_consensus::{Block, BlockHeader}; use alloy_eips::{ eip1559::BaseFeeParams, eip2718::Decodable2718, eip4895::Withdrawals, eip7685::Requests, }; @@ -17,9 +17,14 @@ use op_alloy_rpc_types_engine::{ OpExecutionPayloadEnvelopeV3, OpExecutionPayloadEnvelopeV4, OpExecutionPayloadV4, }; use reth_chain_state::ExecutedBlockWithTrieUpdates; -use reth_payload_builder::EthPayloadBuilderAttributes; -use reth_payload_primitives::{BuiltPayload, PayloadBuilderAttributes}; -use reth_primitives_traits::{NodePrimitives, SealedBlock, SignedTransaction, WithEncoded}; +use reth_chainspec::EthChainSpec; +use reth_optimism_evm::OpNextBlockEnvAttributes; +use reth_optimism_forks::OpHardforks; +use reth_payload_builder::{EthPayloadBuilderAttributes, PayloadBuilderError}; +use reth_payload_primitives::{BuildNextEnv, BuiltPayload, PayloadBuilderAttributes}; +use reth_primitives_traits::{ + NodePrimitives, SealedBlock, SealedHeader, SignedTransaction, WithEncoded, +}; /// Re-export for use in downstream arguments. pub use op_alloy_rpc_types_engine::OpPayloadAttributes; @@ -65,7 +70,7 @@ impl OpPayloadBuilderAttributes { } } -impl PayloadBuilderAttributes +impl PayloadBuilderAttributes for OpPayloadBuilderAttributes { type RpcPayloadAttributes = OpPayloadAttributes; @@ -377,6 +382,39 @@ pub fn payload_id_optimism( PayloadId::new(out.as_slice()[..8].try_into().expect("sufficient length")) } +impl BuildNextEnv, H, ChainSpec> + for OpNextBlockEnvAttributes +where + H: BlockHeader, + T: SignedTransaction, + ChainSpec: EthChainSpec + OpHardforks, +{ + fn build_next_env( + attributes: &OpPayloadBuilderAttributes, + parent: &SealedHeader, + chain_spec: &ChainSpec, + ) -> Result { + let extra_data = if chain_spec.is_holocene_active_at_timestamp(attributes.timestamp()) { + attributes + .get_holocene_extra_data( + chain_spec.base_fee_params_at_timestamp(attributes.timestamp()), + ) + .map_err(PayloadBuilderError::other)? + } else { + Default::default() + }; + + Ok(Self { + timestamp: attributes.timestamp(), + suggested_fee_recipient: attributes.suggested_fee_recipient(), + prev_randao: attributes.prev_randao(), + gas_limit: attributes.gas_limit.unwrap_or_else(|| parent.gas_limit()), + parent_beacon_block_root: attributes.parent_beacon_block_root(), + extra_data, + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/optimism/payload/src/traits.rs b/crates/optimism/payload/src/traits.rs index 6ca07e86e3f..485b8d1df9e 100644 --- a/crates/optimism/payload/src/traits.rs +++ b/crates/optimism/payload/src/traits.rs @@ -1,6 +1,9 @@ use alloy_consensus::BlockBody; use reth_optimism_primitives::{transaction::OpTransaction, DepositReceipt}; -use reth_primitives_traits::{FullBlockHeader, NodePrimitives, SignedTransaction}; +use reth_payload_primitives::PayloadBuilderAttributes; +use reth_primitives_traits::{FullBlockHeader, NodePrimitives, SignedTransaction, WithEncoded}; + +use crate::OpPayloadBuilderAttributes; /// Helper trait to encapsulate common bounds on [`NodePrimitives`] for OP payload builder. 
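The `build_next_env` implementation for `OpNextBlockEnvAttributes` above gates `extra_data` on whether Holocene is active at the attributes' timestamp. Reduced to its decision logic, with the chain-spec query and the Holocene-encoded payload collapsed into plain values purely for illustration:

```rust
// Post-Holocene payloads carry dynamic EIP-1559 params in `extra_data`;
// pre-Holocene payloads leave it empty, as in `build_next_env` above.
fn select_extra_data(holocene_active: bool, holocene_extra_data: Vec<u8>) -> Vec<u8> {
    if holocene_active {
        holocene_extra_data
    } else {
        Default::default()
    }
}

fn main() {
    assert!(select_extra_data(false, vec![0x00, 0x01, 0x02]).is_empty());
    assert_eq!(select_extra_data(true, vec![0x00, 0x01, 0x02]), vec![0x00, 0x01, 0x02]);
}
```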
pub trait OpPayloadPrimitives: @@ -31,3 +34,27 @@ where type _TX = Tx; type _Header = Header; } + +/// Attributes for the OP payload builder. +pub trait OpAttributes: PayloadBuilderAttributes { + /// Primitive transaction type. + type Transaction: SignedTransaction; + + /// Whether to use the transaction pool for the payload. + fn no_tx_pool(&self) -> bool; + + /// Sequencer transactions to include in the payload. + fn sequencer_transactions(&self) -> &[WithEncoded]; +} + +impl OpAttributes for OpPayloadBuilderAttributes { + type Transaction = T; + + fn no_tx_pool(&self) -> bool { + self.no_tx_pool + } + + fn sequencer_transactions(&self) -> &[WithEncoded] { + &self.transactions + } +} diff --git a/crates/optimism/rpc/src/witness.rs b/crates/optimism/rpc/src/witness.rs index bc86e93f91c..1858b4fd2f1 100644 --- a/crates/optimism/rpc/src/witness.rs +++ b/crates/optimism/rpc/src/witness.rs @@ -3,15 +3,13 @@ use alloy_primitives::B256; use alloy_rpc_types_debug::ExecutionWitness; use jsonrpsee_core::{async_trait, RpcResult}; -use op_alloy_rpc_types_engine::OpPayloadAttributes; use reth_chainspec::ChainSpecProvider; use reth_evm::ConfigureEvm; -use reth_node_api::NodePrimitives; -use reth_optimism_evm::OpNextBlockEnvAttributes; +use reth_node_api::{BuildNextEnv, NodePrimitives}; use reth_optimism_forks::OpHardforks; -use reth_optimism_payload_builder::{OpPayloadBuilder, OpPayloadPrimitives}; +use reth_optimism_payload_builder::{OpAttributes, OpPayloadBuilder, OpPayloadPrimitives}; use reth_optimism_txpool::OpPooledTx; -use reth_primitives_traits::SealedHeader; +use reth_primitives_traits::{SealedHeader, TxTy}; pub use reth_rpc_api::DebugExecutionWitnessApiServer; use reth_rpc_server_types::{result::internal_rpc_err, ToRpcResult}; use reth_storage_api::{ @@ -24,16 +22,16 @@ use std::{fmt::Debug, sync::Arc}; use tokio::sync::{oneshot, Semaphore}; /// An extension to the `debug_` namespace of the RPC API. -pub struct OpDebugWitnessApi { - inner: Arc>, +pub struct OpDebugWitnessApi { + inner: Arc>, } -impl OpDebugWitnessApi { +impl OpDebugWitnessApi { /// Creates a new instance of the `OpDebugWitnessApi`. 
pub fn new( provider: Provider, task_spawner: Box, - builder: OpPayloadBuilder, + builder: OpPayloadBuilder, ) -> Self { let semaphore = Arc::new(Semaphore::new(3)); let inner = OpDebugWitnessApiInner { provider, builder, task_spawner, semaphore }; @@ -41,7 +39,7 @@ impl OpDebugWitnessApi { } } -impl OpDebugWitnessApi +impl OpDebugWitnessApi where EvmConfig: ConfigureEvm, Provider: NodePrimitivesProvider> @@ -60,8 +58,8 @@ where } #[async_trait] -impl DebugExecutionWitnessApiServer - for OpDebugWitnessApi +impl DebugExecutionWitnessApiServer + for OpDebugWitnessApi where Pool: TransactionPool< Transaction: OpPooledTx::SignedTx>, @@ -72,13 +70,16 @@ where + ChainSpecProvider + Clone + 'static, - EvmConfig: ConfigureEvm - + 'static, + EvmConfig: ConfigureEvm< + Primitives = Provider::Primitives, + NextBlockEnvCtx: BuildNextEnv, + > + 'static, + Attrs: OpAttributes>, { async fn execute_payload( &self, parent_block_hash: B256, - attributes: OpPayloadAttributes, + attributes: Attrs::RpcPayloadAttributes, ) -> RpcResult { let _permit = self.inner.semaphore.acquire().await; @@ -97,20 +98,24 @@ where } } -impl Clone for OpDebugWitnessApi { +impl Clone + for OpDebugWitnessApi +{ fn clone(&self) -> Self { Self { inner: Arc::clone(&self.inner) } } } -impl Debug for OpDebugWitnessApi { +impl Debug + for OpDebugWitnessApi +{ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("OpDebugWitnessApi").finish_non_exhaustive() } } -struct OpDebugWitnessApiInner { +struct OpDebugWitnessApiInner { provider: Provider, - builder: OpPayloadBuilder, + builder: OpPayloadBuilder, task_spawner: Box, semaphore: Arc, } diff --git a/crates/payload/primitives/src/lib.rs b/crates/payload/primitives/src/lib.rs index 5770c1381aa..811b9da7f19 100644 --- a/crates/payload/primitives/src/lib.rs +++ b/crates/payload/primitives/src/lib.rs @@ -26,7 +26,8 @@ pub use error::{ mod traits; pub use traits::{ - BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, PayloadBuilderAttributes, + BuildNextEnv, BuiltPayload, PayloadAttributes, PayloadAttributesBuilder, + PayloadBuilderAttributes, }; mod payload; diff --git a/crates/payload/primitives/src/traits.rs b/crates/payload/primitives/src/traits.rs index a50c9d2a214..868929c2b1b 100644 --- a/crates/payload/primitives/src/traits.rs +++ b/crates/payload/primitives/src/traits.rs @@ -9,7 +9,9 @@ use alloy_primitives::{Address, B256, U256}; use alloy_rpc_types_engine::{PayloadAttributes as EthPayloadAttributes, PayloadId}; use core::fmt; use reth_chain_state::ExecutedBlockWithTrieUpdates; -use reth_primitives_traits::{NodePrimitives, SealedBlock}; +use reth_primitives_traits::{NodePrimitives, SealedBlock, SealedHeader}; + +use crate::PayloadBuilderError; /// Represents a successfully built execution payload (block). /// @@ -44,11 +46,11 @@ pub trait BuiltPayload: Send + Sync + fmt::Debug { /// /// Extends basic payload attributes with additional context needed during the /// building process, tracking in-progress payload jobs and their parameters. -pub trait PayloadBuilderAttributes: Send + Sync + fmt::Debug { +pub trait PayloadBuilderAttributes: Send + Sync + Unpin + fmt::Debug + 'static { /// The external payload attributes format this type can be constructed from. - type RpcPayloadAttributes; + type RpcPayloadAttributes: Send + Sync + 'static; /// The error type used in [`PayloadBuilderAttributes::try_new`]. 
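The witness API above bounds concurrent payload builds with a three-permit `tokio` semaphore. A self-contained sketch of that throttling pattern follows; the task bodies are illustrative:

```rust
use std::sync::Arc;
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    // Three permits: at most three witness-style requests run at once.
    let semaphore = Arc::new(Semaphore::new(3));
    let mut handles = Vec::new();
    for i in 0..5 {
        let semaphore = Arc::clone(&semaphore);
        handles.push(tokio::spawn(async move {
            // Waits here whenever three requests already hold permits;
            // the permit is released when it is dropped at the end of the task.
            let _permit = semaphore.acquire_owned().await.unwrap();
            println!("request {i} running");
        }));
    }
    for handle in handles {
        handle.await.unwrap();
    }
}
```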
- type Error: core::error::Error; + type Error: core::error::Error + Send + Sync + 'static; /// Constructs new builder attributes from external payload attributes. /// @@ -144,3 +146,15 @@ pub trait PayloadAttributesBuilder: Send + Sync + 'static { /// Constructs new payload attributes for the given timestamp. fn build(&self, timestamp: u64) -> Attributes; } + +/// Trait to build the EVM environment for the next block from the given payload attributes. +/// +/// Accepts payload attributes from CL, parent header and additional payload builder context. +pub trait BuildNextEnv: Sized { + /// Builds the EVM environment for the next block from the given payload attributes. + fn build_next_env( + attributes: &Attributes, + parent: &SealedHeader
, + ctx: &Ctx, + ) -> Result; +} From c986441d878705c105a86bce231ec283a50f682e Mon Sep 17 00:00:00 2001 From: Tomass <155266802+zeroprooff@users.noreply.github.com> Date: Wed, 23 Jul 2025 16:03:18 +0300 Subject: [PATCH 269/305] fix: correct prune mode assignments in HistoryIndexingStages (#17575) --- crates/stages/stages/src/sets.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/stages/stages/src/sets.rs b/crates/stages/stages/src/sets.rs index 512e4571c96..97c3a3116aa 100644 --- a/crates/stages/stages/src/sets.rs +++ b/crates/stages/stages/src/sets.rs @@ -444,12 +444,12 @@ where .add_stage(IndexStorageHistoryStage::new( self.stages_config.index_storage_history, self.stages_config.etl.clone(), - self.prune_modes.account_history, + self.prune_modes.storage_history, )) .add_stage(IndexAccountHistoryStage::new( self.stages_config.index_account_history, self.stages_config.etl.clone(), - self.prune_modes.storage_history, + self.prune_modes.account_history, )) } } From 9ff444ea9ece7a80ad1fb05573540e018810f344 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 23 Jul 2025 15:34:51 +0200 Subject: [PATCH 270/305] fix(txpool): enforce encoded length check (#17581) --- crates/transaction-pool/src/error.rs | 5 +++ crates/transaction-pool/src/validate/eth.rs | 44 ++++++++++++++++++--- crates/transaction-pool/src/validate/mod.rs | 8 ++++ 3 files changed, 51 insertions(+), 6 deletions(-) diff --git a/crates/transaction-pool/src/error.rs b/crates/transaction-pool/src/error.rs index 3f2948b94ed..b499c57aebd 100644 --- a/crates/transaction-pool/src/error.rs +++ b/crates/transaction-pool/src/error.rs @@ -391,6 +391,11 @@ impl InvalidPoolTransactionError { } } + /// Returns `true` if an import failed due to an oversized transaction + pub const fn is_oversized(&self) -> bool { + matches!(self, Self::OversizedData(_, _)) + } + /// Returns `true` if an import failed due to nonce gap. pub const fn is_nonce_gap(&self) -> bool { matches!(self, Self::Consensus(InvalidTransactionError::NonceNotConsistent { .. })) || diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index 80ee4a040b5..b32401f2cbb 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -361,12 +361,30 @@ where } // Reject transactions over defined size to prevent DOS attacks - let tx_input_len = transaction.input().len(); - if tx_input_len > self.max_tx_input_bytes { - return Err(TransactionValidationOutcome::Invalid( - transaction, - InvalidPoolTransactionError::OversizedData(tx_input_len, self.max_tx_input_bytes), - )) + if transaction.is_eip4844() { + // Since blob transactions are pulled instead of pushed, and only the consensus data is + // kept in memory while the sidecar is cached on disk, there is no critical limit that + // should be enforced. Still, enforcing some cap on the input bytes. blob txs also must + // be executable right away when they enter the pool. 
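The fix above splits the DOS size check by transaction type: EIP-4844 transactions, whose blobs live in the sidecar, stay capped by calldata length only, while every other transaction is now capped by its full encoded length. Reduced to the branch logic, with an illustrative cap value rather than reth's actual default constant:

```rust
// Blob (EIP-4844) transactions are checked on input length only; all other
// transactions are checked on their full RLP-encoded length.
fn exceeds_size_limit(
    is_eip4844: bool,
    input_len: usize,
    encoded_len: usize,
    max_tx_input_bytes: usize,
) -> bool {
    if is_eip4844 {
        input_len > max_tx_input_bytes
    } else {
        encoded_len > max_tx_input_bytes
    }
}

fn main() {
    let max = 128 * 1024; // illustrative cap, not the exact reth default
    assert!(!exceeds_size_limit(false, 100, 1_000, max));
    assert!(exceeds_size_limit(false, 100, max + 1, max)); // oversized envelope
    assert!(!exceeds_size_limit(true, 100, max + 1, max)); // blob tx: input only
}
```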
+ let tx_input_len = transaction.input().len(); + if tx_input_len > self.max_tx_input_bytes { + return Err(TransactionValidationOutcome::Invalid( + transaction, + InvalidPoolTransactionError::OversizedData( + tx_input_len, + self.max_tx_input_bytes, + ), + )) + } + } else { + // ensure the size of the non-blob transaction + let tx_size = transaction.encoded_length(); + if tx_size > self.max_tx_input_bytes { + return Err(TransactionValidationOutcome::Invalid( + transaction, + InvalidPoolTransactionError::OversizedData(tx_size, self.max_tx_input_bytes), + )) + } } // Check whether the init code size has been exceeded. @@ -1578,4 +1596,18 @@ mod tests { let outcome = validator.validate_one(TransactionOrigin::Private, transaction); assert!(outcome.is_invalid()); // Still invalid because sender not in whitelist } + + #[test] + fn reject_oversized_tx() { + let mut transaction = get_transaction(); + transaction.encoded_length = DEFAULT_MAX_TX_INPUT_BYTES + 1; + let provider = MockEthProvider::default(); + + // No minimum priority fee set (default is None) + let validator = create_validator_with_minimum_fee(provider, None, None); + + let outcome = validator.validate_one(TransactionOrigin::External, transaction); + let invalid = outcome.as_invalid().unwrap(); + assert!(invalid.is_oversized()); + } } diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index 36d9f14addb..bef1297aff1 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -66,6 +66,14 @@ impl TransactionValidationOutcome { } } + /// Returns the [`InvalidPoolTransactionError`] if this is an invalid variant. + pub const fn as_invalid(&self) -> Option<&InvalidPoolTransactionError> { + match self { + Self::Invalid(_, err) => Some(err), + _ => None, + } + } + /// Returns true if the transaction is valid. pub const fn is_valid(&self) -> bool { matches!(self, Self::Valid { .. }) From a72fe7a2d0e85d27f9ffae7bf4a50db3fb4484ef Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Wed, 23 Jul 2025 15:44:33 +0200 Subject: [PATCH 271/305] chore: move validation to standalone fns (#17582) --- crates/ethereum/payload/src/validator.rs | 129 +++++++++++------------ crates/optimism/payload/src/validator.rs | 101 ++++++++++-------- 2 files changed, 121 insertions(+), 109 deletions(-) diff --git a/crates/ethereum/payload/src/validator.rs b/crates/ethereum/payload/src/validator.rs index 75f4b1f474c..ccace26ef80 100644 --- a/crates/ethereum/payload/src/validator.rs +++ b/crates/ethereum/payload/src/validator.rs @@ -28,83 +28,80 @@ impl EthereumExecutionPayloadValidator { } impl EthereumExecutionPayloadValidator { - /// Returns true if the Cancun hardfork is active at the given timestamp. - #[inline] - fn is_cancun_active_at_timestamp(&self, timestamp: u64) -> bool { - self.chain_spec().is_cancun_active_at_timestamp(timestamp) - } - - /// Returns true if the Shanghai hardfork is active at the given timestamp. - #[inline] - fn is_shanghai_active_at_timestamp(&self, timestamp: u64) -> bool { - self.chain_spec().is_shanghai_active_at_timestamp(timestamp) - } - - /// Returns true if the Prague hardfork is active at the given timestamp. 
- #[inline] - fn is_prague_active_at_timestamp(&self, timestamp: u64) -> bool { - self.chain_spec().is_prague_active_at_timestamp(timestamp) - } - /// Ensures that the given payload does not violate any consensus rules that concern the block's - /// layout, like: - /// - missing or invalid base fee - /// - invalid extra data - /// - invalid transactions - /// - incorrect hash - /// - the versioned hashes passed with the payload do not exactly match transaction versioned - /// hashes - /// - the block does not contain blob transactions if it is pre-cancun + /// layout, /// - /// The checks are done in the order that conforms with the engine-API specification. - /// - /// This is intended to be invoked after receiving the payload from the CLI. - /// The additional [`MaybeCancunPayloadFields`](alloy_rpc_types_engine::MaybeCancunPayloadFields) are not part of the payload, but are additional fields in the `engine_newPayloadV3` RPC call, See also - /// - /// If the cancun fields are provided this also validates that the versioned hashes in the block - /// match the versioned hashes passed in the - /// [`CancunPayloadFields`](alloy_rpc_types_engine::CancunPayloadFields), if the cancun payload - /// fields are provided. If the payload fields are not provided, but versioned hashes exist - /// in the block, this is considered an error: [`PayloadError::InvalidVersionedHashes`]. - /// - /// This validates versioned hashes according to the Engine API Cancun spec: - /// + /// See also [`ensure_well_formed_payload`] pub fn ensure_well_formed_payload( &self, payload: ExecutionData, ) -> Result>, PayloadError> { - let ExecutionData { payload, sidecar } = payload; + ensure_well_formed_payload(&self.chain_spec, payload) + } +} + +/// Ensures that the given payload does not violate any consensus rules that concern the block's +/// layout, like: +/// - missing or invalid base fee +/// - invalid extra data +/// - invalid transactions +/// - incorrect hash +/// - the versioned hashes passed with the payload do not exactly match transaction versioned +/// hashes +/// - the block does not contain blob transactions if it is pre-cancun +/// +/// The checks are done in the order that conforms with the engine-API specification. +/// +/// This is intended to be invoked after receiving the payload from the CLI. +/// The additional [`MaybeCancunPayloadFields`](alloy_rpc_types_engine::MaybeCancunPayloadFields) are not part of the payload, but are additional fields in the `engine_newPayloadV3` RPC call, See also +/// +/// If the cancun fields are provided this also validates that the versioned hashes in the block +/// match the versioned hashes passed in the +/// [`CancunPayloadFields`](alloy_rpc_types_engine::CancunPayloadFields), if the cancun payload +/// fields are provided. If the payload fields are not provided, but versioned hashes exist +/// in the block, this is considered an error: [`PayloadError::InvalidVersionedHashes`]. 
+/// +/// This validates versioned hashes according to the Engine API Cancun spec: +/// +pub fn ensure_well_formed_payload( + chain_spec: ChainSpec, + payload: ExecutionData, +) -> Result>, PayloadError> +where + ChainSpec: EthereumHardforks, + T: SignedTransaction, +{ + let ExecutionData { payload, sidecar } = payload; - let expected_hash = payload.block_hash(); + let expected_hash = payload.block_hash(); - // First parse the block - let sealed_block = payload.try_into_block_with_sidecar(&sidecar)?.seal_slow(); + // First parse the block + let sealed_block = payload.try_into_block_with_sidecar(&sidecar)?.seal_slow(); - // Ensure the hash included in the payload matches the block hash - if expected_hash != sealed_block.hash() { - return Err(PayloadError::BlockHash { - execution: sealed_block.hash(), - consensus: expected_hash, - }) - } + // Ensure the hash included in the payload matches the block hash + if expected_hash != sealed_block.hash() { + return Err(PayloadError::BlockHash { + execution: sealed_block.hash(), + consensus: expected_hash, + }) + } - shanghai::ensure_well_formed_fields( - sealed_block.body(), - self.is_shanghai_active_at_timestamp(sealed_block.timestamp), - )?; + shanghai::ensure_well_formed_fields( + sealed_block.body(), + chain_spec.is_shanghai_active_at_timestamp(sealed_block.timestamp), + )?; - cancun::ensure_well_formed_fields( - &sealed_block, - sidecar.cancun(), - self.is_cancun_active_at_timestamp(sealed_block.timestamp), - )?; + cancun::ensure_well_formed_fields( + &sealed_block, + sidecar.cancun(), + chain_spec.is_cancun_active_at_timestamp(sealed_block.timestamp), + )?; - prague::ensure_well_formed_fields( - sealed_block.body(), - sidecar.prague(), - self.is_prague_active_at_timestamp(sealed_block.timestamp), - )?; + prague::ensure_well_formed_fields( + sealed_block.body(), + sidecar.prague(), + chain_spec.is_prague_active_at_timestamp(sealed_block.timestamp), + )?; - Ok(sealed_block) - } + Ok(sealed_block) } diff --git a/crates/optimism/payload/src/validator.rs b/crates/optimism/payload/src/validator.rs index b287c553989..fa0d610469c 100644 --- a/crates/optimism/payload/src/validator.rs +++ b/crates/optimism/payload/src/validator.rs @@ -27,59 +27,74 @@ where } /// Ensures that the given payload does not violate any consensus rules that concern the block's - /// layout, like: - /// - missing or invalid base fee - /// - invalid extra data - /// - invalid transactions - /// - incorrect hash - /// - block contains blob transactions or blob versioned hashes - /// - block contains l1 withdrawals + /// layout. /// - /// The checks are done in the order that conforms with the engine-API specification. - /// - /// This is intended to be invoked after receiving the payload from the CLI. - /// The additional fields, starting with [`MaybeCancunPayloadFields`](alloy_rpc_types_engine::MaybeCancunPayloadFields), are not part of the payload, but are additional fields starting in the `engine_newPayloadV3` RPC call, See also - /// - /// If the cancun fields are provided this also validates that the versioned hashes in the block - /// are empty as well as those passed in the sidecar. If the payload fields are not provided. - /// - /// Validation according to specs . + /// See also [`ensure_well_formed_payload`]. 
pub fn ensure_well_formed_payload( &self, payload: OpExecutionData, ) -> Result>, OpPayloadError> { - let OpExecutionData { payload, sidecar } = payload; + ensure_well_formed_payload(self.chain_spec(), payload) + } +} - let expected_hash = payload.block_hash(); +/// Ensures that the given payload does not violate any consensus rules that concern the block's +/// layout, like: +/// - missing or invalid base fee +/// - invalid extra data +/// - invalid transactions +/// - incorrect hash +/// - block contains blob transactions or blob versioned hashes +/// - block contains l1 withdrawals +/// +/// The checks are done in the order that conforms with the engine-API specification. +/// +/// This is intended to be invoked after receiving the payload from the CLI. +/// The additional fields, starting with [`MaybeCancunPayloadFields`](alloy_rpc_types_engine::MaybeCancunPayloadFields), are not part of the payload, but are additional fields starting in the `engine_newPayloadV3` RPC call, See also +/// +/// If the cancun fields are provided this also validates that the versioned hashes in the block +/// are empty as well as those passed in the sidecar. If the payload fields are not provided. +/// +/// Validation according to specs . +pub fn ensure_well_formed_payload( + chain_spec: ChainSpec, + payload: OpExecutionData, +) -> Result>, OpPayloadError> +where + ChainSpec: OpHardforks, + T: SignedTransaction, +{ + let OpExecutionData { payload, sidecar } = payload; - // First parse the block - let sealed_block = payload.try_into_block_with_sidecar(&sidecar)?.seal_slow(); + let expected_hash = payload.block_hash(); - // Ensure the hash included in the payload matches the block hash - if expected_hash != sealed_block.hash() { - return Err(PayloadError::BlockHash { - execution: sealed_block.hash(), - consensus: expected_hash, - })? - } + // First parse the block + let sealed_block = payload.try_into_block_with_sidecar(&sidecar)?.seal_slow(); - shanghai::ensure_well_formed_fields( - sealed_block.body(), - self.is_shanghai_active_at_timestamp(sealed_block.timestamp), - )?; + // Ensure the hash included in the payload matches the block hash + if expected_hash != sealed_block.hash() { + return Err(PayloadError::BlockHash { + execution: sealed_block.hash(), + consensus: expected_hash, + })? 
+ } - cancun::ensure_well_formed_header_and_sidecar_fields( - &sealed_block, - sidecar.ecotone(), - self.is_cancun_active_at_timestamp(sealed_block.timestamp), - )?; + shanghai::ensure_well_formed_fields( + sealed_block.body(), + chain_spec.is_shanghai_active_at_timestamp(sealed_block.timestamp), + )?; - prague::ensure_well_formed_fields( - sealed_block.body(), - sidecar.isthmus(), - self.is_prague_active_at_timestamp(sealed_block.timestamp), - )?; + cancun::ensure_well_formed_header_and_sidecar_fields( + &sealed_block, + sidecar.ecotone(), + chain_spec.is_cancun_active_at_timestamp(sealed_block.timestamp), + )?; - Ok(sealed_block) - } + prague::ensure_well_formed_fields( + sealed_block.body(), + sidecar.isthmus(), + chain_spec.is_prague_active_at_timestamp(sealed_block.timestamp), + )?; + + Ok(sealed_block) } From 8bd6bf5dc10edeb3a921e4797e5b3224859da923 Mon Sep 17 00:00:00 2001 From: Federico Gimenez Date: Wed, 23 Jul 2025 15:46:41 +0200 Subject: [PATCH 272/305] feat(engine): add validate_payload and validate_block methods to EngineValidator trait (#17429) --- .../engine/tree/src/tree/payload_validator.rs | 30 +++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index c4a756da9c6..a8cac2e31ef 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -16,8 +16,9 @@ use reth_consensus::{ConsensusError, FullConsensus}; use reth_engine_primitives::{InvalidBlockHook, PayloadValidator}; use reth_evm::{ConfigureEvm, SpecFor}; use reth_payload_primitives::{ - EngineApiMessageVersion, EngineObjectValidationError, InvalidPayloadAttributesError, - NewPayloadError, PayloadAttributes, PayloadOrAttributes, PayloadTypes, + BuiltPayload, EngineApiMessageVersion, EngineObjectValidationError, + InvalidPayloadAttributesError, NewPayloadError, PayloadAttributes, PayloadOrAttributes, + PayloadTypes, }; use reth_primitives_traits::{ AlloyBlockHeader, Block, BlockBody, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, @@ -1014,4 +1015,29 @@ pub trait EngineValidator: } Ok(()) } + + /// Validates a payload received from engine API. + fn validate_payload( + &mut self, + payload: Self::ExecutionData, + _ctx: TreeCtx<'_, ::Primitives>, + ) -> Result, NewPayloadError> { + // Default implementation: try to convert using existing method + match self.ensure_well_formed_payload(payload) { + Ok(block) => { + Ok(PayloadValidationOutcome::Valid { block, trie_updates: TrieUpdates::default() }) + } + Err(error) => Err(error), + } + } + + /// Validates a block downloaded from the network. 
+ fn validate_block( + &self, + _block: &RecoveredBlock, + _ctx: TreeCtx<'_, ::Primitives>, + ) -> Result<(), ConsensusError> { + // Default implementation: accept all blocks + Ok(()) + } } From bf36f9521125d455d2c6d7bbe512f8d12c93c80c Mon Sep 17 00:00:00 2001 From: Starkey Date: Thu, 24 Jul 2025 01:10:24 +0630 Subject: [PATCH 273/305] docs: fix the parameters (#17586) --- crates/chainspec/src/spec.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 2ed1f769608..2800640b708 100644 --- a/crates/chainspec/src/spec.rs +++ b/crates/chainspec/src/spec.rs @@ -1050,9 +1050,9 @@ mod tests { "Expected fork ID {expected_id:?}, computed fork ID {computed_id:?} for hardfork {hardfork}" ); if matches!(hardfork, EthereumHardfork::Shanghai) { - if let Some(shangai_id) = spec.shanghai_fork_id() { + if let Some(shanghai_id) = spec.shanghai_fork_id() { assert_eq!( - expected_id, &shangai_id, + expected_id, &shanghai_id, "Expected fork ID {expected_id:?}, computed fork ID {computed_id:?} for Shanghai hardfork" ); } else { From eaaf1ab4d8b0b545232c48e08609deb82884dc47 Mon Sep 17 00:00:00 2001 From: Micke <155267459+reallesee@users.noreply.github.com> Date: Wed, 23 Jul 2025 22:41:57 +0200 Subject: [PATCH 274/305] fix: remove extra space in PostStateRootMismatch error message (#17590) --- crates/stateless/src/validation.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/stateless/src/validation.rs b/crates/stateless/src/validation.rs index a2a93f38e26..165deac1bb3 100644 --- a/crates/stateless/src/validation.rs +++ b/crates/stateless/src/validation.rs @@ -71,7 +71,7 @@ pub enum StatelessValidationError { HeaderDeserializationFailed, /// Error when the computed state root does not match the one in the block header. 
- #[error("mismatched post- state root: {got}\n {expected}")] + #[error("mismatched post-state root: {got}\n {expected}")] PostStateRootMismatch { /// The computed post-state root got: B256, From 6b23818c76c537f8525dcaf919df550a3d70be57 Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Thu, 24 Jul 2025 01:39:36 +0300 Subject: [PATCH 275/305] refactor: small simplifications for tree types (#17589) --- crates/engine/tree/benches/state_root_task.rs | 3 +- crates/engine/tree/src/tree/metrics.rs | 4 +- crates/engine/tree/src/tree/mod.rs | 2 +- .../tree/src/tree/payload_processor/mod.rs | 14 ++-- .../engine/tree/src/tree/payload_validator.rs | 83 +++++-------------- 5 files changed, 32 insertions(+), 74 deletions(-) diff --git a/crates/engine/tree/benches/state_root_task.rs b/crates/engine/tree/benches/state_root_task.rs index 710311be40d..af886236abc 100644 --- a/crates/engine/tree/benches/state_root_task.rs +++ b/crates/engine/tree/benches/state_root_task.rs @@ -9,7 +9,6 @@ use alloy_primitives::{Address, B256}; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; use proptest::test_runner::TestRunner; use rand::Rng; -use reth_chain_state::EthPrimitives; use reth_chainspec::ChainSpec; use reth_db_common::init::init_genesis; use reth_engine_tree::tree::{ @@ -220,7 +219,7 @@ fn bench_state_root(c: &mut Criterion) { let state_updates = create_bench_state_updates(params); setup_provider(&factory, &state_updates).expect("failed to setup provider"); - let payload_processor = PayloadProcessor::::new( + let payload_processor = PayloadProcessor::new( WorkloadExecutor::default(), EthEvmConfig::new(factory.chain_spec()), &TreeConfig::default(), diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 4d5b58c6f04..96002180049 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -7,7 +7,7 @@ use reth_trie::updates::TrieUpdates; /// Metrics for the `EngineApi`. #[derive(Debug, Default)] -pub struct EngineApiMetrics { +pub(crate) struct EngineApiMetrics { /// Engine API-specific metrics. pub(crate) engine: EngineMetrics, /// Block executor metrics. @@ -21,7 +21,7 @@ pub struct EngineApiMetrics { /// Metrics for the entire blockchain tree #[derive(Metrics)] #[metrics(scope = "blockchain_tree")] -pub struct TreeMetrics { +pub(crate) struct TreeMetrics { /// The highest block number in the canonical chain pub canonical_chain_height: Gauge, /// The number of reorgs diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index cda5e5365a6..43ca738aab1 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -272,7 +272,7 @@ where /// The engine API variant of this handler engine_kind: EngineApiKind, /// The type responsible for processing new payloads - payload_processor: PayloadProcessor, + payload_processor: PayloadProcessor, /// The EVM configuration. evm_config: C, /// Precompile cache map. diff --git a/crates/engine/tree/src/tree/payload_processor/mod.rs b/crates/engine/tree/src/tree/payload_processor/mod.rs index a5042c529d4..a6c6969049d 100644 --- a/crates/engine/tree/src/tree/payload_processor/mod.rs +++ b/crates/engine/tree/src/tree/payload_processor/mod.rs @@ -53,10 +53,9 @@ use configured_sparse_trie::ConfiguredSparseTrie; /// Entrypoint for executing the payload. 
#[derive(Debug)] -pub struct PayloadProcessor +pub struct PayloadProcessor where - N: NodePrimitives, - Evm: ConfigureEvm, + Evm: ConfigureEvm, { /// The executor used by to spawn tasks. executor: WorkloadExecutor, @@ -81,10 +80,9 @@ where >, /// Whether to use the parallel sparse trie. use_parallel_sparse_trie: bool, - _marker: std::marker::PhantomData, } -impl PayloadProcessor +impl PayloadProcessor where N: NodePrimitives, Evm: ConfigureEvm, @@ -107,12 +105,11 @@ where precompile_cache_map, sparse_state_trie: Arc::default(), use_parallel_sparse_trie: config.enable_parallel_sparse_trie(), - _marker: Default::default(), } } } -impl PayloadProcessor +impl PayloadProcessor where N: NodePrimitives, Evm: ConfigureEvm + 'static, @@ -508,7 +505,6 @@ mod tests { use rand::Rng; use reth_chainspec::ChainSpec; use reth_db_common::init::init_genesis; - use reth_ethereum_primitives::EthPrimitives; use reth_evm::OnStateHook; use reth_evm_ethereum::EthEvmConfig; use reth_primitives_traits::{Account, StorageEntry}; @@ -623,7 +619,7 @@ mod tests { } } - let mut payload_processor = PayloadProcessor::::new( + let mut payload_processor = PayloadProcessor::new( WorkloadExecutor::default(), EthEvmConfig::new(factory.chain_spec()), &TreeConfig::default(), diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index a8cac2e31ef..440bef5ecba 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -2,6 +2,7 @@ use crate::tree::{ cached_state::CachedStateProvider, + executor::WorkloadExecutor, instrumented_state::InstrumentedStateProvider, payload_processor::PayloadProcessor, precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap}, @@ -158,76 +159,35 @@ impl<'a, N: NodePrimitives> TreeCtx<'a, N> { /// This type contains common validation, execution, and state root computation logic that can be /// used by network-specific payload validators (e.g., Ethereum, Optimism). It is not meant to be /// used as a standalone component, but rather as a building block for concrete implementations. -pub struct TreePayloadValidator +#[derive(derive_more::Debug)] +pub struct TreePayloadValidator where - N: NodePrimitives, - P: DatabaseProviderFactory - + BlockReader - + BlockNumReader - + StateProviderFactory - + StateReader - + StateCommitmentProvider - + HashedPostStateProvider - + HeaderProvider
- + Clone - + 'static, - C: ConfigureEvm + 'static, + Evm: ConfigureEvm, { /// Provider for database access. provider: P, /// Consensus implementation for validation. - consensus: Arc>, + consensus: Arc>, /// EVM configuration. - evm_config: C, + evm_config: Evm, /// Configuration for the tree. config: TreeConfig, /// Payload processor for state root computation. - payload_processor: PayloadProcessor, + payload_processor: PayloadProcessor, /// Precompile cache map. - precompile_cache_map: PrecompileCacheMap>, + precompile_cache_map: PrecompileCacheMap>, /// Precompile cache metrics. precompile_cache_metrics: HashMap, /// Tracks invalid headers to prevent duplicate hook calls. invalid_headers: InvalidHeaderCache, /// Hook to call when invalid blocks are encountered. - invalid_block_hook: Box>, + #[debug(skip)] + invalid_block_hook: Box>, /// Metrics for the engine api. metrics: EngineApiMetrics, } -impl std::fmt::Debug for TreePayloadValidator -where - N: NodePrimitives, - P: DatabaseProviderFactory - + BlockReader - + BlockNumReader - + StateProviderFactory - + StateReader - + StateCommitmentProvider - + HashedPostStateProvider - + HeaderProvider
- + Clone - + std::fmt::Debug - + 'static, - C: ConfigureEvm + 'static, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("TreePayloadValidator") - .field("provider", &self.provider) - .field("consensus", &"Arc") - .field("evm_config", &self.evm_config) - .field("config", &self.config) - .field("payload_processor", &self.payload_processor) - .field("precompile_cache_map", &self.precompile_cache_map) - .field("precompile_cache_metrics", &self.precompile_cache_metrics) - .field("invalid_headers", &self.invalid_headers) - .field("invalid_block_hook", &"Box") - .field("metrics", &self.metrics) - .finish() - } -} - -impl TreePayloadValidator +impl TreePayloadValidator where N: NodePrimitives, P: DatabaseProviderFactory @@ -240,32 +200,35 @@ where + HeaderProvider
+ Clone + 'static, - C: ConfigureEvm + 'static, + Evm: ConfigureEvm + 'static, { /// Creates a new `TreePayloadValidator`. #[allow(clippy::too_many_arguments)] pub fn new( provider: P, consensus: Arc>, - evm_config: C, + evm_config: Evm, config: TreeConfig, - payload_processor: PayloadProcessor, - precompile_cache_map: PrecompileCacheMap>, - invalid_headers_cache_size: u32, invalid_block_hook: Box>, - metrics: EngineApiMetrics, ) -> Self { + let precompile_cache_map = PrecompileCacheMap::default(); + let payload_processor = PayloadProcessor::new( + WorkloadExecutor::default(), + evm_config.clone(), + &config, + precompile_cache_map.clone(), + ); Self { provider, consensus, evm_config, - config, payload_processor, precompile_cache_map, precompile_cache_metrics: HashMap::new(), - invalid_headers: InvalidHeaderCache::new(invalid_headers_cache_size), + invalid_headers: InvalidHeaderCache::new(config.max_invalid_header_cache_length()), + config, invalid_block_hook, - metrics, + metrics: EngineApiMetrics::default(), } } From e29707f0ee0bb4b310c0d5e23213053bdd2d8fab Mon Sep 17 00:00:00 2001 From: Daniel Ramirez Date: Wed, 23 Jul 2025 18:10:53 -0500 Subject: [PATCH 276/305] feat: Add IPC socket permission configuration (#17497) Co-authored-by: Matthias Seitz --- crates/node/core/src/args/rpc_server.rs | 7 +++++ crates/rpc/ipc/src/server/mod.rs | 42 ++++++++++++++++++++++++- crates/rpc/rpc-builder/src/config.rs | 1 + docs/vocs/docs/pages/cli/reth/node.mdx | 5 +++ 4 files changed, 54 insertions(+), 1 deletion(-) diff --git a/crates/node/core/src/args/rpc_server.rs b/crates/node/core/src/args/rpc_server.rs index 5a2d32353b7..07a0eb93303 100644 --- a/crates/node/core/src/args/rpc_server.rs +++ b/crates/node/core/src/args/rpc_server.rs @@ -94,6 +94,12 @@ pub struct RpcServerArgs { #[arg(long, default_value_t = constants::DEFAULT_IPC_ENDPOINT.to_string())] pub ipcpath: String, + /// Set the permissions for the IPC socket file, in octal format. + /// + /// If not specified, the permissions will be set by the system's umask. + #[arg(long = "ipc.permissions")] + pub ipc_socket_permissions: Option, + /// Auth server address to listen on #[arg(long = "authrpc.addr", default_value_t = IpAddr::V4(Ipv4Addr::LOCALHOST))] pub auth_addr: IpAddr, @@ -337,6 +343,7 @@ impl Default for RpcServerArgs { ws_api: None, ipcdisable: false, ipcpath: constants::DEFAULT_IPC_ENDPOINT.to_string(), + ipc_socket_permissions: None, auth_addr: Ipv4Addr::LOCALHOST.into(), auth_port: constants::DEFAULT_AUTH_PORT, auth_jwtsecret: None, diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index e9e00a7f6c0..ece2eef7803 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -139,7 +139,20 @@ where .to_fs_name::() .and_then(|name| ListenerOptions::new().name(name).create_tokio()) { - Ok(listener) => listener, + Ok(listener) => { + #[cfg(unix)] + { + // set permissions only on unix + use std::os::unix::fs::PermissionsExt; + if let Some(perms_str) = &self.cfg.ipc_socket_permissions { + if let Ok(mode) = u32::from_str_radix(&perms_str.replace("0o", ""), 8) { + let perms = std::fs::Permissions::from_mode(mode); + let _ = std::fs::set_permissions(&self.endpoint, perms); + } + } + } + listener + } Err(err) => { on_ready .send(Err(IpcServerStartError { endpoint: self.endpoint.clone(), source: err })) @@ -550,6 +563,8 @@ pub struct Settings { message_buffer_capacity: u32, /// Custom tokio runtime to run the server on. 
tokio_runtime: Option, + /// The permissions to create the IPC socket with. + ipc_socket_permissions: Option, } impl Default for Settings { @@ -562,6 +577,7 @@ impl Default for Settings { max_subscriptions_per_connection: 1024, message_buffer_capacity: 1024, tokio_runtime: None, + ipc_socket_permissions: None, } } } @@ -648,6 +664,12 @@ impl Builder { self } + /// Sets the permissions for the IPC socket file. + pub fn set_ipc_socket_permissions(mut self, permissions: Option) -> Self { + self.settings.ipc_socket_permissions = permissions; + self + } + /// Configure custom `subscription ID` provider for the server to use /// to when getting new subscription calls. /// @@ -768,6 +790,24 @@ mod tests { use tokio::sync::broadcast; use tokio_stream::wrappers::BroadcastStream; + #[tokio::test] + #[cfg(unix)] + async fn test_ipc_socket_permissions() { + use std::os::unix::fs::PermissionsExt; + let endpoint = &dummy_name(); + let perms = "0777"; + let server = Builder::default() + .set_ipc_socket_permissions(Some(perms.to_string())) + .build(endpoint.clone()); + let module = RpcModule::new(()); + let handle = server.start(module).await.unwrap(); + tokio::spawn(handle.stopped()); + + let meta = std::fs::metadata(endpoint).unwrap(); + let perms = meta.permissions(); + assert_eq!(perms.mode() & 0o777, 0o777); + } + async fn pipe_from_stream_with_bounded_buffer( pending: PendingSubscriptionSink, stream: BroadcastStream, diff --git a/crates/rpc/rpc-builder/src/config.rs b/crates/rpc/rpc-builder/src/config.rs index e2ae09e71ce..602f4e275e5 100644 --- a/crates/rpc/rpc-builder/src/config.rs +++ b/crates/rpc/rpc-builder/src/config.rs @@ -174,6 +174,7 @@ impl RethRpcServerConfig for RpcServerArgs { .max_request_body_size(self.rpc_max_request_size_bytes()) .max_response_body_size(self.rpc_max_response_size_bytes()) .max_connections(self.rpc_max_connections.get()) + .set_ipc_socket_permissions(self.ipc_socket_permissions.clone()) } fn rpc_server_config(&self) -> RpcServerConfig { diff --git a/docs/vocs/docs/pages/cli/reth/node.mdx b/docs/vocs/docs/pages/cli/reth/node.mdx index 6eba046f921..d059f35e400 100644 --- a/docs/vocs/docs/pages/cli/reth/node.mdx +++ b/docs/vocs/docs/pages/cli/reth/node.mdx @@ -287,6 +287,11 @@ RPC: [default: .ipc] + --ipc.permissions + Set the permissions for the IPC socket file, in octal format. + + If not specified, the permissions will be set by the system's umask. 
+ --authrpc.addr Auth server address to listen on From dc90eb2ffe6758ad35827585ef80a3391b3a3f63 Mon Sep 17 00:00:00 2001 From: sashaodessa <140454972+sashaodessa@users.noreply.github.com> Date: Thu, 24 Jul 2025 03:00:25 +0200 Subject: [PATCH 277/305] fix: typo in Cargo.toml (#17588) --- crates/ethereum/node/Cargo.toml | 2 +- crates/optimism/node/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 7c3613c46ea..6f76a9f47f5 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -47,7 +47,7 @@ alloy-rpc-types-eth.workspace = true alloy-rpc-types-engine.workspace = true # revm with required ethereum features -# Note: this must be kept to ensure all features are poperly enabled/forwarded +# Note: this must be kept to ensure all features are properly enabled/forwarded revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } # misc diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 6ce9aff49b9..42a580c3aaa 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -46,7 +46,7 @@ reth-optimism-forks.workspace = true reth-optimism-primitives = { workspace = true, features = ["serde", "serde-bincode-compat", "reth-codec"] } # revm with required optimism features -# Note: this must be kept to ensure all features are poperly enabled/forwarded +# Note: this must be kept to ensure all features are properly enabled/forwarded revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } op-revm.workspace = true From 876e964cbcbd85ade6fdec40bb35f08690332854 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 24 Jul 2025 10:42:18 +0200 Subject: [PATCH 278/305] chore: introduce engine module (#17591) --- Cargo.lock | 2 ++ crates/ethereum/reth/Cargo.toml | 2 ++ crates/ethereum/reth/src/lib.rs | 9 +++++++++ crates/optimism/reth/Cargo.toml | 2 ++ crates/optimism/reth/src/lib.rs | 11 ++++++++++- 5 files changed, 25 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index c70093e6288..1cfb81a59e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8179,6 +8179,7 @@ dependencies = [ "reth-consensus", "reth-consensus-common", "reth-db", + "reth-engine-local", "reth-eth-wire", "reth-ethereum-cli", "reth-ethereum-consensus", @@ -9066,6 +9067,7 @@ dependencies = [ "reth-consensus", "reth-consensus-common", "reth-db", + "reth-engine-local", "reth-eth-wire", "reth-evm", "reth-exex", diff --git a/crates/ethereum/reth/Cargo.toml b/crates/ethereum/reth/Cargo.toml index 0522e6f84dc..fef17491b77 100644 --- a/crates/ethereum/reth/Cargo.toml +++ b/crates/ethereum/reth/Cargo.toml @@ -38,6 +38,7 @@ reth-trie-db = { workspace = true, optional = true } reth-node-builder = { workspace = true, optional = true } reth-tasks = { workspace = true, optional = true } reth-cli-util = { workspace = true, optional = true } +reth-engine-local = { workspace = true, optional = true } # reth-ethereum reth-ethereum-primitives.workspace = true @@ -126,6 +127,7 @@ node = [ "node-api", "dep:reth-node-ethereum", "dep:reth-node-builder", + "dep:reth-engine-local", "rpc", "trie-db", ] diff --git a/crates/ethereum/reth/src/lib.rs b/crates/ethereum/reth/src/lib.rs index 2a3a6135495..7c0141dc9a0 100644 --- a/crates/ethereum/reth/src/lib.rs +++ b/crates/ethereum/reth/src/lib.rs @@ -115,6 +115,15 @@ pub mod node { pub use reth_node_ethereum::*; } +/// Re-exported ethereum engine types +#[cfg(feature = "node")] +pub mod engine { + 
#[doc(inline)] + pub use reth_engine_local as local; + #[doc(inline)] + pub use reth_node_ethereum::engine::*; +} + /// Re-exported reth trie types #[cfg(feature = "trie")] pub mod trie { diff --git a/crates/optimism/reth/Cargo.toml b/crates/optimism/reth/Cargo.toml index 150a50fc84d..ae673efecf1 100644 --- a/crates/optimism/reth/Cargo.toml +++ b/crates/optimism/reth/Cargo.toml @@ -38,6 +38,7 @@ reth-trie-db = { workspace = true, optional = true } reth-node-builder = { workspace = true, optional = true } reth-tasks = { workspace = true, optional = true } reth-cli-util = { workspace = true, optional = true } +reth-engine-local = { workspace = true, optional = true } # reth-op reth-optimism-primitives.workspace = true @@ -110,6 +111,7 @@ node = [ "node-api", "dep:reth-optimism-node", "dep:reth-node-builder", + "dep:reth-engine-local", "rpc", "trie-db", ] diff --git a/crates/optimism/reth/src/lib.rs b/crates/optimism/reth/src/lib.rs index 3028b07b237..3252e2bd908 100644 --- a/crates/optimism/reth/src/lib.rs +++ b/crates/optimism/reth/src/lib.rs @@ -111,7 +111,7 @@ pub mod storage { pub use reth_storage_api::*; } -/// Re-exported ethereum node +/// Re-exported optimism node #[cfg(feature = "node-api")] pub mod node { #[doc(inline)] @@ -124,6 +124,15 @@ pub mod node { pub use reth_optimism_node::*; } +/// Re-exported engine types +#[cfg(feature = "node")] +pub mod engine { + #[doc(inline)] + pub use reth_engine_local as local; + #[doc(inline)] + pub use reth_optimism_node::engine::*; +} + /// Re-exported reth trie types #[cfg(feature = "trie")] pub mod trie { From de5cbfe4cc53c12894a68b0b528a4e062499de81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9a=20Narzis?= <78718413+lean-apple@users.noreply.github.com> Date: Fri, 25 Jul 2025 13:31:41 +0200 Subject: [PATCH 279/305] test(era1): add more `Receipt` tests to verify decoding (#17592) --- crates/era/src/era1_types.rs | 48 ++++---- crates/era/src/era_types.rs | 15 +-- crates/era/src/execution_types.rs | 176 ++++++++++++++++++++--------- crates/era/src/lib.rs | 2 + crates/era/src/test_utils.rs | 177 ++++++++++++++++++++++++++++++ crates/era/tests/it/roundtrip.rs | 35 +++++- 6 files changed, 367 insertions(+), 86 deletions(-) create mode 100644 crates/era/src/test_utils.rs diff --git a/crates/era/src/era1_types.rs b/crates/era/src/era1_types.rs index 48a5486bd5b..58f51b42419 100644 --- a/crates/era/src/era1_types.rs +++ b/crates/era/src/era1_types.rs @@ -159,29 +159,37 @@ impl Era1Id { #[cfg(test)] mod tests { use super::*; - use crate::execution_types::{ - CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty, + use crate::{ + test_utils::{create_sample_block, create_test_block_with_compressed_data}, + DecodeCompressed, }; + use alloy_consensus::ReceiptWithBloom; use alloy_primitives::{B256, U256}; - /// Helper function to create a sample block tuple - fn create_sample_block(data_size: usize) -> BlockTuple { - // Create a compressed header with very sample data - let header_data = vec![0xAA; data_size]; - let header = CompressedHeader::new(header_data); - - // Create a compressed body - let body_data = vec![0xBB; data_size * 2]; - let body = CompressedBody::new(body_data); - - // Create compressed receipts - let receipts_data = vec![0xCC; data_size]; - let receipts = CompressedReceipts::new(receipts_data); - - let difficulty = TotalDifficulty::new(U256::from(data_size)); - - // Create and return the block tuple - BlockTuple::new(header, body, receipts, difficulty) + #[test] + fn test_alloy_components_decode_and_receipt_in_bloom() 
{
+        // Create a block tuple from compressed data
+        let block: BlockTuple = create_test_block_with_compressed_data(30);
+
+        // Decode and decompress the block header
+        let header: alloy_consensus::Header = block.header.decode().unwrap();
+        assert_eq!(header.number, 30, "Header block number should match");
+        assert_eq!(header.difficulty, U256::from(30 * 1000), "Header difficulty should match");
+        assert_eq!(header.gas_limit, 5000000, "Gas limit should match");
+        assert_eq!(header.gas_used, 21000, "Gas used should match");
+        assert_eq!(header.timestamp, 1609459200 + 30, "Timestamp should match");
+        assert_eq!(header.base_fee_per_gas, Some(10), "Base fee per gas should match");
+        assert!(header.withdrawals_root.is_some(), "Should have withdrawals root");
+        assert!(header.blob_gas_used.is_none(), "Should not have blob gas used");
+        assert!(header.excess_blob_gas.is_none(), "Should not have excess blob gas");
+
+        let body: alloy_consensus::BlockBody<alloy_primitives::Bytes> =
+            block.body.decode().unwrap();
+        assert_eq!(body.ommers.len(), 0, "Should have no ommers");
+        assert!(body.withdrawals.is_some(), "Should have withdrawals field");
+
+        let receipts: Vec<ReceiptWithBloom> = block.receipts.decode().unwrap();
+        assert_eq!(receipts.len(), 1, "Should have exactly 1 receipt");
    }
 
    #[test]
diff --git a/crates/era/src/era_types.rs b/crates/era/src/era_types.rs
index d145a08daa7..65b80f5b384 100644
--- a/crates/era/src/era_types.rs
+++ b/crates/era/src/era_types.rs
@@ -126,22 +126,10 @@ mod tests {
    use super::*;
    use crate::{
-        consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock},
        e2s_types::{Entry, IndexEntry},
+        test_utils::{create_beacon_block, create_beacon_state},
    };
 
-    /// Helper function to create a simple beacon block
-    fn create_beacon_block(data_size: usize) -> CompressedSignedBeaconBlock {
-        let block_data = vec![0xAA; data_size];
-        CompressedSignedBeaconBlock::new(block_data)
-    }
-
-    /// Helper function to create a simple beacon state
-    fn create_beacon_state(data_size: usize) -> CompressedBeaconState {
-        let state_data = vec![0xBB; data_size];
-        CompressedBeaconState::new(state_data)
-    }
-
    #[test]
    fn test_slot_index_roundtrip() {
        let starting_slot = 1000;
@@ -208,6 +196,7 @@ mod tests {
        assert_eq!(era_group.state_slot_index.starting_slot, 1000);
        assert_eq!(era_group.state_slot_index.offsets, vec![100, 200, 300]);
    }
+
    #[test]
    fn test_era_group_with_block_index() {
        let blocks = vec![create_beacon_block(10), create_beacon_block(15)];
diff --git a/crates/era/src/execution_types.rs b/crates/era/src/execution_types.rs
index 4a1e33df533..6feb2873fbd 100644
--- a/crates/era/src/execution_types.rs
+++ b/crates/era/src/execution_types.rs
@@ -1,4 +1,4 @@
-//! Execution layer specific types for era1 files
+//! Execution layer specific types for `.era1` files
 //!
 //! Contains implementations for compressed execution layer data structures:
 //! - [`CompressedHeader`] - Block header
@@ -9,6 +9,67 @@
 //! These types use Snappy compression to match the specification.
 //!
 //! See also
+//!
+//! # Examples
+//!
+//! ## [`CompressedHeader`]
+//!
+//! ```rust
+//! use alloy_consensus::Header;
+//! use reth_era::{execution_types::CompressedHeader, DecodeCompressed};
+//!
+//! let header = Header { number: 100, ..Default::default() };
+//! // Compress the header: rlp encoding and Snappy compression
+//! let compressed = CompressedHeader::from_header(&header)?;
+//! // Decompress and decode the typed compressed header
+//! let decoded_header: Header = compressed.decode_header()?;
+//! assert_eq!(decoded_header.number, 100);
+//! # Ok::<(), reth_era::e2s_types::E2sError>(())
+//! ```
+//!
+//! ## [`CompressedBody`]
+//!
+//! ```rust
+//! use alloy_consensus::{BlockBody, Header};
+//! use alloy_primitives::Bytes;
+//! use reth_era::{execution_types::CompressedBody, DecodeCompressed};
+//! use reth_ethereum_primitives::TransactionSigned;
+//!
+//! let body: BlockBody<Bytes> = BlockBody {
+//!     transactions: vec![Bytes::from(vec![1, 2, 3])],
+//!     ommers: vec![],
+//!     withdrawals: None,
+//! };
+//! // Compress the body: rlp encoding and snappy compression
+//! let compressed_body = CompressedBody::from_body(&body)?;
+//! // Decode back to a typed body by decompressing and decoding
+//! let decoded_body: alloy_consensus::BlockBody<Bytes> =
+//!     compressed_body.decode()?;
+//! assert_eq!(decoded_body.transactions.len(), 1);
+//! # Ok::<(), reth_era::e2s_types::E2sError>(())
+//! ```
+//!
+//! ## [`CompressedReceipts`]
+//!
+//! ```rust
+//! use alloy_consensus::ReceiptWithBloom;
+//! use reth_era::{execution_types::CompressedReceipts, DecodeCompressed};
+//! use reth_ethereum_primitives::{Receipt, TxType};
+//!
+//! let receipt = Receipt {
+//!     tx_type: TxType::Legacy,
+//!     success: true,
+//!     cumulative_gas_used: 21000,
+//!     logs: vec![],
+//! };
+//! let receipt_with_bloom = ReceiptWithBloom { receipt, logs_bloom: Default::default() };
+//! // Compress the receipt: rlp encoding and snappy compression
+//! let compressed_receipt_data = CompressedReceipts::from_encodable(&receipt_with_bloom)?;
+//! // Recover the receipt by decompressing and decoding the compressed data
+//! let decompressed_receipt = compressed_receipt_data.decode::<ReceiptWithBloom>()?;
+//! assert_eq!(decompressed_receipt.receipt.cumulative_gas_used, 21000);
+//! # Ok::<(), reth_era::e2s_types::E2sError>(())
+//! ```
 
 use crate::{
     e2s_types::{E2sError, Entry},
@@ -158,7 +219,7 @@ impl CompressedHeader {
        self.decode()
    }
 
-    /// Create a [`CompressedHeader`] from a header.
+ /// Create a [`CompressedHeader`] from a header pub fn from_header(header: &H) -> Result { let encoder = SnappyRlpCodec::new(); let compressed = encoder.encode(header)?; @@ -499,34 +560,14 @@ impl BlockTuple { #[cfg(test)] mod tests { use super::*; + use crate::test_utils::{create_header, create_test_receipt, create_test_receipts}; use alloy_eips::eip4895::Withdrawals; - use alloy_primitives::{Address, Bytes, B64}; + use alloy_primitives::{Bytes, U256}; + use reth_ethereum_primitives::{Receipt, TxType}; #[test] fn test_header_conversion_roundtrip() { - let header = Header { - parent_hash: B256::default(), - ommers_hash: B256::default(), - beneficiary: Address::default(), - state_root: B256::default(), - transactions_root: B256::default(), - receipts_root: B256::default(), - logs_bloom: Default::default(), - difficulty: U256::from(123456u64), - number: 100, - gas_limit: 5000000, - gas_used: 21000, - timestamp: 1609459200, - extra_data: Bytes::default(), - mix_hash: B256::default(), - nonce: B64::default(), - base_fee_per_gas: Some(10), - withdrawals_root: None, - blob_gas_used: None, - excess_blob_gas: None, - parent_beacon_block_root: None, - requests_hash: None, - }; + let header = create_header(); let compressed_header = CompressedHeader::from_header(&header).unwrap(); @@ -592,29 +633,7 @@ mod tests { #[test] fn test_block_tuple_with_data() { // Create block with transactions and withdrawals - let header = Header { - parent_hash: B256::default(), - ommers_hash: B256::default(), - beneficiary: Address::default(), - state_root: B256::default(), - transactions_root: B256::default(), - receipts_root: B256::default(), - logs_bloom: Default::default(), - difficulty: U256::from(123456u64), - number: 100, - gas_limit: 5000000, - gas_used: 21000, - timestamp: 1609459200, - extra_data: Bytes::default(), - mix_hash: B256::default(), - nonce: B64::default(), - base_fee_per_gas: Some(10), - withdrawals_root: Some(B256::default()), - blob_gas_used: None, - excess_blob_gas: None, - parent_beacon_block_root: None, - requests_hash: None, - }; + let header = create_header(); let transactions = vec![Bytes::from(vec![1, 2, 3, 4]), Bytes::from(vec![5, 6, 7, 8])]; @@ -639,4 +658,63 @@ mod tests { assert_eq!(decoded_block.body.transactions[1], Bytes::from(vec![5, 6, 7, 8])); assert!(decoded_block.body.withdrawals.is_some()); } + + #[test] + fn test_single_receipt_compression_roundtrip() { + let test_receipt = create_test_receipt(TxType::Eip1559, true, 21000, 2); + + // Compress the receipt + let compressed_receipts = + CompressedReceipts::from_encodable(&test_receipt).expect("Failed to compress receipt"); + + // Verify compression + assert!(!compressed_receipts.data.is_empty()); + + // Decode the compressed receipt back + let decoded_receipt: Receipt = + compressed_receipts.decode().expect("Failed to decode compressed receipt"); + + // Verify that the decoded receipt matches the original + assert_eq!(decoded_receipt.tx_type, test_receipt.tx_type); + assert_eq!(decoded_receipt.success, test_receipt.success); + assert_eq!(decoded_receipt.cumulative_gas_used, test_receipt.cumulative_gas_used); + assert_eq!(decoded_receipt.logs.len(), test_receipt.logs.len()); + + // Verify each log + for (original_log, decoded_log) in test_receipt.logs.iter().zip(decoded_receipt.logs.iter()) + { + assert_eq!(decoded_log.address, original_log.address); + assert_eq!(decoded_log.data.topics(), original_log.data.topics()); + } + } + + #[test] + fn test_receipt_list_compression() { + let receipts = create_test_receipts(); + + // 
Compress the list of receipts
+        let compressed_receipts = CompressedReceipts::from_encodable_list(&receipts)
+            .expect("Failed to compress receipt list");
+
+        // Decode the compressed receipts back
+        // Note: for real era files, decoding will most likely target
+        // `Vec<ReceiptWithBloom>`
+        let decoded_receipts: Vec<Receipt> =
+            compressed_receipts.decode().expect("Failed to decode compressed receipt list");
+
+        // Verify that the decoded receipts match the original
+        assert_eq!(decoded_receipts.len(), receipts.len());
+
+        for (original, decoded) in receipts.iter().zip(decoded_receipts.iter()) {
+            assert_eq!(decoded.tx_type, original.tx_type);
+            assert_eq!(decoded.success, original.success);
+            assert_eq!(decoded.cumulative_gas_used, original.cumulative_gas_used);
+            assert_eq!(decoded.logs.len(), original.logs.len());
+
+            for (original_log, decoded_log) in original.logs.iter().zip(decoded.logs.iter()) {
+                assert_eq!(decoded_log.address, original_log.address);
+                assert_eq!(decoded_log.data.topics(), original_log.data.topics());
+            }
+        }
+    }
+}
diff --git a/crates/era/src/lib.rs b/crates/era/src/lib.rs
index 97ffa8b26c0..45383e3eead 100644
--- a/crates/era/src/lib.rs
+++ b/crates/era/src/lib.rs
@@ -19,6 +19,8 @@ pub mod era1_file;
 pub mod era1_types;
 pub mod era_types;
 pub mod execution_types;
+#[cfg(test)]
+pub(crate) mod test_utils;
 
 use crate::e2s_types::E2sError;
 use alloy_rlp::Decodable;
diff --git a/crates/era/src/test_utils.rs b/crates/era/src/test_utils.rs
new file mode 100644
index 00000000000..96b2545be16
--- /dev/null
+++ b/crates/era/src/test_utils.rs
@@ -0,0 +1,177 @@
+//! Utility helpers to create era data structures for testing purposes.
+
+use crate::{
+    consensus_types::{CompressedBeaconState, CompressedSignedBeaconBlock},
+    execution_types::{
+        BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty,
+    },
+};
+use alloy_consensus::{Header, ReceiptWithBloom};
+use alloy_primitives::{Address, BlockNumber, Bytes, Log, LogData, B256, B64, U256};
+use reth_ethereum_primitives::{Receipt, TxType};
+
+// Helper function to create a test header
+pub(crate) fn create_header() -> Header {
+    Header {
+        parent_hash: B256::default(),
+        ommers_hash: B256::default(),
+        beneficiary: Address::default(),
+        state_root: B256::default(),
+        transactions_root: B256::default(),
+        receipts_root: B256::default(),
+        logs_bloom: Default::default(),
+        difficulty: U256::from(123456u64),
+        number: 100,
+        gas_limit: 5000000,
+        gas_used: 21000,
+        timestamp: 1609459200,
+        extra_data: Bytes::default(),
+        mix_hash: B256::default(),
+        nonce: B64::default(),
+        base_fee_per_gas: Some(10),
+        withdrawals_root: Some(B256::default()),
+        blob_gas_used: None,
+        excess_blob_gas: None,
+        parent_beacon_block_root: None,
+        requests_hash: None,
+    }
+}
+
+// Helper function to create a test receipt with customizable parameters
+pub(crate) fn create_test_receipt(
+    tx_type: TxType,
+    success: bool,
+    cumulative_gas_used: u64,
+    log_count: usize,
+) -> Receipt {
+    let mut logs = Vec::new();
+
+    for i in 0..log_count {
+        let address_byte = (i + 1) as u8;
+        let topic_byte = (i + 10) as u8;
+        let data_byte = (i + 100) as u8;
+
+        logs.push(Log {
+            address: Address::from([address_byte; 20]),
+            data: LogData::new_unchecked(
+                vec![B256::from([topic_byte; 32]), B256::from([topic_byte + 1; 32])],
+                alloy_primitives::Bytes::from(vec![data_byte, data_byte + 1, data_byte + 2]),
+            ),
+        });
+    }
+
+    Receipt { tx_type, success, cumulative_gas_used, logs }
+}
+
+// Helper function to create a list of test receipts with different characteristics
+pub(crate) fn create_test_receipts() -> Vec<Receipt> {
+    vec![
+        // Legacy transaction, successful, no logs
+        create_test_receipt(TxType::Legacy, true, 21000, 0),
+        // EIP-2930 transaction, failed, one log
+        create_test_receipt(TxType::Eip2930, false, 42000, 1),
+        // EIP-1559 transaction, successful, multiple logs
+        create_test_receipt(TxType::Eip1559, true, 63000, 3),
+        // EIP-4844 transaction, successful, two logs
+        create_test_receipt(TxType::Eip4844, true, 84000, 2),
+        // EIP-7702 transaction, failed, no logs
+        create_test_receipt(TxType::Eip7702, false, 105000, 0),
+    ]
+}
+
+pub(crate) fn create_test_receipt_with_bloom(
+    tx_type: TxType,
+    success: bool,
+    cumulative_gas_used: u64,
+    log_count: usize,
+) -> ReceiptWithBloom {
+    let receipt = create_test_receipt(tx_type, success, cumulative_gas_used, log_count);
+    ReceiptWithBloom { receipt: receipt.into(), logs_bloom: Default::default() }
+}
+
+// Helper function to create a sample block tuple
+pub(crate) fn create_sample_block(data_size: usize) -> BlockTuple {
+    // Create a compressed header with very simple data - not compressed for simplicity
+    let header_data = vec![0xAA; data_size];
+    let header = CompressedHeader::new(header_data);
+
+    // Create a compressed body with very simple data - not compressed for simplicity
+    let body_data = vec![0xBB; data_size * 2];
+    let body = CompressedBody::new(body_data);
+
+    // Create compressed receipts with very simple data - not compressed for simplicity
+    let receipts_data = vec![0xCC; data_size];
+    let receipts = CompressedReceipts::new(receipts_data);
+
+    let difficulty = TotalDifficulty::new(U256::from(data_size));
+
+    // Create and return the block tuple
+    BlockTuple::new(header, body, receipts, difficulty)
+}
+
+// Helper function to create a test block with compressed data
+pub(crate) fn create_test_block_with_compressed_data(number: BlockNumber) -> BlockTuple {
+    use alloy_consensus::{BlockBody, Header};
+    use alloy_eips::eip4895::Withdrawals;
+    use alloy_primitives::{Address, Bytes, B256, B64, U256};
+
+    // Create test header
+    let header = Header {
+        parent_hash: B256::default(),
+        ommers_hash: B256::default(),
+        beneficiary: Address::default(),
+        state_root: B256::default(),
+        transactions_root: B256::default(),
+        receipts_root: B256::default(),
+        logs_bloom: Default::default(),
+        difficulty: U256::from(number * 1000),
+        number,
+        gas_limit: 5000000,
+        gas_used: 21000,
+        timestamp: 1609459200 + number,
+        extra_data: Bytes::default(),
+        mix_hash: B256::default(),
+        nonce: B64::default(),
+        base_fee_per_gas: Some(10),
+        withdrawals_root: Some(B256::default()),
+        blob_gas_used: None,
+        excess_blob_gas: None,
+        parent_beacon_block_root: None,
+        requests_hash: None,
+    };
+
+    // Create test body
+    let body: BlockBody<Bytes> = BlockBody {
+        transactions: vec![Bytes::from(vec![(number % 256) as u8; 10])],
+        ommers: vec![],
+        withdrawals: Some(Withdrawals(vec![])),
+    };
+
+    // Create test receipt list with bloom
+    let receipts_list: Vec<ReceiptWithBloom> = vec![create_test_receipt_with_bloom(
+        reth_ethereum_primitives::TxType::Legacy,
+        true,
+        21000,
+        0,
+    )];
+
+    // Compress the test data
+    let compressed_header = CompressedHeader::from_header(&header).unwrap();
+    let compressed_body = CompressedBody::from_body(&body).unwrap();
+    let compressed_receipts = CompressedReceipts::from_encodable_list(&receipts_list).unwrap();
+    let total_difficulty = TotalDifficulty::new(U256::from(number * 1000));
+
+    BlockTuple::new(compressed_header, compressed_body, compressed_receipts, total_difficulty)
+}
+
+/// 
Helper function to create a simple beacon block +pub(crate) fn create_beacon_block(data_size: usize) -> CompressedSignedBeaconBlock { + let block_data = vec![0xAA; data_size]; + CompressedSignedBeaconBlock::new(block_data) +} + +/// Helper function to create a simple beacon state +pub(crate) fn create_beacon_state(data_size: usize) -> CompressedBeaconState { + let state_data = vec![0xBB; data_size]; + CompressedBeaconState::new(state_data) +} diff --git a/crates/era/tests/it/roundtrip.rs b/crates/era/tests/it/roundtrip.rs index 2397094646a..0689ef383e2 100644 --- a/crates/era/tests/it/roundtrip.rs +++ b/crates/era/tests/it/roundtrip.rs @@ -7,13 +7,15 @@ //! - Writing the data back to a new file //! - Confirming that all original data is preserved throughout the process -use alloy_consensus::{BlockBody, BlockHeader, Header}; +use alloy_consensus::{BlockBody, BlockHeader, Header, ReceiptWithBloom}; use rand::{prelude::IndexedRandom, rng}; use reth_era::{ e2s_types::IndexEntry, era1_file::{Era1File, Era1Reader, Era1Writer}, era1_types::{Era1Group, Era1Id}, - execution_types::{BlockTuple, CompressedBody, CompressedHeader, TotalDifficulty}, + execution_types::{ + BlockTuple, CompressedBody, CompressedHeader, CompressedReceipts, TotalDifficulty, + }, }; use reth_ethereum_primitives::TransactionSigned; use std::io::Cursor; @@ -144,6 +146,21 @@ async fn test_file_roundtrip( "Ommers count should match after roundtrip" ); + // Decode receipts + let original_receipts_decoded = + original_block.receipts.decode::>()?; + let roundtrip_receipts_decoded = + roundtrip_block.receipts.decode::>()?; + + assert_eq!( + original_receipts_decoded, roundtrip_receipts_decoded, + "Block {block_number} decoded receipts should be identical after roundtrip" + ); + assert_eq!( + original_receipts_data, roundtrip_receipts_data, + "Block {block_number} receipts data should be identical after roundtrip" + ); + // Check withdrawals presence/absence matches assert_eq!( original_decoded_body.withdrawals.is_some(), @@ -179,11 +196,21 @@ async fn test_file_roundtrip( "Transaction count should match after re-compression" ); + // Re-encore and re-compress the receipts + let recompressed_receipts = + CompressedReceipts::from_encodable(&roundtrip_receipts_decoded)?; + let recompressed_receipts_data = recompressed_receipts.decompress()?; + + assert_eq!( + original_receipts_data.len(), + recompressed_receipts_data.len(), + "Receipts length should match after re-compression" + ); + let recompressed_block = BlockTuple::new( recompressed_header, recompressed_body, - original_block.receipts.clone(), /* reuse original receipts directly as it not - * possible to decode them */ + recompressed_receipts, TotalDifficulty::new(original_block.total_difficulty.value), ); From a7cbf81b65acacbca5e089593d0516ade936550f Mon Sep 17 00:00:00 2001 From: Emilia Hane Date: Fri, 25 Jul 2025 13:34:24 +0200 Subject: [PATCH 280/305] test(sdk): Add test for using node builder with noop components (#17560) --- Cargo.lock | 3 ++ crates/net/network-api/src/noop.rs | 2 +- crates/net/p2p/src/full_block.rs | 8 +++- crates/node/builder/Cargo.toml | 7 ++++ crates/node/builder/src/builder/states.rs | 48 +++++++++++++++++++++++ crates/payload/builder/src/noop.rs | 7 ++++ 6 files changed, 73 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1cfb81a59e3..917657038a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8834,7 +8834,9 @@ dependencies = [ "reth-engine-service", "reth-engine-tree", "reth-engine-util", + "reth-ethereum-engine-primitives", 
"reth-evm", + "reth-evm-ethereum", "reth-exex", "reth-fs-util", "reth-invalid-block-hooks", @@ -8843,6 +8845,7 @@ dependencies = [ "reth-network-p2p", "reth-node-api", "reth-node-core", + "reth-node-ethereum", "reth-node-ethstats", "reth-node-events", "reth-node-metrics", diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs index 3d6b295e7f3..c650db0afc4 100644 --- a/crates/net/network-api/src/noop.rs +++ b/crates/net/network-api/src/noop.rs @@ -163,7 +163,7 @@ where impl BlockDownloaderProvider for NoopNetwork where - Net: NetworkPrimitives + Default, + Net: NetworkPrimitives, { type Client = NoopFullBlockClient; diff --git a/crates/net/p2p/src/full_block.rs b/crates/net/p2p/src/full_block.rs index 3a7422c8418..8dbf3ce5690 100644 --- a/crates/net/p2p/src/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -647,7 +647,7 @@ enum RangeResponseResult { } /// A headers+bodies client implementation that does nothing. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Clone)] #[non_exhaustive] pub struct NoopFullBlockClient(PhantomData); @@ -743,6 +743,12 @@ where type Block = Net::Block; } +impl Default for NoopFullBlockClient { + fn default() -> Self { + Self(PhantomData::) + } +} + #[cfg(test)] mod tests { use reth_ethereum_primitives::BlockBody; diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index 9172dc30462..ba02aba2649 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -85,6 +85,11 @@ tracing.workspace = true [dev-dependencies] tempfile.workspace = true +reth-ethereum-engine-primitives.workspace = true +reth-payload-builder = { workspace = true, features = ["test-utils"] } +reth-node-ethereum.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } +reth-evm-ethereum = { workspace = true, features = ["test-utils"] } [features] default = [] @@ -105,6 +110,8 @@ test-utils = [ "reth-db-api/test-utils", "reth-provider/test-utils", "reth-transaction-pool/test-utils", + "reth-evm-ethereum/test-utils", + "reth-node-ethereum/test-utils", ] op = [ "reth-db?/op", diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index 42646122781..bbb3e250917 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -287,3 +287,51 @@ where }) } } + +#[cfg(test)] +mod test { + use super::*; + use crate::components::Components; + use reth_consensus::noop::NoopConsensus; + use reth_db_api::mock::DatabaseMock; + use reth_ethereum_engine_primitives::EthEngineTypes; + use reth_evm::noop::NoopEvmConfig; + use reth_evm_ethereum::MockEvmConfig; + use reth_network::EthNetworkPrimitives; + use reth_network_api::noop::NoopNetwork; + use reth_node_api::FullNodeTypesAdapter; + use reth_node_ethereum::EthereumNode; + use reth_payload_builder::PayloadBuilderHandle; + use reth_provider::noop::NoopProvider; + use reth_tasks::TaskManager; + use reth_transaction_pool::noop::NoopTransactionPool; + + #[test] + fn test_noop_components() { + let components = Components::< + FullNodeTypesAdapter, + NoopNetwork, + _, + NoopEvmConfig, + _, + > { + transaction_pool: NoopTransactionPool::default(), + evm_config: NoopEvmConfig::default(), + consensus: NoopConsensus::default(), + network: NoopNetwork::default(), + payload_builder_handle: PayloadBuilderHandle::::noop(), + }; + + let task_executor = { + let runtime = tokio::runtime::Runtime::new().unwrap(); + let handle = runtime.handle().clone(); + let manager = 
TaskManager::new(handle);
+            manager.executor()
+        };
+
+        let node = NodeAdapter { components, task_executor, provider: NoopProvider::default() };
+
+        // test that node implements `FullNodeComponents`
+        <NodeAdapter<_, _> as FullNodeComponents>::pool(&node);
+    }
+}
diff --git a/crates/payload/builder/src/noop.rs b/crates/payload/builder/src/noop.rs
index 6047bffa8b1..c20dac0f2d5 100644
--- a/crates/payload/builder/src/noop.rs
+++ b/crates/payload/builder/src/noop.rs
@@ -64,3 +64,10 @@ impl Default for NoopPayloadBuilderService {
         service
     }
 }
+
+impl<T: PayloadTypes> PayloadBuilderHandle<T> {
+    /// Returns a new noop instance.
+    pub fn noop() -> Self {
+        Self::new(mpsc::unbounded_channel().0)
+    }
+}

From c549188a93815e92d200d23c03af36415c2366ae Mon Sep 17 00:00:00 2001
From: Mablr <59505383+mablr@users.noreply.github.com>
Date: Fri, 25 Jul 2025 16:35:36 +0200
Subject: [PATCH 281/305] feat(rpc): add method to configure custom tokio
 runtime for RPC server (#17611)

---
 crates/rpc/rpc-builder/src/lib.rs | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs
index 0005e2af253..6c1866836e5 100644
--- a/crates/rpc/rpc-builder/src/lib.rs
+++ b/crates/rpc/rpc-builder/src/lib.rs
@@ -1191,6 +1191,22 @@ impl RpcServerConfig {
         self
     }
 
+    /// Configures a custom tokio runtime for the rpc server.
+    pub fn with_tokio_runtime(mut self, tokio_runtime: tokio::runtime::Handle) -> Self {
+        if let Some(http_server_config) = self.http_server_config {
+            self.http_server_config =
+                Some(http_server_config.custom_tokio_runtime(tokio_runtime.clone()));
+        }
+        if let Some(ws_server_config) = self.ws_server_config {
+            self.ws_server_config =
+                Some(ws_server_config.custom_tokio_runtime(tokio_runtime.clone()));
+        }
+        if let Some(ipc_server_config) = self.ipc_server_config {
+            self.ipc_server_config = Some(ipc_server_config.custom_tokio_runtime(tokio_runtime));
+        }
+        self
+    }
+
     /// Returns true if any server is configured.
     ///
     /// If no server is configured, no server will be launched on [`RpcServerConfig::start`].
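Editor's note on the patch above: a minimal usage sketch of the new `with_tokio_runtime` method, not part of the patch itself. The `server_config` value, worker-thread count, and variable names are illustrative assumptions, and the dedicated runtime must outlive the servers spawned onto it.

```rust
// Sketch: route the HTTP, WS and IPC servers onto a dedicated runtime.
// `server_config` is assumed to be an already-built `RpcServerConfig`.
let rpc_runtime = tokio::runtime::Builder::new_multi_thread()
    .worker_threads(4)
    .enable_all()
    .build()?;

// The same handle is forwarded to every configured transport, matching
// the method body in the patch.
let server_config = server_config.with_tokio_runtime(rpc_runtime.handle().clone());
```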
From 0a416d33d7952e4e9d6d6181a918498878a1146c Mon Sep 17 00:00:00 2001
From: Starkey
Date: Sat, 26 Jul 2025 02:46:24 +1200
Subject: [PATCH 282/305] docs: correct error comments in networking optimism
 modules (#17602)

---
 crates/optimism/reth/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/optimism/reth/src/lib.rs b/crates/optimism/reth/src/lib.rs
index 3252e2bd908..dd5fb5ba6c8 100644
--- a/crates/optimism/reth/src/lib.rs
+++ b/crates/optimism/reth/src/lib.rs
@@ -10,7 +10,7 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 #![allow(unused_crate_dependencies)]
 
-/// Re-exported ethereum types
+/// Re-exported optimism types
 #[doc(inline)]
 pub use reth_optimism_primitives::*;
 
From 73091305ac93b03ac514cbb5182995ae0ee92163 Mon Sep 17 00:00:00 2001
From: Federico Gimenez
Date: Sat, 26 Jul 2025 11:53:49 +0200
Subject: [PATCH 283/305] chore: make clippy happy (#17620)

---
 crates/rpc/rpc-eth-api/src/helpers/trace.rs   |  8 +++---
 .../transaction-pool/src/test_utils/mock.rs   | 27 ++++++++++---------
 2 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/crates/rpc/rpc-eth-api/src/helpers/trace.rs b/crates/rpc/rpc-eth-api/src/helpers/trace.rs
index fe21f80756c..e640e4f8f0f 100644
--- a/crates/rpc/rpc-eth-api/src/helpers/trace.rs
+++ b/crates/rpc/rpc-eth-api/src/helpers/trace.rs
@@ -315,11 +315,13 @@ pub trait Trace: LoadState> {
 
             // prepare transactions, we do everything upfront to reduce time spent with open
             // state
-            let max_transactions =
-                highest_index.map_or(block.body().transaction_count(), |highest| {
+            let max_transactions = highest_index.map_or_else(
+                || block.body().transaction_count(),
+                |highest| {
                     // we need + 1 because the index is 0-based
                     highest as usize + 1
-                });
+                },
+            );
 
             let mut idx = 0;
 
diff --git a/crates/transaction-pool/src/test_utils/mock.rs b/crates/transaction-pool/src/test_utils/mock.rs
index 9ddde67ba59..afa69b1f95a 100644
--- a/crates/transaction-pool/src/test_utils/mock.rs
+++ b/crates/transaction-pool/src/test_utils/mock.rs
@@ -799,21 +799,24 @@ impl alloy_consensus::Transaction for MockTransaction {
     }
 
     fn effective_gas_price(&self, base_fee: Option<u64>) -> u128 {
-        base_fee.map_or(self.max_fee_per_gas(), |base_fee| {
-            // if the tip is greater than the max priority fee per gas, set it to the max
-            // priority fee per gas + base fee
-            let tip = self.max_fee_per_gas().saturating_sub(base_fee as u128);
-            if let Some(max_tip) = self.max_priority_fee_per_gas() {
-                if tip > max_tip {
-                    max_tip + base_fee as u128
+        base_fee.map_or_else(
+            || self.max_fee_per_gas(),
+            |base_fee| {
+                // if the tip is greater than the max priority fee per gas, set it to the max
+                // priority fee per gas + base fee
+                let tip = self.max_fee_per_gas().saturating_sub(base_fee as u128);
+                if let Some(max_tip) = self.max_priority_fee_per_gas() {
+                    if tip > max_tip {
+                        max_tip + base_fee as u128
                 } else {
-                    // otherwise return the max fee per gas
                     self.max_fee_per_gas()
                 }
+            } else {
+                self.max_fee_per_gas()
+            }
-            } else {
-                self.max_fee_per_gas()
-            }
-        })
+            },
+        )
     }
 
     fn is_dynamic_fee(&self) -> bool {
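Editor's note: the clippy cleanup above swaps `map_or` for `map_or_else` so the fallback value is computed lazily. A standalone illustration of the difference (plain Rust, not reth code):

```rust
// `map_or` evaluates its default argument eagerly, even for `Some`;
// `map_or_else` takes a closure and only runs it in the `None` case.
fn expensive_default() -> usize {
    42 // stands in for a costly computation, e.g. counting transactions
}

fn plus_one_or_default(v: Option<usize>) -> usize {
    // Eager form (what the lint flags when the default is non-trivial):
    //     v.map_or(expensive_default(), |x| x + 1)
    // Lazy form, as used in the patch:
    v.map_or_else(expensive_default, |x| x + 1)
}

fn main() {
    assert_eq!(plus_one_or_default(Some(1)), 2);
    assert_eq!(plus_one_or_default(None), 42);
}
```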
- crates/engine/tree/Cargo.toml | 3 --- crates/era-downloader/Cargo.toml | 2 -- crates/era-utils/Cargo.toml | 5 +---- crates/ethereum/cli/Cargo.toml | 3 --- crates/ethereum/evm/Cargo.toml | 1 - crates/ethereum/node/Cargo.toml | 6 ------ crates/evm/evm/Cargo.toml | 1 - crates/exex/exex/Cargo.toml | 2 -- crates/net/downloaders/Cargo.toml | 3 --- crates/net/eth-wire/Cargo.toml | 1 - crates/net/network/Cargo.toml | 1 - crates/optimism/chainspec/Cargo.toml | 2 -- crates/optimism/cli/Cargo.toml | 2 -- crates/optimism/consensus/Cargo.toml | 2 -- crates/optimism/evm/Cargo.toml | 1 - crates/optimism/node/Cargo.toml | 4 ---- crates/optimism/storage/Cargo.toml | 1 - crates/payload/builder/Cargo.toml | 1 - crates/revm/Cargo.toml | 1 - crates/rpc/rpc-builder/Cargo.toml | 4 ---- crates/rpc/rpc-engine-api/Cargo.toml | 1 - crates/rpc/rpc-testing-util/Cargo.toml | 1 - crates/rpc/rpc/Cargo.toml | 3 --- crates/stages/stages/Cargo.toml | 3 +-- crates/storage/db-common/Cargo.toml | 1 - crates/storage/provider/Cargo.toml | 4 +--- crates/trie/parallel/Cargo.toml | 1 - testing/testing-utils/Cargo.toml | 4 ---- 33 files changed, 3 insertions(+), 66 deletions(-) diff --git a/bin/reth-bench/Cargo.toml b/bin/reth-bench/Cargo.toml index f677521567a..891fa4f9780 100644 --- a/bin/reth-bench/Cargo.toml +++ b/bin/reth-bench/Cargo.toml @@ -65,7 +65,6 @@ humantime.workspace = true csv.workspace = true [dev-dependencies] -reth-tracing.workspace = true [features] default = ["jemalloc"] diff --git a/crates/chain-state/Cargo.toml b/crates/chain-state/Cargo.toml index 39a26f49378..be3b5a981d1 100644 --- a/crates/chain-state/Cargo.toml +++ b/crates/chain-state/Cargo.toml @@ -53,7 +53,6 @@ reth-primitives-traits = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true alloy-signer.workspace = true alloy-signer-local.workspace = true -alloy-consensus.workspace = true rand.workspace = true [features] diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index 6d09d71c634..4d3c23117b3 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -35,7 +35,6 @@ derive_more.workspace = true alloy-trie = { workspace = true, features = ["arbitrary"] } alloy-eips = { workspace = true, features = ["arbitrary"] } alloy-rlp = { workspace = true, features = ["arrayvec"] } -alloy-genesis.workspace = true [features] default = ["std"] diff --git a/crates/consensus/common/Cargo.toml b/crates/consensus/common/Cargo.toml index 96dea6c232f..901e8697cd5 100644 --- a/crates/consensus/common/Cargo.toml +++ b/crates/consensus/common/Cargo.toml @@ -23,7 +23,6 @@ alloy-eips.workspace = true [dev-dependencies] alloy-primitives = { workspace = true, features = ["rand"] } reth-ethereum-primitives.workspace = true -alloy-consensus.workspace = true rand.workspace = true [features] diff --git a/crates/engine/service/Cargo.toml b/crates/engine/service/Cargo.toml index e2932ec6faa..89eb6bdda51 100644 --- a/crates/engine/service/Cargo.toml +++ b/crates/engine/service/Cargo.toml @@ -39,7 +39,6 @@ reth-ethereum-consensus.workspace = true reth-ethereum-engine-primitives.workspace = true reth-evm-ethereum.workspace = true reth-exex-types.workspace = true -reth-chainspec.workspace = true reth-primitives-traits.workspace = true reth-node-ethereum.workspace = true diff --git a/crates/engine/tree/Cargo.toml b/crates/engine/tree/Cargo.toml index 2609466b28e..6ed37c342c5 100644 --- a/crates/engine/tree/Cargo.toml +++ b/crates/engine/tree/Cargo.toml @@ -82,18 +82,15 @@ reth-evm = { workspace = true, features = 
["test-utils"] } reth-exex-types.workspace = true reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-prune-types.workspace = true -reth-prune.workspace = true reth-rpc-convert.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } reth-static-file.workspace = true reth-testing-utils.workspace = true reth-tracing.workspace = true -reth-trie-db.workspace = true reth-node-ethereum.workspace = true reth-e2e-test-utils.workspace = true # alloy -alloy-rlp.workspace = true revm-state.workspace = true assert_matches.workspace = true diff --git a/crates/era-downloader/Cargo.toml b/crates/era-downloader/Cargo.toml index 84a5187a70f..54ae581813a 100644 --- a/crates/era-downloader/Cargo.toml +++ b/crates/era-downloader/Cargo.toml @@ -35,8 +35,6 @@ sha2.workspace = true sha2.features = ["std"] [dev-dependencies] -tokio.workspace = true -tokio.features = ["fs", "io-util", "macros"] tempfile.workspace = true test-case.workspace = true futures.workspace = true diff --git a/crates/era-utils/Cargo.toml b/crates/era-utils/Cargo.toml index 6d48e338386..731a9bb9242 100644 --- a/crates/era-utils/Cargo.toml +++ b/crates/era-utils/Cargo.toml @@ -28,8 +28,7 @@ reth-storage-api.workspace = true reth-primitives-traits.workspace = true # async -tokio.workspace = true -tokio.features = ["fs", "io-util"] +tokio = { workspace = true, features = ["fs", "io-util", "macros", "rt-multi-thread"] } futures-util.workspace = true # errors @@ -43,8 +42,6 @@ reth-provider.features = ["test-utils"] reth-db-common.workspace = true # async -tokio.workspace = true -tokio.features = ["fs", "io-util", "macros", "rt-multi-thread"] tokio-util.workspace = true futures.workspace = true bytes.workspace = true diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index a0a2a13fb64..a32ead66fba 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -33,9 +33,6 @@ eyre.workspace = true tracing.workspace = true [dev-dependencies] -# reth -reth-cli-commands.workspace = true - # fs tempfile.workspace = true diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index b0f75388ec2..744bcdc5368 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -33,7 +33,6 @@ derive_more = { workspace = true, optional = true } [dev-dependencies] reth-testing-utils.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } -reth-execution-types.workspace = true secp256k1.workspace = true alloy-genesis.workspace = true diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index 6f76a9f47f5..d1ac937e6db 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -54,25 +54,19 @@ revm = { workspace = true, features = ["secp256k1", "blst", "c-kzg"] } eyre.workspace = true [dev-dependencies] -reth-chainspec.workspace = true reth-db.workspace = true reth-exex.workspace = true reth-node-core.workspace = true -reth-payload-primitives.workspace = true reth-e2e-test-utils.workspace = true -reth-rpc-eth-api.workspace = true reth-tasks.workspace = true alloy-primitives.workspace = true alloy-provider.workspace = true alloy-genesis.workspace = true alloy-signer.workspace = true -alloy-eips.workspace = true alloy-sol-types.workspace = true alloy-contract.workspace = true alloy-rpc-types-beacon = { workspace = true, features = ["ssz"] } -alloy-rpc-types-engine.workspace = true -alloy-rpc-types-eth.workspace = true alloy-consensus.workspace = true futures.workspace = 
true diff --git a/crates/evm/evm/Cargo.toml b/crates/evm/evm/Cargo.toml index b29bcc6be8a..b7515ccc408 100644 --- a/crates/evm/evm/Cargo.toml +++ b/crates/evm/evm/Cargo.toml @@ -36,7 +36,6 @@ metrics = { workspace = true, optional = true } [dev-dependencies] reth-ethereum-primitives.workspace = true reth-ethereum-forks.workspace = true -alloy-consensus.workspace = true metrics-util = { workspace = true, features = ["debugging"] } [features] diff --git a/crates/exex/exex/Cargo.toml b/crates/exex/exex/Cargo.toml index 8d380199002..0d09f0a8c68 100644 --- a/crates/exex/exex/Cargo.toml +++ b/crates/exex/exex/Cargo.toml @@ -54,13 +54,11 @@ tracing.workspace = true [dev-dependencies] reth-db-common.workspace = true reth-evm-ethereum.workspace = true -reth-node-api.workspace = true reth-primitives-traits = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true alloy-genesis.workspace = true -alloy-consensus.workspace = true rand.workspace = true secp256k1.workspace = true diff --git a/crates/net/downloaders/Cargo.toml b/crates/net/downloaders/Cargo.toml index 9c833e17047..128da4ff084 100644 --- a/crates/net/downloaders/Cargo.toml +++ b/crates/net/downloaders/Cargo.toml @@ -66,10 +66,7 @@ reth-tracing.workspace = true assert_matches.workspace = true tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } -alloy-rlp.workspace = true -itertools.workspace = true rand.workspace = true - tempfile.workspace = true [features] diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 93a3ed4aa77..47c372dba24 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -51,7 +51,6 @@ reth-tracing.workspace = true alloy-consensus.workspace = true test-fuzz.workspace = true tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } -tokio-util = { workspace = true, features = ["io", "codec"] } rand.workspace = true secp256k1 = { workspace = true, features = ["global-context", "std", "recovery"] } rand_08.workspace = true diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index 167fe4f26da..84fa656234d 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -89,7 +89,6 @@ reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } # alloy deps for testing against nodes -alloy-consensus.workspace = true alloy-genesis.workspace = true # misc diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index e35b5b77c7e..55201164701 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -48,8 +48,6 @@ op-alloy-consensus.workspace = true [dev-dependencies] reth-chainspec = { workspace = true, features = ["test-utils"] } -alloy-genesis.workspace = true -op-alloy-rpc-types.workspace = true [features] default = ["std"] diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 1661c3be476..0da12c42b02 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -68,8 +68,6 @@ op-alloy-consensus.workspace = true [dev-dependencies] tempfile.workspace = true reth-stages = { workspace = true, features = ["test-utils"] } -reth-db-common.workspace = true -reth-cli-commands.workspace = true [build-dependencies] reth-optimism-chainspec = { workspace = true, features = ["std", "superchain-configs"] } diff --git a/crates/optimism/consensus/Cargo.toml 
b/crates/optimism/consensus/Cargo.toml index 2276f911cd8..e681112eea0 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -43,12 +43,10 @@ reth-provider = { workspace = true, features = ["test-utils"] } reth-db-common.workspace = true reth-revm.workspace = true reth-trie.workspace = true -reth-optimism-chainspec.workspace = true reth-optimism-node.workspace = true reth-db-api = { workspace = true, features = ["op"] } alloy-chains.workspace = true -alloy-primitives.workspace = true op-alloy-consensus.workspace = true diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index 98288c5383e..7cdc297a769 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -45,7 +45,6 @@ thiserror.workspace = true reth-evm = { workspace = true, features = ["test-utils"] } reth-revm = { workspace = true, features = ["test-utils"] } alloy-genesis.workspace = true -alloy-consensus.workspace = true reth-optimism-primitives = { workspace = true, features = ["arbitrary"] } [features] diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index 42a580c3aaa..db5be42a998 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -72,7 +72,6 @@ serde_json = { workspace = true, optional = true } [dev-dependencies] reth-optimism-node = { workspace = true, features = ["test-utils"] } reth-db = { workspace = true, features = ["op"] } -reth-node-core.workspace = true reth-node-builder = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-tasks.workspace = true @@ -80,10 +79,7 @@ reth-payload-util.workspace = true reth-payload-validator.workspace = true reth-revm = { workspace = true, features = ["std"] } -alloy-primitives.workspace = true -op-alloy-consensus.workspace = true alloy-network.workspace = true -alloy-consensus.workspace = true futures.workspace = true alloy-eips.workspace = true diff --git a/crates/optimism/storage/Cargo.toml b/crates/optimism/storage/Cargo.toml index 56ced8d74e1..564d6e38cda 100644 --- a/crates/optimism/storage/Cargo.toml +++ b/crates/optimism/storage/Cargo.toml @@ -26,7 +26,6 @@ alloy-consensus.workspace = true [dev-dependencies] reth-codecs = { workspace = true, features = ["test-utils"] } -reth-db-api.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true diff --git a/crates/payload/builder/Cargo.toml b/crates/payload/builder/Cargo.toml index 5ae0425f3e1..222af0a664d 100644 --- a/crates/payload/builder/Cargo.toml +++ b/crates/payload/builder/Cargo.toml @@ -38,7 +38,6 @@ tracing.workspace = true [dev-dependencies] alloy-primitives.workspace = true -alloy-consensus.workspace = true tokio = { workspace = true, features = ["sync", "rt"] } diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 95ffe22f05a..629b5faf00d 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -27,7 +27,6 @@ revm.workspace = true [dev-dependencies] reth-trie.workspace = true reth-ethereum-forks.workspace = true -alloy-primitives.workspace = true alloy-consensus.workspace = true [features] diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 50b284698ed..12da375f143 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -52,10 +52,7 @@ alloy-provider = { workspace = true, features = ["ws", "ipc"] } alloy-network.workspace = true [dev-dependencies] -reth-primitives-traits.workspace = 
true reth-ethereum-primitives.workspace = true -reth-chainspec.workspace = true -reth-network-api.workspace = true reth-network-peers.workspace = true reth-evm-ethereum.workspace = true reth-ethereum-engine-primitives.workspace = true @@ -76,6 +73,5 @@ alloy-rpc-types-trace.workspace = true alloy-eips.workspace = true alloy-rpc-types-engine.workspace = true -tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } serde_json.workspace = true clap = { workspace = true, features = ["derive"] } diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index da119de5b2c..9185b6d5b8e 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -50,7 +50,6 @@ parking_lot.workspace = true reth-ethereum-engine-primitives.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-ethereum-primitives.workspace = true -reth-primitives-traits.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true alloy-rlp.workspace = true diff --git a/crates/rpc/rpc-testing-util/Cargo.toml b/crates/rpc/rpc-testing-util/Cargo.toml index 65b10feef98..2d074ef2368 100644 --- a/crates/rpc/rpc-testing-util/Cargo.toml +++ b/crates/rpc/rpc-testing-util/Cargo.toml @@ -36,4 +36,3 @@ similar-asserts.workspace = true tokio = { workspace = true, features = ["rt-multi-thread", "macros", "rt"] } reth-rpc-eth-api.workspace = true jsonrpsee-http-client.workspace = true -alloy-rpc-types-trace.workspace = true diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 12a7f42b263..e0d1fcb601f 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -93,16 +93,13 @@ itertools.workspace = true [dev-dependencies] reth-ethereum-primitives.workspace = true -reth-evm-ethereum.workspace = true reth-testing-utils.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-db-api.workspace = true -alloy-consensus.workspace = true rand.workspace = true -jsonrpsee-types.workspace = true jsonrpsee = { workspace = true, features = ["client"] } [features] diff --git a/crates/stages/stages/Cargo.toml b/crates/stages/stages/Cargo.toml index 68e1f99d7e7..532888ca27a 100644 --- a/crates/stages/stages/Cargo.toml +++ b/crates/stages/stages/Cargo.toml @@ -76,7 +76,6 @@ reth-execution-errors.workspace = true reth-consensus = { workspace = true, features = ["test-utils"] } reth-network-p2p = { workspace = true, features = ["test-utils"] } reth-downloaders.workspace = true -reth-revm.workspace = true reth-static-file.workspace = true reth-stages-api = { workspace = true, features = ["test-utils"] } reth-testing-utils.workspace = true @@ -87,7 +86,7 @@ reth-tracing.workspace = true alloy-primitives = { workspace = true, features = ["getrandom", "rand"] } alloy-rlp.workspace = true -itertools.workspace = true + tokio = { workspace = true, features = ["rt", "sync", "macros"] } assert_matches.workspace = true rand.workspace = true diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml index 7d05bc9815f..7ddcaaa01b8 100644 --- a/crates/storage/db-common/Cargo.toml +++ b/crates/storage/db-common/Cargo.toml @@ -43,7 +43,6 @@ tracing.workspace = true [dev-dependencies] reth-db = { workspace = true, features = ["mdbx"] } reth-provider = { workspace = true, features = ["test-utils"] } -alloy-consensus.workspace = true [lints] workspace = true 
diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index c45fde7729c..82a3726c43e 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -74,14 +74,12 @@ reth-ethereum-primitives.workspace = true revm-database-interface.workspace = true revm-state.workspace = true -parking_lot.workspace = true + tempfile.workspace = true assert_matches.workspace = true rand.workspace = true -eyre.workspace = true tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } -alloy-consensus.workspace = true [features] test-utils = [ diff --git a/crates/trie/parallel/Cargo.toml b/crates/trie/parallel/Cargo.toml index 3ee2c8b653d..a29268c2465 100644 --- a/crates/trie/parallel/Cargo.toml +++ b/crates/trie/parallel/Cargo.toml @@ -48,7 +48,6 @@ reth-trie = { workspace = true, features = ["test-utils"] } # misc rand.workspace = true -rayon.workspace = true criterion.workspace = true proptest.workspace = true proptest-arbitrary-interop.workspace = true diff --git a/testing/testing-utils/Cargo.toml b/testing/testing-utils/Cargo.toml index eb4cb4e4449..06e73631ef8 100644 --- a/testing/testing-utils/Cargo.toml +++ b/testing/testing-utils/Cargo.toml @@ -23,7 +23,3 @@ alloy-eips.workspace = true rand.workspace = true secp256k1 = { workspace = true, features = ["rand"] } rand_08.workspace = true - -[dev-dependencies] -alloy-eips.workspace = true -reth-primitives-traits.workspace = true From 5748cf92a1a201496cae8f65a7151a48be168c65 Mon Sep 17 00:00:00 2001 From: anim001k <140460766+anim001k@users.noreply.github.com> Date: Sat, 26 Jul 2025 13:34:15 +0200 Subject: [PATCH 285/305] fix: Benchmarking Link in database.md (#17553) --- docs/design/database.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/design/database.md b/docs/design/database.md index 42ec8eb8c6c..381136d7bf0 100644 --- a/docs/design/database.md +++ b/docs/design/database.md @@ -9,7 +9,7 @@ - We want Reth's serialized format to be able to trade off read/write speed for size, depending on who the user is. - To achieve that, we created the [Encode/Decode/Compress/Decompress traits](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/table.rs) to make the (de)serialization of database `Table::Key` and `Table::Values` generic. - - This allows for [out-of-the-box benchmarking](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/db/benches/encoding_iai.rs#L5) (using [Criterion](https://github.com/bheisler/criterion.rs)) + - This allows for [out-of-the-box benchmarking](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db/benches/criterion.rs) (using [Criterion](https://github.com/bheisler/criterion.rs)) - It also enables [out-of-the-box fuzzing](https://github.com/paradigmxyz/reth/blob/main/crates/storage/db-api/src/tables/codecs/fuzz/mod.rs) using [trailofbits/test-fuzz](https://github.com/trailofbits/test-fuzz). - We implemented that trait for the following encoding formats: - [Ethereum-specific Compact Encoding](https://github.com/paradigmxyz/reth/blob/0d9b9a392d4196793736522f3fc2ac804991b45d/crates/codecs/derive/src/compact/mod.rs): A lot of Ethereum datatypes have unnecessary zeros when serialized, or optional (e.g. on empty hashes) which would be nice not to pay in storage costs. 
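Editor's note: the Compact-encoding rationale quoted in the database.md context above can be made concrete with a toy sketch. This is a simplification for illustration only, not reth's actual codec: it trims leading zero bytes from a fixed-width integer behind a one-byte length prefix.

```rust
// Toy version of the idea behind Compact encoding (not reth's codec):
// store only the significant bytes of a u64 plus a length prefix.
fn compact_u64(value: u64) -> Vec<u8> {
    let bytes = value.to_be_bytes();
    // keep at least one payload byte so zero still round-trips
    let skip = bytes.iter().take_while(|b| **b == 0).count().min(7);
    let mut out = vec![(8 - skip) as u8]; // length prefix
    out.extend_from_slice(&bytes[skip..]);
    out
}

fn main() {
    // 1 costs 2 bytes instead of 8; the worst case pays 1 extra byte.
    assert_eq!(compact_u64(1), vec![1, 1]);
    assert_eq!(compact_u64(u64::MAX).len(), 9);
}
```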
From 8796a77cfadf8bbc03e2fa27bd883fca1366da79 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Sat, 26 Jul 2025 13:51:42 +0200 Subject: [PATCH 286/305] feat: support any network type in eth api builder (#17617) Co-authored-by: Arsenii Kulikov --- crates/ethereum/node/src/node.rs | 31 +++++---- crates/rpc/rpc-convert/src/transaction.rs | 84 +++++++++++++++-------- crates/rpc/rpc-eth-types/src/receipt.rs | 5 +- crates/rpc/rpc/src/eth/builder.rs | 41 +++++++++++ crates/rpc/rpc/src/eth/core.rs | 9 +-- 5 files changed, 122 insertions(+), 48 deletions(-) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index ccaf9e209a5..202c496d33a 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -5,7 +5,6 @@ use crate::{EthEngineTypes, EthEvmConfig}; use alloy_eips::{eip7840::BlobParams, merge::EPOCH_SLOTS}; use alloy_network::Ethereum; use alloy_rpc_types_engine::ExecutionData; -use alloy_rpc_types_eth::TransactionRequest; use reth_chainspec::{ChainSpec, EthChainSpec, EthereumHardforks, Hardforks}; use reth_engine_local::LocalPayloadAttributesBuilder; use reth_engine_primitives::EngineTypes; @@ -42,7 +41,9 @@ use reth_rpc::{ }; use reth_rpc_api::servers::BlockSubmissionValidationApiServer; use reth_rpc_builder::{config::RethRpcServerConfig, middleware::RethRpcMiddleware}; -use reth_rpc_eth_api::{helpers::pending_block::BuildPendingEnv, RpcConvert, SignableTxRequest}; +use reth_rpc_eth_api::{ + helpers::pending_block::BuildPendingEnv, RpcConvert, RpcTypes, SignableTxRequest, +}; use reth_rpc_eth_types::{error::FromEvmError, EthApiError}; use reth_rpc_server_types::RethRpcModule; use reth_tracing::tracing::{debug, info}; @@ -52,7 +53,7 @@ use reth_transaction_pool::{ }; use reth_trie_db::MerklePatriciaTrie; use revm::context::TxEnv; -use std::{default::Default, sync::Arc, time::SystemTime}; +use std::{default::Default, marker::PhantomData, sync::Arc, time::SystemTime}; /// Type configuration for a regular Ethereum node. #[derive(Debug, Default, Clone, Copy)] @@ -136,28 +137,34 @@ impl NodeTypes for EthereumNode { } /// Builds [`EthApi`](reth_rpc::EthApi) for Ethereum. 
-#[derive(Debug, Default)] -pub struct EthereumEthApiBuilder; +#[derive(Debug)] +pub struct EthereumEthApiBuilder(PhantomData); + +impl Default for EthereumEthApiBuilder { + fn default() -> Self { + Self(Default::default()) + } +} -impl EthApiBuilder for EthereumEthApiBuilder +impl EthApiBuilder for EthereumEthApiBuilder where N: FullNodeComponents< Types: NodeTypes, Evm: ConfigureEvm>>, >, - EthRpcConverterFor: RpcConvert< + NetworkT: RpcTypes>>, + EthRpcConverterFor: RpcConvert< Primitives = PrimitivesTy, TxEnv = TxEnvFor, Error = EthApiError, - Network = Ethereum, + Network = NetworkT, >, - TransactionRequest: SignableTxRequest>, EthApiError: FromEvmError, { - type EthApi = EthApiFor; + type EthApi = EthApiFor; async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result { - Ok(ctx.eth_api_builder().build()) + Ok(ctx.eth_api_builder().map_converter(|r| r.with_network()).build()) } } @@ -181,7 +188,7 @@ where fn default() -> Self { Self { inner: RpcAddOns::new( - EthereumEthApiBuilder, + EthereumEthApiBuilder::default(), EthereumEngineValidatorBuilder::default(), BasicEngineApiBuilder::default(), Default::default(), diff --git a/crates/rpc/rpc-convert/src/transaction.rs b/crates/rpc/rpc-convert/src/transaction.rs index 2654bde0474..302b8b10f1a 100644 --- a/crates/rpc/rpc-convert/src/transaction.rs +++ b/crates/rpc/rpc-convert/src/transaction.rs @@ -387,7 +387,7 @@ impl TryIntoTxEnv for TransactionRequest { #[error("Failed to convert transaction into RPC response: {0}")] pub struct TransactionConversionError(String); -/// Generic RPC response object converter for `Evm` and network `E`. +/// Generic RPC response object converter for `Evm` and network `Network`. /// /// The main purpose of this struct is to provide an implementation of [`RpcConvert`] for generic /// associated types. This struct can then be used for conversions in RPC method handlers. @@ -402,41 +402,61 @@ pub struct TransactionConversionError(String); /// is [`TransactionInfo`] then `()` can be used as `Map` which trivially passes over the input /// object. #[derive(Debug)] -pub struct RpcConverter { - phantom: PhantomData<(E, Evm)>, +pub struct RpcConverter { + network: PhantomData, + evm: PhantomData, receipt_converter: Receipt, header_converter: Header, mapper: Map, } -impl RpcConverter { +impl RpcConverter { /// Creates a new [`RpcConverter`] with `receipt_converter` and `mapper`. pub const fn new(receipt_converter: Receipt) -> Self { - Self { phantom: PhantomData, receipt_converter, header_converter: (), mapper: () } + Self { + network: PhantomData, + evm: PhantomData, + receipt_converter, + header_converter: (), + mapper: (), + } } } -impl RpcConverter { +impl RpcConverter { + /// Converts the network type + pub fn with_network(self) -> RpcConverter { + let Self { receipt_converter, header_converter, mapper, evm, .. } = self; + RpcConverter { + receipt_converter, + header_converter, + mapper, + network: Default::default(), + evm, + } + } + /// Configures the header converter. pub fn with_header_converter( self, header_converter: HeaderNew, - ) -> RpcConverter { - let Self { receipt_converter, header_converter: _, mapper, phantom } = self; - RpcConverter { receipt_converter, header_converter, mapper, phantom } + ) -> RpcConverter { + let Self { receipt_converter, header_converter: _, mapper, network, evm } = self; + RpcConverter { receipt_converter, header_converter, mapper, network, evm } } /// Configures the mapper. 
pub fn with_mapper( self, mapper: MapNew, - ) -> RpcConverter { - let Self { receipt_converter, header_converter, mapper: _, phantom } = self; - RpcConverter { receipt_converter, header_converter, mapper, phantom } + ) -> RpcConverter { + let Self { receipt_converter, header_converter, mapper: _, network, evm } = self; + RpcConverter { receipt_converter, header_converter, mapper, network, evm } } } -impl Default for RpcConverter +impl Default + for RpcConverter where Receipt: Default, Header: Default, @@ -444,7 +464,8 @@ where { fn default() -> Self { Self { - phantom: PhantomData, + network: Default::default(), + evm: Default::default(), receipt_converter: Default::default(), header_converter: Default::default(), mapper: Default::default(), @@ -452,12 +473,13 @@ where } } -impl Clone - for RpcConverter +impl Clone + for RpcConverter { fn clone(&self) -> Self { Self { - phantom: PhantomData, + network: Default::default(), + evm: Default::default(), receipt_converter: self.receipt_converter.clone(), header_converter: self.header_converter.clone(), mapper: self.mapper.clone(), @@ -465,18 +487,19 @@ impl Clone } } -impl RpcConvert for RpcConverter +impl RpcConvert + for RpcConverter where N: NodePrimitives, - E: RpcTypes + Send + Sync + Unpin + Clone + Debug, + Network: RpcTypes + Send + Sync + Unpin + Clone + Debug, Evm: ConfigureEvm + 'static, - TxTy: IntoRpcTx + Clone + Debug, - RpcTxReq: TryIntoSimTx> + TryIntoTxEnv>, + TxTy: IntoRpcTx + Clone + Debug, + RpcTxReq: TryIntoSimTx> + TryIntoTxEnv>, Receipt: ReceiptConverter< N, - RpcReceipt = RpcReceipt, + RpcReceipt = RpcReceipt, Error: From - + From< as TryIntoTxEnv>>::Err> + + From< as TryIntoTxEnv>>::Err> + for<'a> From<>>::Err> + Error + Unpin @@ -488,10 +511,10 @@ where + Unpin + Clone + Debug, - Header: HeaderConverter, RpcHeader>, + Header: HeaderConverter, RpcHeader>, Map: for<'a> TxInfoMapper< &'a TxTy, - Out = as IntoRpcTx>::TxInfo, + Out = as IntoRpcTx>::TxInfo, > + Clone + Debug + Unpin @@ -500,7 +523,7 @@ where + 'static, { type Primitives = N; - type Network = E; + type Network = Network; type TxEnv = TxEnvFor; type Error = Receipt::Error; @@ -508,20 +531,23 @@ where &self, tx: Recovered>, tx_info: TransactionInfo, - ) -> Result { + ) -> Result { let (tx, signer) = tx.into_parts(); let tx_info = self.mapper.try_map(&tx, tx_info)?; Ok(tx.into_rpc_tx(signer, tx_info)) } - fn build_simulate_v1_transaction(&self, request: RpcTxReq) -> Result, Self::Error> { + fn build_simulate_v1_transaction( + &self, + request: RpcTxReq, + ) -> Result, Self::Error> { Ok(request.try_into_sim_tx().map_err(|e| TransactionConversionError(e.to_string()))?) } fn tx_env( &self, - request: RpcTxReq, + request: RpcTxReq, cfg_env: &CfgEnv, block_env: &BlockEnv, ) -> Result { diff --git a/crates/rpc/rpc-eth-types/src/receipt.rs b/crates/rpc/rpc-eth-types/src/receipt.rs index 37700ffcd1d..4ea4ad1daf5 100644 --- a/crates/rpc/rpc-eth-types/src/receipt.rs +++ b/crates/rpc/rpc-eth-types/src/receipt.rs @@ -1,5 +1,6 @@ //! RPC receipt response builder, extends a layer one receipt with layer two data. +use crate::EthApiError; use alloy_consensus::{ReceiptEnvelope, Transaction, TxReceipt}; use alloy_eips::eip7840::BlobParams; use alloy_primitives::{Address, TxKind}; @@ -10,8 +11,6 @@ use reth_primitives_traits::NodePrimitives; use reth_rpc_convert::transaction::{ConvertReceiptInput, ReceiptConverter}; use std::{borrow::Cow, sync::Arc}; -use crate::EthApiError; - /// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure. 
pub fn build_receipt( input: &ConvertReceiptInput<'_, N>, @@ -88,8 +87,8 @@ where N: NodePrimitives, ChainSpec: EthChainSpec + 'static, { - type Error = EthApiError; type RpcReceipt = TransactionReceipt; + type Error = EthApiError; fn convert_receipts( &self, diff --git a/crates/rpc/rpc/src/eth/builder.rs b/crates/rpc/rpc/src/eth/builder.rs index 283722701ce..2e6a6dcf91f 100644 --- a/crates/rpc/rpc/src/eth/builder.rs +++ b/crates/rpc/rpc/src/eth/builder.rs @@ -57,6 +57,47 @@ where } } +impl EthApiBuilder { + /// Converts the RPC converter type of this builder + pub fn map_converter(self, f: F) -> EthApiBuilder + where + F: FnOnce(Rpc) -> R, + { + let Self { + components, + rpc_converter, + gas_cap, + max_simulate_blocks, + eth_proof_window, + fee_history_cache_config, + proof_permits, + eth_state_cache_config, + eth_cache, + gas_oracle_config, + gas_oracle, + blocking_task_pool, + task_spawner, + next_env, + } = self; + EthApiBuilder { + components, + rpc_converter: f(rpc_converter), + gas_cap, + max_simulate_blocks, + eth_proof_window, + fee_history_cache_config, + proof_permits, + eth_state_cache_config, + eth_cache, + gas_oracle_config, + gas_oracle, + blocking_task_pool, + task_spawner, + next_env, + } + } +} + impl EthApiBuilder>> where N: RpcNodeCore>, diff --git a/crates/rpc/rpc/src/eth/core.rs b/crates/rpc/rpc/src/eth/core.rs index a5fa5d3f651..ffad40b0117 100644 --- a/crates/rpc/rpc/src/eth/core.rs +++ b/crates/rpc/rpc/src/eth/core.rs @@ -34,17 +34,18 @@ use tokio::sync::{broadcast, Mutex}; const DEFAULT_BROADCAST_CAPACITY: usize = 2000; /// Helper type alias for [`RpcConverter`] with components from the given [`FullNodeComponents`]. -pub type EthRpcConverterFor = RpcConverter< - Ethereum, +pub type EthRpcConverterFor = RpcConverter< + NetworkT, ::Evm, EthReceiptConverter<<::Provider as ChainSpecProvider>::ChainSpec>, >; /// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`]. -pub type EthApiFor = EthApi>; +pub type EthApiFor = EthApi>; /// Helper type alias for [`EthApi`] with components from the given [`FullNodeComponents`]. -pub type EthApiBuilderFor = EthApiBuilder>; +pub type EthApiBuilderFor = + EthApiBuilder>; /// `Eth` API implementation. /// From e63dafb3b5f3a4f24590110ede59ddc36ea046a7 Mon Sep 17 00:00:00 2001 From: crStiv Date: Sat, 26 Jul 2025 17:39:23 +0300 Subject: [PATCH 287/305] docs: fix typos (#17624) --- crates/trie/trie/src/trie.rs | 2 +- docs/vocs/docs/pages/jsonrpc/admin.mdx | 2 +- docs/vocs/docs/pages/jsonrpc/debug.mdx | 4 ++-- docs/vocs/docs/pages/run/ethereum.mdx | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/trie/trie/src/trie.rs b/crates/trie/trie/src/trie.rs index e6f5463b7df..c4e3dfcb477 100644 --- a/crates/trie/trie/src/trie.rs +++ b/crates/trie/trie/src/trie.rs @@ -134,7 +134,7 @@ where pub fn root(self) -> Result { match self.calculate(false)? { StateRootProgress::Complete(root, _, _) => Ok(root), - StateRootProgress::Progress(..) => unreachable!(), // update retenion is disabled + StateRootProgress::Progress(..) => unreachable!(), // update retention is disabled } } diff --git a/docs/vocs/docs/pages/jsonrpc/admin.mdx b/docs/vocs/docs/pages/jsonrpc/admin.mdx index cf1ef29c05b..481a4f76d76 100644 --- a/docs/vocs/docs/pages/jsonrpc/admin.mdx +++ b/docs/vocs/docs/pages/jsonrpc/admin.mdx @@ -43,7 +43,7 @@ Disconnects from a peer if the connection exists. 
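Editor's note on the patch above: a hedged sketch of how the generalized `EthApiBuilder` hooks compose, mirroring the call in `EthereumEthApiBuilder::build_eth_api`. The `ctx` value is assumed to be an `EthApiCtx` as in the patch; everything else follows the patch itself.

```rust
// Sketch: swap the converter in place while keeping the rest of the
// builder state. `with_network` re-parameterizes the RpcConverter over
// the network type the surrounding builder expects (inferred here).
let eth_api = ctx
    .eth_api_builder()
    .map_converter(|converter| converter.with_network())
    .build();
```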
Returns a `bool` indicating wh ## `admin_addTrustedPeer` -Adds the given peer to a list of trusted peers, which allows the peer to always connect, even if there would be no room for it otherwise. +Adds the given peer to a list of trusted peers, which allows the peer to always connect, even if there is no room for it otherwise. It returns a `bool` indicating whether the peer was added to the list or not. diff --git a/docs/vocs/docs/pages/jsonrpc/debug.mdx b/docs/vocs/docs/pages/jsonrpc/debug.mdx index aa3a47685c6..5b435d7dca7 100644 --- a/docs/vocs/docs/pages/jsonrpc/debug.mdx +++ b/docs/vocs/docs/pages/jsonrpc/debug.mdx @@ -55,7 +55,7 @@ Returns the structured logs created during the execution of EVM between two bloc ## `debug_traceBlock` -The `debug_traceBlock` method will return a full stack trace of all invoked opcodes of all transaction that were included in this block. +The `debug_traceBlock` method will return a full stack trace of all invoked opcodes of all transactions that were included in this block. This expects an RLP-encoded block. @@ -93,7 +93,7 @@ The `debug_traceTransaction` debugging method will attempt to run the transactio ## `debug_traceCall` -The `debug_traceCall` method lets you run an `eth_call` within the context of the given block execution using the final state of parent block as the base. +The `debug_traceCall` method lets you run an `eth_call` within the context of the given block execution using the final state of the parent block as the base. The first argument (just as in `eth_call`) is a transaction request. diff --git a/docs/vocs/docs/pages/run/ethereum.mdx b/docs/vocs/docs/pages/run/ethereum.mdx index 6e068dcd312..e5663d63041 100644 --- a/docs/vocs/docs/pages/run/ethereum.mdx +++ b/docs/vocs/docs/pages/run/ethereum.mdx @@ -4,7 +4,7 @@ description: How to run Reth on Ethereum mainnet and testnets. # Running Reth on Ethereum Mainnet or testnets -Reth is an [_execution client_](https://ethereum.org/en/developers/docs/nodes-and-clients/#execution-clients). After Ethereum's transition to Proof of Stake (aka the Merge) it became required to run a [_consensus client_](https://ethereum.org/en/developers/docs/nodes-and-clients/#consensus-clients) along your execution client in order to sync into any "post-Merge" network. This is because the Ethereum execution layer now outsources consensus to a separate component, known as the consensus client. +Reth is an [_execution client_](https://ethereum.org/en/developers/docs/nodes-and-clients/#execution-clients). After Ethereum's transition to Proof of Stake (aka the Merge) it became required to run a [_consensus client_](https://ethereum.org/en/developers/docs/nodes-and-clients/#consensus-clients) along with your execution client in order to sync into any "post-Merge" network. This is because the Ethereum execution layer now outsources consensus to a separate component, known as the consensus client. Consensus clients decide what blocks are part of the chain, while execution clients only validate that transactions and blocks are valid in themselves and with respect to the world state. In other words, execution clients execute blocks and transactions and check their validity, while consensus clients determine which valid blocks should be part of the chain. Therefore, running a consensus client in parallel with the execution client is necessary to ensure synchronization and participation in the network. 
@@ -77,7 +77,7 @@ If you don't intend on running validators on your node you can add: --disable-deposit-contract-sync ``` -The `--checkpoint-sync-url` argument value can be replaced with any checkpoint sync endpoint from a [community maintained list](https://eth-clients.github.io/checkpoint-sync-endpoints/#mainnet). +The `--checkpoint-sync-url` argument value can be replaced with any checkpoint sync endpoint from a [community-maintained list](https://eth-clients.github.io/checkpoint-sync-endpoints/#mainnet). Your Reth node should start receiving "fork choice updated" messages, and begin syncing the chain. From 812dd04b807d689f4661651d32d021dfb50dc38f Mon Sep 17 00:00:00 2001 From: MozirDmitriy Date: Sun, 27 Jul 2025 13:28:18 +0300 Subject: [PATCH 288/305] fix: correct comment for is_latest_invalid method (#17621) --- crates/engine/local/src/miner.rs | 2 +- crates/engine/primitives/src/forkchoice.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/engine/local/src/miner.rs b/crates/engine/local/src/miner.rs index a3318f1f5c2..290790d61f5 100644 --- a/crates/engine/local/src/miner.rs +++ b/crates/engine/local/src/miner.rs @@ -69,7 +69,7 @@ impl Future for MiningMode { } } -/// Local miner advancing the chain/ +/// Local miner advancing the chain #[derive(Debug)] pub struct LocalMiner { /// The payload attribute builder for the engine diff --git a/crates/engine/primitives/src/forkchoice.rs b/crates/engine/primitives/src/forkchoice.rs index 2fe47d807c5..69cb5990711 100644 --- a/crates/engine/primitives/src/forkchoice.rs +++ b/crates/engine/primitives/src/forkchoice.rs @@ -56,7 +56,7 @@ impl ForkchoiceStateTracker { self.latest_status().is_some_and(|s| s.is_syncing()) } - /// Returns whether the latest received FCU is syncing: [`ForkchoiceStatus::Invalid`] + /// Returns whether the latest received FCU is invalid: [`ForkchoiceStatus::Invalid`] pub fn is_latest_invalid(&self) -> bool { self.latest_status().is_some_and(|s| s.is_invalid()) } From 7ed3ab0ec63457ed6b8e2d9d8ab7808d60bc9143 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sun, 27 Jul 2025 18:40:27 +0200 Subject: [PATCH 289/305] chore(deps): weekly `cargo update` (#17628) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> Co-authored-by: Matthias Seitz --- Cargo.lock | 414 ++++++++++-------- Cargo.toml | 4 +- .../engine/tree/src/tree/precompile_cache.rs | 5 +- 3 files changed, 249 insertions(+), 174 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 917657038a9..276ab04a465 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -97,9 +97,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy-chains" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5674914c2cfdb866c21cb0c09d82374ee39a1395cf512e7515f4c014083b3fff" +checksum = "4195a29a4b87137b2bb02105e746102873bc03561805cf45c0e510c961f160e6" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -107,7 +107,7 @@ dependencies = [ "num_enum", "proptest", "serde", - "strum 0.27.1", + "strum 0.27.2", ] [[package]] @@ -259,9 +259,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.15.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28de0dd1bbb0634ef7c3715e8e60176b77b82f8b6b15b2e35fe64cf6640f6550" +checksum = "b2a3c4a8d217f8ac0d0e5f890979646037d59a85fd3fc8f5b03d2f7a59b8d134" dependencies 
= [ "alloy-consensus", "alloy-eips", @@ -373,9 +373,9 @@ dependencies = [ [[package]] name = "alloy-op-evm" -version = "0.15.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0afe768962308a08b42fddef8a4296324f140b5a8dd0d4360038229885ce9434" +checksum = "e0286cb45e87871995815db4ce8bc560ba35f7db4cc084e48a79b355db3342bd" dependencies = [ "alloy-consensus", "alloy-eips", @@ -423,7 +423,7 @@ dependencies = [ "paste", "proptest", "proptest-derive", - "rand 0.9.1", + "rand 0.9.2", "ruint", "rustc-hash 2.1.1", "serde", @@ -640,7 +640,7 @@ dependencies = [ "jsonwebtoken", "rand 0.8.5", "serde", - "strum 0.27.1", + "strum 0.27.2", ] [[package]] @@ -924,7 +924,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f916ff6d52f219c44a9684aea764ce2c7e1d53bd4a724c9b127863aeacc30bb" dependencies = [ "alloy-primitives", - "darling", + "darling 0.20.11", "proc-macro2", "quote", "syn 2.0.104", @@ -1922,9 +1922,9 @@ dependencies = [ [[package]] name = "bytemuck_derive" -version = "1.9.3" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ecc273b49b3205b83d648f0690daa588925572cc5063745bfe547fe7ec8e1a1" +checksum = "441473f2b4b0459a68628c744bc61d23e730fb00128b841d30fa4bb3972257e4" dependencies = [ "proc-macro2", "quote", @@ -2613,9 +2613,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.2.0" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373b7c5dbd637569a2cca66e8d66b8c446a1e7bf064ea321d265d7b3dfe7c97e" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if", "cpufeatures", @@ -2644,8 +2644,18 @@ version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.20.11", + "darling_macro 0.20.11", +] + +[[package]] +name = "darling" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a79c4acb1fd5fa3d9304be4c76e031c54d2e92d172a393e24b19a14fe8532fe9" +dependencies = [ + "darling_core 0.21.0", + "darling_macro 0.21.0", ] [[package]] @@ -2662,13 +2672,38 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "darling_core" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74875de90daf30eb59609910b84d4d368103aaec4c924824c6799b28f77d6a1d" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.104", +] + [[package]] name = "darling_macro" version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ - "darling_core", + "darling_core 0.20.11", + "quote", + "syn 2.0.104", +] + +[[package]] +name = "darling_macro" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e79f8e61677d5df9167cd85265f8e5f64b215cdea3fb55eebc3e622e44c7a146" +dependencies = [ + "darling_core 0.21.0", "quote", "syn 2.0.104", ] @@ -2811,7 +2846,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" dependencies = [ - "darling", + "darling 0.20.11", "proc-macro2", "quote", "syn 2.0.104", @@ -2944,7 +2979,7 @@ dependencies = [ 
"parking_lot", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.5.10", "tokio", "tracing", "uint 0.10.0", @@ -3224,7 +3259,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dd55d08012b4e0dfcc92b8d6081234df65f2986ad34cc76eeed69c5e2ce7506" dependencies = [ - "darling", + "darling 0.20.11", "proc-macro2", "quote", "syn 2.0.104", @@ -3717,9 +3752,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.3.0" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64cd1e32ddd350061ae6edb1b082d7c54915b5c672c389143b9a63403a109f24" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "filetime" @@ -4210,7 +4245,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.9.1", + "rand 0.9.2", "ring", "serde", "thiserror 2.0.12", @@ -4233,7 +4268,7 @@ dependencies = [ "moka", "once_cell", "parking_lot", - "rand 0.9.1", + "rand 0.9.2", "resolv-conf", "serde", "smallvec", @@ -4391,14 +4426,14 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", - "webpki-roots 1.0.1", + "webpki-roots 1.0.2", ] [[package]] name = "hyper-util" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f66d5bd4c6f02bf0542fad85d626775bab9258cf795a4256dcaf3161114d1df" +checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e" dependencies = [ "base64 0.22.1", "bytes", @@ -4412,7 +4447,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.0", "tokio", "tower-service", "tracing", @@ -4795,11 +4830,11 @@ dependencies = [ [[package]] name = "instability" -version = "0.3.7" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf9fed6d91cfb734e7476a06bde8300a1b94e217e1b523b6f0cd1a01998c71d" +checksum = "435d80800b936787d62688c927b6490e887c7ef5ff9ce922c6c6050fca75eb9a" dependencies = [ - "darling", + "darling 0.20.11", "indoc", "proc-macro2", "quote", @@ -4841,9 +4876,9 @@ dependencies = [ [[package]] name = "io-uring" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b86e202f00093dcba4275d4636b93ef9dd75d025ae560d2521b45ea28ab49013" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" dependencies = [ "bitflags 2.9.1", "cfg-if", @@ -4856,7 +4891,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2", + "socket2 0.5.10", "widestring", "windows-sys 0.48.0", "winreg", @@ -5029,7 +5064,7 @@ dependencies = [ "jsonrpsee-types", "parking_lot", "pin-project", - "rand 0.9.1", + "rand 0.9.2", "rustc-hash 2.1.1", "serde", "serde_json", @@ -5283,9 +5318,9 @@ dependencies = [ [[package]] name = "libredox" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1580801010e535496706ba011c15f8532df6b42297d2e471fec38ceadd8c0638" +checksum = "4488594b9328dee448adb906d8b126d9b7deb7cf5c22161ee591610bb1be83c0" dependencies = [ "bitflags 2.9.1", "libc", @@ -5587,7 +5622,7 @@ dependencies = [ "metrics", "ordered-float", "quanta", - "rand 0.9.1", + "rand 0.9.2", "rand_xoshiro", "sketches-ddsketch", ] @@ -5980,9 +6015,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "op-alloy-consensus" 
-version = "0.18.12" +version = "0.18.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda4af86c3185b06f8d70986a591c087f054c5217cc7ce53cd0ec36dc42d7425" +checksum = "d3c719b26da6d9cac18c3a35634d6ab27a74a304ed9b403b43749c22e57a389f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6006,9 +6041,9 @@ checksum = "a79f352fc3893dcd670172e615afef993a41798a1d3fc0db88a3e60ef2e70ecc" [[package]] name = "op-alloy-network" -version = "0.18.12" +version = "0.18.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab526485e1aee4dbd929aaa431aaa9db8678c936ee7d1449760f783ae45afa01" +checksum = "66be312d3446099f1c46b3bb4bbaccdd4b3d6fb3668921158e3d47dff0a8d4a0" dependencies = [ "alloy-consensus", "alloy-network", @@ -6022,9 +6057,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-jsonrpsee" -version = "0.18.12" +version = "0.18.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f34feb6c3aef85c9ab9198f1402867030e54d13f6c66dda18235497ac808cb0" +checksum = "3833995acfc568fdac3684f037c4ed3f1f2bd2ef5deeb3f46ecee32aafa34c8e" dependencies = [ "alloy-primitives", "jsonrpsee", @@ -6032,9 +6067,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types" -version = "0.18.12" +version = "0.18.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98bfe0a4e1225930ffe288a9b3ce0d95c6fc2ee6696132e5ad7ecc7b0ee139a8" +checksum = "99911fa02e717a96ba24de59874b20cf31c9d116ce79ed4e0253267260b6922f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6052,9 +6087,9 @@ dependencies = [ [[package]] name = "op-alloy-rpc-types-engine" -version = "0.18.12" +version = "0.18.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a420102c1b857a4ba373fcaf674d5c0499fd3705ddce95be9a69f3561c337b3" +checksum = "50cf45d43a3d548fdc39d9bfab6ba13cc06b3214ef4b9c36d3efbf3faea1b9f1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6092,9 +6127,9 @@ dependencies = [ [[package]] name = "op-revm" -version = "8.0.3" +version = "8.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee9ba9cab294a5ed02afd1a1060220762b3c52911acab635db33822e93f7276d" +checksum = "5ce1dc7533f4e5716c55cd3d62488c6200cb4dfda96e0c75a7e484652464343b" dependencies = [ "auto_impl", "once_cell", @@ -6190,7 +6225,7 @@ dependencies = [ "glob", "opentelemetry", "percent-encoding", - "rand 0.9.1", + "rand 0.9.2", "serde_json", "thiserror 2.0.12", "tracing", @@ -6539,9 +6574,9 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.35" +version = "0.2.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061c1221631e079b26479d25bbf2275bfe5917ae8419cd7e34f13bfc2aa7539a" +checksum = "ff24dfcda44452b9816fff4cd4227e1bb73ff5a2f1bc1105aa92fb8565ce44d2" dependencies = [ "proc-macro2", "syn 2.0.104", @@ -6643,7 +6678,7 @@ dependencies = [ "bitflags 2.9.1", "lazy_static", "num-traits", - "rand 0.9.1", + "rand 0.9.2", "rand_chacha 0.9.0", "rand_xorshift", "regex-syntax 0.8.5", @@ -6750,7 +6785,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.1.1", "rustls", - "socket2", + "socket2 0.5.10", "thiserror 2.0.12", "tokio", "tracing", @@ -6766,7 +6801,7 @@ dependencies = [ "bytes", "getrandom 0.3.3", "lru-slab", - "rand 0.9.1", + "rand 0.9.2", "ring", "rustc-hash 2.1.1", "rustls", @@ -6787,7 +6822,7 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2", + "socket2 0.5.10", "tracing", "windows-sys 0.59.0", ] @@ -6840,9 +6875,9 @@ dependencies = [ 
[[package]] name = "rand" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", @@ -6992,9 +7027,9 @@ checksum = "d3edd4d5d42c92f0a659926464d4cce56b562761267ecf0f469d85b7de384175" [[package]] name = "redox_syscall" -version = "0.5.13" +version = "0.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6" +checksum = "7251471db004e509f4e75a62cca9435365b5ec7bcdff530d612ac7c87c44a792" dependencies = [ "bitflags 2.9.1", ] @@ -7141,7 +7176,7 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "webpki-roots 1.0.1", + "webpki-roots 1.0.2", ] [[package]] @@ -7272,7 +7307,7 @@ dependencies = [ "metrics", "parking_lot", "pin-project", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-errors", "reth-ethereum-primitives", @@ -7422,7 +7457,7 @@ dependencies = [ "eyre", "libc", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-fs-util", "secp256k1 0.30.0", "serde", @@ -7503,7 +7538,7 @@ dependencies = [ "alloy-consensus", "alloy-eips", "alloy-primitives", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-consensus", "reth-ethereum-primitives", @@ -7561,7 +7596,7 @@ dependencies = [ "rustc-hash 2.1.1", "serde", "serde_json", - "strum 0.27.1", + "strum 0.27.2", "sysinfo", "tempfile", "thiserror 2.0.12", @@ -7582,7 +7617,7 @@ dependencies = [ "parity-scale-codec", "proptest", "proptest-arbitrary-interop", - "rand 0.9.1", + "rand 0.9.2", "reth-codecs", "reth-db-models", "reth-ethereum-primitives", @@ -7683,7 +7718,7 @@ dependencies = [ "itertools 0.14.0", "metrics", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-ethereum-forks", "reth-metrics", @@ -7707,7 +7742,7 @@ dependencies = [ "hickory-resolver", "linked_hash_set", "parking_lot", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-ethereum-forks", "reth-network-peers", @@ -7737,7 +7772,7 @@ dependencies = [ "itertools 0.14.0", "metrics", "pin-project", - "rand 0.9.1", + "rand 0.9.2", "rayon", "reth-chainspec", "reth-config", @@ -7952,7 +7987,7 @@ dependencies = [ "parking_lot", "proptest", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "rayon", "reth-chain-state", "reth-chainspec", @@ -8037,7 +8072,7 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "eyre", - "rand 0.9.1", + "rand 0.9.2", "reqwest", "reth-era-downloader", "reth-ethereum-primitives", @@ -8122,7 +8157,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-codecs", "reth-ecies", "reth-eth-wire-types", @@ -8158,7 +8193,7 @@ dependencies = [ "derive_more", "proptest", "proptest-arbitrary-interop", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-codecs-derive", "reth-ethereum-primitives", @@ -8317,7 +8352,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-codecs", "reth-primitives-traits", "reth-zstd-compressors", @@ -8406,7 +8441,7 @@ dependencies = [ "arbitrary", "bincode 1.3.3", "derive_more", - "rand 0.9.1", + "rand 0.9.2", "reth-ethereum-primitives", "reth-primitives-traits", "reth-trie-common", @@ -8428,7 +8463,7 @@ dependencies = [ "itertools 0.14.0", "metrics", "parking_lot", - "rand 0.9.1", + "rand 0.9.2", "reth-chain-state", 
"reth-chainspec", "reth-config", @@ -8499,7 +8534,7 @@ dependencies = [ "alloy-primitives", "arbitrary", "bincode 1.3.3", - "rand 0.9.1", + "rand 0.9.2", "reth-chain-state", "reth-ethereum-primitives", "reth-execution-types", @@ -8554,7 +8589,7 @@ dependencies = [ "interprocess", "jsonrpsee", "pin-project", - "rand 0.9.1", + "rand 0.9.2", "reth-tracing", "serde", "serde_json", @@ -8577,7 +8612,7 @@ dependencies = [ "derive_more", "indexmap 2.10.0", "parking_lot", - "rand 0.9.1", + "rand 0.9.2", "reth-mdbx-sys", "smallvec", "tempfile", @@ -8646,7 +8681,7 @@ dependencies = [ "parking_lot", "pin-project", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-consensus", "reth-discv4", @@ -8740,7 +8775,7 @@ dependencies = [ "alloy-rlp", "enr", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "secp256k1 0.30.0", "serde_json", "serde_with", @@ -8771,7 +8806,7 @@ dependencies = [ "derive_more", "lz4_flex", "memmap2", - "rand 0.9.1", + "rand 0.9.2", "reth-fs-util", "serde", "tempfile", @@ -8887,7 +8922,7 @@ dependencies = [ "futures", "humantime", "proptest", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-cli-util", "reth-config", @@ -8915,7 +8950,7 @@ dependencies = [ "secp256k1 0.30.0", "serde", "shellexpand", - "strum 0.27.1", + "strum 0.27.2", "thiserror 2.0.12", "tokio", "toml", @@ -8943,7 +8978,7 @@ dependencies = [ "alloy-sol-types", "eyre", "futures", - "rand 0.9.1", + "rand 0.9.2", "reth-chainspec", "reth-db", "reth-e2e-test-utils", @@ -9041,7 +9076,7 @@ dependencies = [ "reqwest", "reth-metrics", "reth-tasks", - "socket2", + "socket2 0.5.10", "tikv-jemalloc-ctl", "tokio", "tower", @@ -9356,7 +9391,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-codecs", "reth-primitives-traits", "reth-zstd-compressors", @@ -9593,7 +9628,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "rayon", "reth-chainspec", "reth-codecs", @@ -9623,7 +9658,7 @@ dependencies = [ "metrics", "notify", "parking_lot", - "rand 0.9.1", + "rand 0.9.2", "rayon", "reth-chain-state", "reth-chainspec", @@ -9651,7 +9686,7 @@ dependencies = [ "revm-database", "revm-database-interface", "revm-state", - "strum 0.27.1", + "strum 0.27.2", "tempfile", "tokio", "tracing", @@ -9727,8 +9762,8 @@ dependencies = [ "reth-ress-protocol", "reth-storage-errors", "reth-tracing", - "strum 0.27.1", - "strum_macros 0.27.1", + "strum 0.27.2", + "strum_macros 0.27.2", "tokio", "tokio-stream", "tracing", @@ -9810,7 +9845,7 @@ dependencies = [ "jsonwebtoken", "parking_lot", "pin-project", - "rand 0.9.1", + "rand 0.9.2", "reth-chain-state", "reth-chainspec", "reth-consensus", @@ -10097,7 +10132,7 @@ dependencies = [ "jsonrpsee-core", "jsonrpsee-types", "metrics", - "rand 0.9.1", + "rand 0.9.2", "reth-chain-state", "reth-chainspec", "reth-errors", @@ -10153,7 +10188,7 @@ dependencies = [ "reth-errors", "reth-network-api", "serde", - "strum 0.27.1", + "strum 0.27.2", ] [[package]] @@ -10173,7 +10208,7 @@ dependencies = [ "itertools 0.14.0", "num-traits", "paste", - "rand 0.9.1", + "rand 0.9.2", "rayon", "reqwest", "reth-chainspec", @@ -10256,7 +10291,7 @@ dependencies = [ "modular-bitfield", "proptest", "proptest-arbitrary-interop", - "rand 0.9.1", + "rand 0.9.2", "reth-codecs", "reth-trie-common", "serde", @@ -10321,7 +10356,7 @@ dependencies = [ "derive_more", "reth-nippy-jar", "serde", - "strum 0.27.1", + "strum 0.27.2", ] [[package]] @@ -10417,7 +10452,7 @@ dependencies = [ "alloy-genesis", 
"alloy-primitives", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-ethereum-primitives", "reth-primitives-traits", "secp256k1 0.30.0", @@ -10478,7 +10513,7 @@ dependencies = [ "paste", "proptest", "proptest-arbitrary-interop", - "rand 0.9.1", + "rand 0.9.2", "reth-chain-state", "reth-chainspec", "reth-eth-wire-types", @@ -10491,7 +10526,7 @@ dependencies = [ "reth-storage-api", "reth-tasks", "reth-tracing", - "revm-interpreter", + "revm-interpreter 23.0.2", "revm-primitives", "rustc-hash 2.1.1", "schnellru", @@ -10607,7 +10642,7 @@ dependencies = [ "metrics", "proptest", "proptest-arbitrary-interop", - "rand 0.9.1", + "rand 0.9.2", "rayon", "reth-db-api", "reth-execution-errors", @@ -10641,7 +10676,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "reth-execution-errors", "reth-metrics", "reth-primitives-traits", @@ -10670,7 +10705,7 @@ dependencies = [ "proptest", "proptest-arbitrary-interop", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "rayon", "reth-execution-errors", "reth-primitives-traits", @@ -10692,18 +10727,18 @@ dependencies = [ [[package]] name = "revm" -version = "27.0.3" +version = "27.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a84455f03d3480d4ed2e7271c15f2ec95b758e86d57cb8d258a8ff1c22e9a4" +checksum = "5e6bf82101a1ad8a2b637363a37aef27f88b4efc8a6e24c72bf5f64923dc5532" dependencies = [ "revm-bytecode", "revm-context", - "revm-context-interface", + "revm-context-interface 9.0.0", "revm-database", "revm-database-interface", "revm-handler", "revm-inspector", - "revm-interpreter", + "revm-interpreter 24.0.0", "revm-precompile", "revm-primitives", "revm-state", @@ -10711,9 +10746,9 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "6.0.1" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a685758a4f375ae9392b571014b9779cfa63f0d8eb91afb4626ddd958b23615" +checksum = "6922f7f4fbc15ca61ea459711ff75281cc875648c797088c34e4e064de8b8a7c" dependencies = [ "bitvec", "once_cell", @@ -10724,14 +10759,14 @@ dependencies = [ [[package]] name = "revm-context" -version = "8.0.3" +version = "8.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a990abf66b47895ca3e915d5f3652bb7c6a4cff6e5351fdf0fc2795171fd411c" +checksum = "9cd508416a35a4d8a9feaf5ccd06ac6d6661cd31ee2dc0252f9f7316455d71f9" dependencies = [ "cfg-if", "derive-where", "revm-bytecode", - "revm-context-interface", + "revm-context-interface 9.0.0", "revm-database-interface", "revm-primitives", "revm-state", @@ -10754,11 +10789,27 @@ dependencies = [ "serde", ] +[[package]] +name = "revm-context-interface" +version = "9.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc90302642d21c8f93e0876e201f3c5f7913c4fcb66fb465b0fd7b707dfe1c79" +dependencies = [ + "alloy-eip2930", + "alloy-eip7702", + "auto_impl", + "either", + "revm-database-interface", + "revm-primitives", + "revm-state", + "serde", +] + [[package]] name = "revm-database" -version = "7.0.1" +version = "7.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7db360729b61cc347f9c2f12adb9b5e14413aea58778cf9a3b7676c6a4afa115" +checksum = "c61495e01f01c343dd90e5cb41f406c7081a360e3506acf1be0fc7880bfb04eb" dependencies = [ "alloy-eips", "revm-bytecode", @@ -10770,9 +10821,9 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "7.0.1" +version = "7.0.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8500194cad0b9b1f0567d72370795fd1a5e0de9ec719b1607fa1566a23f039a" +checksum = "c20628d6cd62961a05f981230746c16854f903762d01937f13244716530bf98f" dependencies = [ "auto_impl", "either", @@ -10783,17 +10834,17 @@ dependencies = [ [[package]] name = "revm-handler" -version = "8.0.3" +version = "8.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c35a17a38203976f97109e20eccf6732447ce6c9c42973bae42732b2e957ff" +checksum = "1529c8050e663be64010e80ec92bf480315d21b1f2dbf65540028653a621b27d" dependencies = [ "auto_impl", "derive-where", "revm-bytecode", "revm-context", - "revm-context-interface", + "revm-context-interface 9.0.0", "revm-database-interface", - "revm-interpreter", + "revm-interpreter 24.0.0", "revm-precompile", "revm-primitives", "revm-state", @@ -10802,16 +10853,16 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "8.0.3" +version = "8.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69abf6a076741bd5cd87b7d6c1b48be2821acc58932f284572323e81a8d4179" +checksum = "f78db140e332489094ef314eaeb0bd1849d6d01172c113ab0eb6ea8ab9372926" dependencies = [ "auto_impl", "either", "revm-context", "revm-database-interface", "revm-handler", - "revm-interpreter", + "revm-interpreter 24.0.0", "revm-primitives", "revm-state", "serde", @@ -10845,16 +10896,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d95c4a9a1662d10b689b66b536ddc2eb1e89f5debfcabc1a2d7b8417a2fa47cd" dependencies = [ "revm-bytecode", - "revm-context-interface", + "revm-context-interface 8.0.1", + "revm-primitives", + "serde", +] + +[[package]] +name = "revm-interpreter" +version = "24.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff9d7d9d71e8a33740b277b602165b6e3d25fff091ba3d7b5a8d373bf55f28a7" +dependencies = [ + "revm-bytecode", + "revm-context-interface 9.0.0", "revm-primitives", "serde", ] [[package]] name = "revm-precompile" -version = "24.0.1" +version = "25.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b68d54a4733ac36bd29ee645c3c2e5e782fb63f199088d49e2c48c64a9fedc15" +checksum = "4cee3f336b83621294b4cfe84d817e3eef6f3d0fce00951973364cc7f860424d" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -10879,9 +10942,9 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "20.0.0" +version = "20.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52cdf897b3418f2ee05bcade64985e5faed2dbaa349b2b5f27d3d6bfd10fff2a" +checksum = "66145d3dc61c0d6403f27fc0d18e0363bb3b7787e67970a05c71070092896599" dependencies = [ "alloy-primitives", "num_enum", @@ -10890,9 +10953,9 @@ dependencies = [ [[package]] name = "revm-state" -version = "7.0.1" +version = "7.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "106fec5c634420118c7d07a6c37110186ae7f23025ceac3a5dbe182eea548363" +checksum = "7cc830a0fd2600b91e371598e3d123480cd7bb473dd6def425a51213aa6c6d57" dependencies = [ "bitflags 2.9.1", "revm-bytecode", @@ -11067,7 +11130,7 @@ dependencies = [ "primitive-types", "proptest", "rand 0.8.5", - "rand 0.9.1", + "rand 0.9.2", "rlp", "ruint-macro", "serde", @@ -11141,15 +11204,15 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +checksum = 
"11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" dependencies = [ "bitflags 2.9.1", "errno", "libc", "linux-raw-sys 0.9.4", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -11356,7 +11419,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c3c81b43dc2d8877c216a3fccf76677ee1ebccd429566d3e67447290d0c42b2" dependencies = [ "bitcoin_hashes", - "rand 0.9.1", + "rand 0.9.2", "secp256k1-sys 0.11.0", ] @@ -11471,9 +11534,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" dependencies = [ "indexmap 2.10.0", "itoa", @@ -11540,7 +11603,7 @@ version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" dependencies = [ - "darling", + "darling 0.20.11", "proc-macro2", "quote", "syn 2.0.104", @@ -11786,6 +11849,16 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + [[package]] name = "soketto" version = "0.8.1" @@ -11847,11 +11920,11 @@ dependencies = [ [[package]] name = "strum" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64def088c51c9510a8579e3c5d67c65349dcf755e5479ad3d010aa6454e2c32" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" dependencies = [ - "strum_macros 0.27.1", + "strum_macros 0.27.2", ] [[package]] @@ -11869,14 +11942,13 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77a8c5abcaf0f9ce05d62342b7d298c346515365c36b673df4ebe3ced01fde8" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ "heck", "proc-macro2", "quote", - "rustversion", "syn 2.0.104", ] @@ -11997,7 +12069,7 @@ dependencies = [ "fastrand 2.3.0", "getrandom 0.3.3", "once_cell", - "rustix 1.0.7", + "rustix 1.0.8", "windows-sys 0.59.0", ] @@ -12036,9 +12108,9 @@ dependencies = [ [[package]] name = "test-fuzz" -version = "7.2.0" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae2f06b1ae65cbf4dc1f4975279cee7dbf70fcca269bdbdd8aabd20a79e6785c" +checksum = "bb4eb3ad07d6df1b12c23bc2d034e35a80c25d2e1232d083b42c081fd01c1c63" dependencies = [ "serde", "serde_combinators", @@ -12049,9 +12121,9 @@ dependencies = [ [[package]] name = "test-fuzz-internal" -version = "7.2.0" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac6a1dc2074c20c6410ac75687be17808a22abfd449e28301a95d72974b91768" +checksum = "53b853a8b27e0c335dd114f182fc808b917ced20dbc1bcdab79cc3e023b38762" dependencies = [ "bincode 2.0.1", "cargo_metadata 0.19.2", @@ -12060,11 +12132,11 @@ dependencies = [ [[package]] name = "test-fuzz-macro" -version = "7.2.0" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190423aabaca6cec8392cf45e471777e036d424a14d979db8033f25cc417f1ad" +checksum = 
"eb25760cf823885b202e5cc8ef8dc385e80ef913537656129ea8b34470280601" dependencies = [ - "darling", + "darling 0.21.0", "heck", "itertools 0.14.0", "prettyplease", @@ -12075,9 +12147,9 @@ dependencies = [ [[package]] name = "test-fuzz-runtime" -version = "7.2.0" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f05723662ca81651b49dd87b50cae65a0a38523aa1c0cb6b049a8c4f5c2c7836" +checksum = "c9b807e6d99cb6157a3f591ccf9f02187730a5774b9b1f066ff7dffba329495e" dependencies = [ "hex", "num-traits", @@ -12271,9 +12343,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.46.1" +version = "1.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cc3a2344dafbe23a245241fe8b09735b521110d30fcefbbd5feb1797ca35d17" +checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35" dependencies = [ "backtrace", "bytes", @@ -12284,9 +12356,9 @@ dependencies = [ "pin-project-lite", "signal-hook-registry", "slab", - "socket2", + "socket2 0.6.0", "tokio-macros", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -12640,9 +12712,9 @@ dependencies = [ [[package]] name = "tracy-client-sys" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9612d9503675b07b244922ea6f6f3cdd88c43add1b3498084613fc88cdf69d" +checksum = "319c70195101a93f56db4c74733e272d720768e13471f400c78406a326b172b0" dependencies = [ "cc", "windows-targets 0.52.6", @@ -12667,7 +12739,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bee2ea1551f90040ab0e34b6fb7f2fa3bad8acc925837ac654f2c78a13e3089" dependencies = [ - "darling", + "darling 0.20.11", "proc-macro2", "quote", "syn 2.0.104", @@ -12706,7 +12778,7 @@ dependencies = [ "http", "httparse", "log", - "rand 0.9.1", + "rand 0.9.2", "rustls", "rustls-pki-types", "sha1", @@ -13133,14 +13205,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" dependencies = [ - "webpki-root-certs 1.0.1", + "webpki-root-certs 1.0.2", ] [[package]] name = "webpki-root-certs" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86138b15b2b7d561bc4469e77027b8dd005a43dc502e9031d1f5afc8ce1f280e" +checksum = "4e4ffd8df1c57e87c325000a3d6ef93db75279dc3a231125aac571650f22b12a" dependencies = [ "rustls-pki-types", ] @@ -13151,14 +13223,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" dependencies = [ - "webpki-roots 1.0.1", + "webpki-roots 1.0.2", ] [[package]] name = "webpki-roots" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8782dd5a41a24eed3a4f40b606249b3e236ca61adf1f25ea4d45c73de122b502" +checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2" dependencies = [ "rustls-pki-types", ] @@ -13796,7 +13868,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af3a19837351dc82ba89f8a125e22a3c475f05aba604acc023d62b2739ae2909" dependencies = [ "libc", - "rustix 1.0.7", + "rustix 1.0.8", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index f11cb5158ca..d647f604621 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -471,7 +471,7 @@ 
revm-inspectors = "0.27.1" alloy-chains = { version = "0.2.5", default-features = false } alloy-dyn-abi = "1.3.0" alloy-eip2124 = { version = "0.2.0", default-features = false } -alloy-evm = { version = "0.15", default-features = false } +alloy-evm = { version = "0.16", default-features = false } alloy-primitives = { version = "1.3.0", default-features = false, features = ["map-foldhash"] } alloy-rlp = { version = "0.3.10", default-features = false, features = ["core-net"] } alloy-sol-macro = "1.3.0" @@ -509,7 +509,7 @@ alloy-transport-ipc = { version = "1.0.23", default-features = false } alloy-transport-ws = { version = "1.0.23", default-features = false } # op -alloy-op-evm = { version = "0.15", default-features = false } +alloy-op-evm = { version = "0.16", default-features = false } alloy-op-hardforks = "0.2.2" op-alloy-rpc-types = { version = "0.18.12", default-features = false } op-alloy-rpc-types-engine = { version = "0.18.12", default-features = false } diff --git a/crates/engine/tree/src/tree/precompile_cache.rs b/crates/engine/tree/src/tree/precompile_cache.rs index 9838856317f..cc3d173fb84 100644 --- a/crates/engine/tree/src/tree/precompile_cache.rs +++ b/crates/engine/tree/src/tree/precompile_cache.rs @@ -266,7 +266,7 @@ mod tests { #[test] fn test_precompile_cache_basic() { let dyn_precompile: DynPrecompile = |_input: PrecompileInput<'_>| -> PrecompileResult { - Ok(PrecompileOutput { gas_used: 0, bytes: Bytes::default() }) + Ok(PrecompileOutput { gas_used: 0, bytes: Bytes::default(), reverted: false }) } .into(); @@ -276,6 +276,7 @@ mod tests { let output = PrecompileOutput { gas_used: 50, bytes: alloy_primitives::Bytes::copy_from_slice(b"cached_result"), + reverted: false, }; let key = CacheKey::new(SpecId::PRAGUE, b"test_input".into()); @@ -307,6 +308,7 @@ mod tests { Ok(PrecompileOutput { gas_used: 5000, bytes: alloy_primitives::Bytes::copy_from_slice(b"output_from_precompile_1"), + reverted: false, }) } } @@ -320,6 +322,7 @@ mod tests { Ok(PrecompileOutput { gas_used: 7000, bytes: alloy_primitives::Bytes::copy_from_slice(b"output_from_precompile_2"), + reverted: false, }) } } From d392c3fdf293b72f265966eaddf24e0562fac9cc Mon Sep 17 00:00:00 2001 From: Arsenii Kulikov Date: Mon, 28 Jul 2025 12:07:25 +0300 Subject: [PATCH 290/305] chore: relax `Cli::run_with_components` (#17630) --- crates/ethereum/cli/src/interface.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/ethereum/cli/src/interface.rs b/crates/ethereum/cli/src/interface.rs index e62dad13d09..dc88b8eb7f8 100644 --- a/crates/ethereum/cli/src/interface.rs +++ b/crates/ethereum/cli/src/interface.rs @@ -14,7 +14,7 @@ use reth_cli_commands::{ }; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; -use reth_node_api::{NodePrimitives, NodeTypes}; +use reth_node_api::NodePrimitives; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ args::LogArgs, @@ -126,7 +126,7 @@ impl Cli { ) -> eyre::Result<()> where N: CliNodeTypes< - Primitives: NodePrimitives, + Primitives: NodePrimitives>, ChainSpec: Hardforks, >, C: ChainSpecParser, @@ -182,9 +182,11 @@ impl Cli { ) -> eyre::Result<()>, ) -> eyre::Result<()> where - N: CliNodeTypes, + N: CliNodeTypes< + Primitives: NodePrimitives>, + ChainSpec: Hardforks, + >, C: ChainSpecParser, - <::Primitives as NodePrimitives>::BlockHeader: From
,
     {
         // Add network name if available to the logs dir
         if let Some(chain_spec) = self.command.chain_spec() {

From 9d1af5a09cc7794a767858eb3219a24b7e52fc16 Mon Sep 17 00:00:00 2001
From: Arsenii Kulikov
Date: Mon, 28 Jul 2025 12:09:55 +0300
Subject: [PATCH 291/305] refactor: introduce EngineValidator in tree (#17598)

Co-authored-by: Matthias Seitz
---
 Cargo.lock                                    |    8 +-
 crates/engine/primitives/src/lib.rs           |   51 +-
 crates/engine/service/src/service.rs          |   23 +-
 crates/engine/tree/src/tree/error.rs          |   12 +
 crates/engine/tree/src/tree/mod.rs            |  668 +++------
 .../engine/tree/src/tree/payload_validator.rs | 1198 ++++++++---------
 crates/engine/tree/src/tree/tests.rs          |   23 +-
 crates/engine/util/src/reorg.rs               |    9 +-
 crates/ethereum/node/Cargo.toml               |    2 -
 crates/ethereum/node/src/engine.rs            |   11 +-
 crates/ethereum/node/src/node.rs              |    2 +-
 crates/node/builder/src/launch/engine.rs      |   14 +-
 crates/node/builder/src/rpc.rs                |    5 +-
 crates/optimism/node/Cargo.toml               |    3 +-
 crates/optimism/node/src/engine.rs            |   14 +-
 crates/optimism/rpc/Cargo.toml                |    1 -
 crates/optimism/rpc/src/engine.rs             |    3 +-
 crates/payload/primitives/src/payload.rs      |   12 +-
 crates/rpc/rpc-engine-api/Cargo.toml          |    1 -
 crates/rpc/rpc-engine-api/src/engine_api.rs   |    3 +-
 crates/rpc/rpc/src/validation.rs              |   32 +-
 examples/custom-engine-types/Cargo.toml       |    1 -
 examples/custom-engine-types/src/main.rs      |   45 +-
 examples/custom-node/Cargo.toml               |    2 +-
 examples/custom-node/src/engine.rs            |   43 +-
 25 files changed, 912 insertions(+), 1274 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 276ab04a465..362c0f4387d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3371,7 +3371,6 @@ dependencies = [
 "alloy-rpc-types",
 "eyre",
 "reth-basic-payload-builder",
- "reth-engine-tree",
 "reth-ethereum",
 "reth-ethereum-payload-builder",
 "reth-payload-builder",
@@ -3435,7 +3434,7 @@ dependencies = [
 "reth-chain-state",
 "reth-codecs",
 "reth-db-api",
- "reth-engine-tree",
+ "reth-engine-primitives",
 "reth-ethereum",
 "reth-network-peers",
 "reth-node-builder",
@@ -8984,7 +8983,6 @@ dependencies = [
 "reth-e2e-test-utils",
 "reth-engine-local",
 "reth-engine-primitives",
- "reth-engine-tree",
 "reth-ethereum-consensus",
 "reth-ethereum-engine-primitives",
 "reth-ethereum-payload-builder",
@@ -9301,7 +9299,7 @@ dependencies = [
 "reth-db",
 "reth-e2e-test-utils",
 "reth-engine-local",
- "reth-engine-tree",
+ "reth-engine-primitives",
 "reth-evm",
 "reth-network",
 "reth-node-api",
@@ -9431,7 +9429,6 @@ dependencies = [
 "op-revm",
 "reqwest",
 "reth-chainspec",
- "reth-engine-tree",
 "reth-evm",
 "reth-metrics",
 "reth-node-api",
@@ -10051,7 +10048,6 @@ dependencies = [
 "parking_lot",
 "reth-chainspec",
 "reth-engine-primitives",
- "reth-engine-tree",
 "reth-ethereum-engine-primitives",
 "reth-ethereum-primitives",
 "reth-metrics",
diff --git a/crates/engine/primitives/src/lib.rs b/crates/engine/primitives/src/lib.rs
index 45e087526ea..75e3bd81ca7 100644
--- a/crates/engine/primitives/src/lib.rs
+++ b/crates/engine/primitives/src/lib.rs
@@ -11,8 +11,12 @@

 extern crate alloc;

+use alloy_consensus::BlockHeader;
 use reth_errors::ConsensusError;
-use reth_payload_primitives::{NewPayloadError, PayloadTypes};
+use reth_payload_primitives::{
+    EngineApiMessageVersion, EngineObjectValidationError, InvalidPayloadAttributesError,
+    NewPayloadError, PayloadAttributes, PayloadOrAttributes, PayloadTypes,
+};
 use reth_primitives_traits::{Block, RecoveredBlock};
 use reth_trie_common::HashedPostState;
 use serde::{de::DeserializeOwned, Serialize};
@@ -100,15 +104,30 @@ pub trait EngineTypes:
         + 'static;
 }

+/// Type that validates the
payloads processed by the engine.
+pub trait EngineValidator<Types: PayloadTypes>: PayloadValidator<Types> {
+    /// Validates the presence or exclusion of fork-specific fields based on the payload attributes
+    /// and the message version.
+    fn validate_version_specific_fields(
+        &self,
+        version: EngineApiMessageVersion,
+        payload_or_attrs: PayloadOrAttributes<'_, Types::ExecutionData, Types::PayloadAttributes>,
+    ) -> Result<(), EngineObjectValidationError>;
+
+    /// Ensures that the payload attributes are valid for the given [`EngineApiMessageVersion`].
+    fn ensure_well_formed_attributes(
+        &self,
+        version: EngineApiMessageVersion,
+        attributes: &Types::PayloadAttributes,
+    ) -> Result<(), EngineObjectValidationError>;
+}
+
 /// Type that validates an [`ExecutionPayload`].
 #[auto_impl(&, Arc)]
-pub trait PayloadValidator: Send + Sync + Unpin + 'static {
+pub trait PayloadValidator<Types: PayloadTypes>: Send + Sync + Unpin + 'static {
     /// The block type used by the engine.
     type Block: Block;

-    /// The execution payload type used by the engine.
-    type ExecutionData;
-
     /// Ensures that the given payload does not violate any consensus rules that concern the block's
     /// layout.
     ///
@@ -119,7 +138,7 @@
     /// engine-API specification.
     fn ensure_well_formed_payload(
         &self,
-        payload: Self::ExecutionData,
+        payload: Types::ExecutionData,
     ) -> Result<RecoveredBlock<Self::Block>, NewPayloadError>;

     /// Verifies payload post-execution w.r.t. hashed state updates.
@@ -131,4 +150,24 @@
         // method not used by l1
         Ok(())
     }
+
+    /// Validates the payload attributes with respect to the header.
+    ///
+    /// By default, this enforces that the payload attributes timestamp is greater than the
+    /// timestamp according to:
+    /// > 7. Client software MUST ensure that payloadAttributes.timestamp is greater than
+    /// > timestamp
+    /// > of a block referenced by forkchoiceState.headBlockHash.
+ /// + /// See also: + fn validate_payload_attributes_against_header( + &self, + attr: &Types::PayloadAttributes, + header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + if attr.timestamp() <= header.timestamp() { + return Err(InvalidPayloadAttributesError::InvalidTimestamp); + } + Ok(()) + } } diff --git a/crates/engine/service/src/service.rs b/crates/engine/service/src/service.rs index 63a85300fa1..367186995f9 100644 --- a/crates/engine/service/src/service.rs +++ b/crates/engine/service/src/service.rs @@ -8,7 +8,7 @@ use reth_engine_tree::{ download::BasicBlockDownloader, engine::{EngineApiKind, EngineApiRequest, EngineApiRequestHandler, EngineHandler}, persistence::PersistenceHandle, - tree::{EngineApiTreeHandler, EngineValidator, InvalidBlockHook, TreeConfig}, + tree::{EngineApiTreeHandler, EngineValidator, TreeConfig}, }; pub use reth_engine_tree::{ chain::{ChainEvent, ChainOrchestrator}, @@ -82,12 +82,11 @@ where payload_builder: PayloadBuilderHandle, payload_validator: V, tree_config: TreeConfig, - invalid_block_hook: Box>, sync_metrics_tx: MetricEventsSender, evm_config: C, ) -> Self where - V: EngineValidator>, + V: EngineValidator, C: ConfigureEvm + 'static, { let engine_kind = @@ -108,7 +107,6 @@ where payload_builder, canonical_in_memory_state, tree_config, - invalid_block_hook, engine_kind, evm_config, ); @@ -150,7 +148,10 @@ mod tests { use super::*; use reth_chainspec::{ChainSpecBuilder, MAINNET}; use reth_engine_primitives::BeaconEngineMessage; - use reth_engine_tree::{test_utils::TestPipelineBuilder, tree::NoopInvalidBlockHook}; + use reth_engine_tree::{ + test_utils::TestPipelineBuilder, + tree::{BasicEngineValidator, NoopInvalidBlockHook}, + }; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_evm_ethereum::EthEvmConfig; @@ -195,6 +196,15 @@ mod tests { let pruner = Pruner::new_with_factory(provider_factory.clone(), vec![], 0, 0, None, rx); let evm_config = EthEvmConfig::new(chain_spec.clone()); + let engine_validator = BasicEngineValidator::new( + blockchain_db.clone(), + consensus.clone(), + evm_config.clone(), + engine_payload_validator, + TreeConfig::default(), + Box::new(NoopInvalidBlockHook::default()), + ); + let (sync_metrics_tx, _sync_metrics_rx) = unbounded_channel(); let (tx, _rx) = unbounded_channel(); let _eth_service = EngineService::new( @@ -208,9 +218,8 @@ mod tests { blockchain_db, pruner, PayloadBuilderHandle::new(tx), - engine_payload_validator, + engine_validator, TreeConfig::default(), - Box::new(NoopInvalidBlockHook::default()), sync_metrics_tx, evm_config, ); diff --git a/crates/engine/tree/src/tree/error.rs b/crates/engine/tree/src/tree/error.rs index b7932f876ed..f7b1111df06 100644 --- a/crates/engine/tree/src/tree/error.rs +++ b/crates/engine/tree/src/tree/error.rs @@ -5,6 +5,7 @@ use alloy_primitives::B256; use reth_consensus::ConsensusError; use reth_errors::{BlockExecutionError, BlockValidationError, ProviderError}; use reth_evm::execute::InternalBlockExecutionError; +use reth_payload_primitives::NewPayloadError; use reth_primitives_traits::{Block, BlockBody, SealedBlock}; use tokio::sync::oneshot::error::TryRecvError; @@ -189,3 +190,14 @@ pub enum InsertBlockValidationError { #[error(transparent)] Validation(#[from] BlockValidationError), } + +/// Errors that may occur when inserting a payload. 
+#[derive(Debug, thiserror::Error)] +pub enum InsertPayloadError { + /// Block validation error + #[error(transparent)] + Block(#[from] InsertBlockError), + /// Payload validation error + #[error(transparent)] + Payload(#[from] NewPayloadError), +} diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index 43ca738aab1..55b2bc4c21b 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -3,22 +3,16 @@ use crate::{ chain::FromOrchestrator, engine::{DownloadRequest, EngineApiEvent, EngineApiKind, EngineApiRequest, FromEngine}, persistence::PersistenceHandle, - tree::{ - cached_state::CachedStateProvider, executor::WorkloadExecutor, metrics::EngineApiMetrics, - }, + tree::{error::InsertPayloadError, metrics::EngineApiMetrics, payload_validator::TreeCtx}, }; use alloy_consensus::BlockHeader; -use alloy_eips::{merge::EPOCH_SLOTS, BlockNumHash, NumHash}; -use alloy_evm::block::BlockExecutor; -use alloy_primitives::{Address, B256}; +use alloy_eips::{eip1898::BlockWithParent, merge::EPOCH_SLOTS, BlockNumHash, NumHash}; +use alloy_primitives::B256; use alloy_rpc_types_engine::{ ForkchoiceState, PayloadStatus, PayloadStatusEnum, PayloadValidationError, }; -use error::{InsertBlockError, InsertBlockErrorKind, InsertBlockFatalError}; -use instrumented_state::InstrumentedStateProvider; -use payload_processor::sparse_trie::StateRootComputeOutcome; +use error::{InsertBlockError, InsertBlockFatalError}; use persistence_state::CurrentPersistenceAction; -use precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap}; use reth_chain_state::{ CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, MemoryOverlayStateProvider, NewCanonicalChain, @@ -30,27 +24,23 @@ use reth_engine_primitives::{ ForkchoiceStateTracker, OnForkChoiceUpdated, }; use reth_errors::{ConsensusError, ProviderResult}; -use reth_evm::{ConfigureEvm, Evm, SpecFor}; +use reth_evm::ConfigureEvm; use reth_payload_builder::PayloadBuilderHandle; -use reth_payload_primitives::{EngineApiMessageVersion, PayloadBuilderAttributes, PayloadTypes}; -use reth_primitives_traits::{ - Block, GotExpected, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader, +use reth_payload_primitives::{ + BuiltPayload, EngineApiMessageVersion, NewPayloadError, PayloadBuilderAttributes, PayloadTypes, }; +use reth_primitives_traits::{Block, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeader}; use reth_provider::{ providers::ConsistentDbView, BlockNumReader, BlockReader, DBProvider, DatabaseProviderFactory, - ExecutionOutcome, HashedPostStateProvider, ProviderError, StateCommitmentProvider, - StateProvider, StateProviderBox, StateProviderFactory, StateReader, StateRootProvider, - TransactionVariant, + HashedPostStateProvider, ProviderError, StateCommitmentProvider, StateProviderBox, + StateProviderFactory, StateReader, StateRootProvider, TransactionVariant, }; -use reth_revm::{database::StateProviderDatabase, State}; +use reth_revm::database::StateProviderDatabase; use reth_stages_api::ControlFlow; -use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; +use reth_trie::{HashedPostState, TrieInput}; use reth_trie_db::{DatabaseHashedPostState, StateCommitment}; -use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; use state::TreeState; use std::{ - borrow::Cow, - collections::HashMap, fmt::Debug, sync::{ mpsc::{Receiver, RecvError, RecvTimeoutError, Sender}, @@ -86,10 +76,9 @@ pub use block_buffer::BlockBuffer; 
pub use invalid_block_hook::{InvalidBlockHooks, NoopInvalidBlockHook}; pub use invalid_headers::InvalidHeaderCache; pub use payload_processor::*; -pub use payload_validator::{EngineValidator, TreePayloadValidator}; +pub use payload_validator::{BasicEngineValidator, EngineValidator}; pub use persistence_state::PersistenceState; pub use reth_engine_primitives::TreeConfig; -use reth_evm::execute::BlockExecutionOutput; pub mod state; @@ -267,18 +256,10 @@ where config: TreeConfig, /// Metrics for the engine api. metrics: EngineApiMetrics, - /// An invalid block hook. - invalid_block_hook: Box>, /// The engine API variant of this handler engine_kind: EngineApiKind, - /// The type responsible for processing new payloads - payload_processor: PayloadProcessor, /// The EVM configuration. evm_config: C, - /// Precompile cache map. - precompile_cache_map: PrecompileCacheMap>, - /// Metrics for precompile cache, stored per address to avoid re-allocation. - precompile_cache_metrics: HashMap, } impl std::fmt::Debug @@ -301,9 +282,7 @@ where .field("payload_builder", &self.payload_builder) .field("config", &self.config) .field("metrics", &self.metrics) - .field("invalid_block_hook", &format!("{:p}", self.invalid_block_hook)) .field("engine_kind", &self.engine_kind) - .field("payload_processor", &self.payload_processor) .field("evm_config", &self.evm_config) .finish() } @@ -323,8 +302,8 @@ where

::Provider: BlockReader, C: ConfigureEvm + 'static, - T: PayloadTypes, - V: EngineValidator, + T: PayloadTypes>, + V: EngineValidator, { /// Creates a new [`EngineApiTreeHandler`]. #[expect(clippy::too_many_arguments)] @@ -344,15 +323,6 @@ where ) -> Self { let (incoming_tx, incoming) = std::sync::mpsc::channel(); - let precompile_cache_map = PrecompileCacheMap::default(); - - let payload_processor = PayloadProcessor::new( - WorkloadExecutor::default(), - evm_config.clone(), - &config, - precompile_cache_map.clone(), - ); - Self { provider, consensus, @@ -368,20 +338,11 @@ where config, metrics: Default::default(), incoming_tx, - invalid_block_hook: Box::new(NoopInvalidBlockHook), engine_kind, - payload_processor, evm_config, - precompile_cache_map, - precompile_cache_metrics: HashMap::new(), } } - /// Sets the invalid block hook. - fn set_invalid_block_hook(&mut self, invalid_block_hook: Box>) { - self.invalid_block_hook = invalid_block_hook; - } - /// Creates a new [`EngineApiTreeHandler`] instance and spawns it in its /// own thread. /// @@ -396,7 +357,6 @@ where payload_builder: PayloadBuilderHandle, canonical_in_memory_state: CanonicalInMemoryState, config: TreeConfig, - invalid_block_hook: Box>, kind: EngineApiKind, evm_config: C, ) -> (Sender, N::Block>>, UnboundedReceiver>) @@ -417,7 +377,7 @@ where kind, ); - let mut task = Self::new( + let task = Self::new( provider, consensus, payload_validator, @@ -431,7 +391,6 @@ where kind, evm_config, ); - task.set_invalid_block_hook(invalid_block_hook); let incoming = task.incoming_tx.clone(); std::thread::Builder::new().name("Tree Task".to_string()).spawn(|| task.run()).unwrap(); (incoming, outgoing) @@ -556,52 +515,43 @@ where // // This validation **MUST** be instantly run in all cases even during active sync process. let parent_hash = payload.parent_hash(); - let block = match self.payload_validator.ensure_well_formed_payload(payload) { - Ok(block) => block, - Err(error) => { - error!(target: "engine::tree", %error, "Invalid payload"); - // we need to convert the error to a payload status (response to the CL) - - let latest_valid_hash = - if error.is_block_hash_mismatch() || error.is_invalid_versioned_hashes() { - // Engine-API rules: - // > `latestValidHash: null` if the blockHash validation has failed () - // > `latestValidHash: null` if the expected and the actual arrays don't match () - None - } else { - self.latest_valid_hash_for_invalid_payload(parent_hash)? - }; - - let status = PayloadStatusEnum::from(error); - return Ok(TreeOutcome::new(PayloadStatus::new(status, latest_valid_hash))) - } - }; self.metrics .block_validation .record_payload_validation(validation_start.elapsed().as_secs_f64()); - let num_hash = block.num_hash(); + let num_hash = payload.num_hash(); let engine_event = BeaconConsensusEngineEvent::BlockReceived(num_hash); self.emit_event(EngineApiEvent::BeaconConsensus(engine_event)); - let block_hash = block.hash(); + let block_hash = num_hash.hash; let mut lowest_buffered_ancestor = self.lowest_buffered_ancestor_or(block_hash); if lowest_buffered_ancestor == block_hash { - lowest_buffered_ancestor = block.parent_hash(); - } + lowest_buffered_ancestor = parent_hash; + } + + // now check if the block has an invalid ancestor + if let Some(invalid) = self.state.invalid_headers.get(&lowest_buffered_ancestor) { + // Here we might have 2 cases + // 1. the block is well formed and indeed links to an invalid header, meaning we should + // remember it as invalid + // 2. 
the block is not well formed (i.e. the block hash is incorrect), and we should just
+        //    return an error and forget it
+        let block = match self.payload_validator.ensure_well_formed_payload(payload) {
+            Ok(block) => block,
+            Err(error) => {
+                let status = self.on_new_payload_error(error, parent_hash)?;
+                return Ok(TreeOutcome::new(status))
+            }
+        };

-        // now check the block itself
-        if let Some(status) =
-            self.check_invalid_ancestor_with_head(lowest_buffered_ancestor, &block)?
-        {
+            let status = self.on_invalid_new_payload(block.into_sealed_block(), invalid)?;
             return Ok(TreeOutcome::new(status))
         }

         let status = if self.backfill_sync_state.is_idle() {
             let mut latest_valid_hash = None;
-            let num_hash = block.num_hash();
-            match self.insert_block(block) {
+            match self.insert_payload(payload) {
                 Ok(status) => {
                     let status = match status {
                         InsertPayloadOk::Inserted(BlockStatus::Valid) => {
@@ -622,12 +572,25 @@
                     PayloadStatus::new(status, latest_valid_hash)
                 }
-                Err(error) => self.on_insert_block_error(error)?,
+                Err(error) => match error {
+                    InsertPayloadError::Block(error) => self.on_insert_block_error(error)?,
+                    InsertPayloadError::Payload(error) => {
+                        self.on_new_payload_error(error, parent_hash)?
+                    }
+                },
             }
-        } else if let Err(error) = self.buffer_block(block) {
-            self.on_insert_block_error(error)?
         } else {
-            PayloadStatus::from_status(PayloadStatusEnum::Syncing)
+            match self.payload_validator.ensure_well_formed_payload(payload) {
+                // if the block is well-formed, buffer it for later
+                Ok(block) => {
+                    if let Err(error) = self.buffer_block(block) {
+                        self.on_insert_block_error(error)?
+                    } else {
+                        PayloadStatus::from_status(PayloadStatusEnum::Syncing)
+                    }
+                }
+                Err(error) => self.on_new_payload_error(error, parent_hash)?,
+            }
         };

         let mut outcome = TreeOutcome::new(status);
@@ -749,24 +712,24 @@
     /// The header is required as an arg, because we might be checking that the header is a fork
     /// block before it's in the tree state and before it's in the database.
-    fn is_fork(&self, target_header: &SealedHeader<N::BlockHeader>) -> ProviderResult<bool> {
-        let target_hash = target_header.hash();
+    fn is_fork(&self, target: BlockWithParent) -> ProviderResult<bool> {
+        let target_hash = target.block.hash;
         // verify that the given hash is not part of an extension of the canon chain.
         let canonical_head = self.state.tree_state.canonical_head();
         let mut current_hash;
-        let mut current_block = Cow::Borrowed(target_header);
+        let mut current_block = target;
         loop {
-            if current_block.hash() == canonical_head.hash {
+            if current_block.block.hash == canonical_head.hash {
                 return Ok(false)
             }
             // We already passed the canonical head
-            if current_block.number() <= canonical_head.number {
+            if current_block.block.number <= canonical_head.number {
                 break
             }
-            current_hash = current_block.parent_hash();
+            current_hash = current_block.parent;
             let Some(next_block) = self.sealed_header_by_hash(current_hash)? else { break };
-            current_block = Cow::Owned(next_block);
+            current_block = next_block.block_with_parent();
         }

         // verify that the given hash is not already part of canonical chain stored in memory
@@ -782,26 +745,6 @@
         Ok(true)
     }

-    /// Check if the given block has any ancestors with missing trie updates.
- fn has_ancestors_with_missing_trie_updates( - &self, - target_header: &SealedHeader, - ) -> bool { - // Walk back through the chain starting from the parent of the target block - let mut current_hash = target_header.parent_hash(); - while let Some(block) = self.state.tree_state.blocks_by_hash.get(¤t_hash) { - // Check if this block is missing trie updates - if block.trie.is_missing() { - return true; - } - - // Move to the parent block - current_hash = block.recovered_block().parent_hash(); - } - - false - } - /// Returns the persisting kind for the input block. fn persisting_kind_for(&self, block: &N::BlockHeader) -> PersistingKind { // Check that we're currently persisting. @@ -1665,14 +1608,23 @@ where // check if the check hash was previously marked as invalid let Some(header) = self.state.invalid_headers.get(&check) else { return Ok(None) }; + Ok(Some(self.on_invalid_new_payload(head.clone(), header)?)) + } + + /// Invoked when a new payload received is invalid. + fn on_invalid_new_payload( + &mut self, + head: SealedBlock, + invalid: BlockWithParent, + ) -> ProviderResult { // populate the latest valid hash field - let status = self.prepare_invalid_response(header.parent)?; + let status = self.prepare_invalid_response(invalid.parent)?; // insert the head block into the invalid header cache - self.state.invalid_headers.insert_with_invalid_ancestor(head.hash(), header); - self.emit_event(BeaconConsensusEngineEvent::InvalidBlock(Box::new(head.clone()))); + self.state.invalid_headers.insert_with_invalid_ancestor(head.hash(), invalid); + self.emit_event(BeaconConsensusEngineEvent::InvalidBlock(Box::new(head))); - Ok(Some(status)) + Ok(status) } /// Checks if the given `head` points to an invalid header, which requires a specific response @@ -1976,21 +1928,6 @@ where } } - /// Invoke the invalid block hook if this is a new invalid block. - fn on_invalid_block( - &mut self, - parent_header: &SealedHeader, - block: &RecoveredBlock, - output: &BlockExecutionOutput, - trie_updates: Option<(&TrieUpdates, B256)>, - ) { - if self.state.invalid_headers.get(&block.hash()).is_some() { - // we already marked this block as invalid - return; - } - self.invalid_block_hook.on_invalid_block(parent_header, block, output, trie_updates); - } - /// This handles downloaded blocks that are shown to be disconnected from the canonical chain. /// /// This mainly compares the missing parent of the downloaded block with the current canonical @@ -2094,315 +2031,108 @@ where Ok(None) } + fn insert_payload( + &mut self, + payload: T::ExecutionData, + ) -> Result> { + self.insert_block_or_payload( + payload.block_with_parent(), + payload, + |validator, payload, ctx| validator.validate_payload(payload, ctx), + |this, payload| Ok(this.payload_validator.ensure_well_formed_payload(payload)?), + ) + } + fn insert_block( &mut self, block: RecoveredBlock, ) -> Result> { - match self.insert_block_inner(block) { - Ok(result) => Ok(result), - Err((kind, block)) => Err(InsertBlockError::new(block.into_sealed_block(), kind)), - } + self.insert_block_or_payload( + block.block_with_parent(), + block, + |validator, block, ctx| validator.validate_block(block, ctx), + |_, block| Ok(block), + ) } - fn insert_block_inner( + fn insert_block_or_payload( &mut self, - block: RecoveredBlock, - ) -> Result)> { - /// A helper macro that returns the block in case there was an error - macro_rules! 
ensure_ok { - ($expr:expr) => { - match $expr { - Ok(val) => val, - Err(e) => return Err((e.into(), block)), - } - }; - } - - let block_num_hash = block.num_hash(); - debug!(target: "engine::tree", block=?block_num_hash, parent = ?block.parent_hash(), state_root = ?block.state_root(), "Inserting new block into tree"); - - if ensure_ok!(self.block_by_hash(block.hash())).is_some() { - return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid)) - } - - let start = Instant::now(); - - trace!(target: "engine::tree", block=?block_num_hash, "Validating block consensus"); - - // validate block consensus rules - ensure_ok!(self.validate_block(&block)); - - trace!(target: "engine::tree", block=?block_num_hash, parent=?block.parent_hash(), "Fetching block state provider"); - let Some(provider_builder) = ensure_ok!(self.state_provider_builder(block.parent_hash())) - else { - // we don't have the state required to execute this block, buffering it and find the - // missing parent block - let missing_ancestor = self - .state - .buffer - .lowest_ancestor(&block.parent_hash()) - .map(|block| block.parent_num_hash()) - .unwrap_or_else(|| block.parent_num_hash()); - - self.state.buffer.insert_block(block); - - return Ok(InsertPayloadOk::Inserted(BlockStatus::Disconnected { - head: self.state.tree_state.current_canonical_head, - missing_ancestor, - })) - }; - - // now validate against the parent - let Some(parent_block) = ensure_ok!(self.sealed_header_by_hash(block.parent_hash())) else { - return Err(( - InsertBlockErrorKind::Provider(ProviderError::HeaderNotFound( - block.parent_hash().into(), - )), - block, - )) - }; - - if let Err(e) = - self.consensus.validate_header_against_parent(block.sealed_header(), &parent_block) - { - warn!(target: "engine::tree", ?block, "Failed to validate header {} against parent: {e}", block.hash()); - return Err((e.into(), block)) - } - - let state_provider = ensure_ok!(provider_builder.build()); - - // We only run the parallel state root if we are not currently persisting any blocks or - // persisting blocks that are all ancestors of the one we are executing. - // - // If we're committing ancestor blocks, then: any trie updates being committed are a subset - // of the in-memory trie updates collected before fetching reverts. So any diff in - // reverts (pre vs post commit) is already covered by the in-memory trie updates we - // collect in `compute_state_root_parallel`. - // - // See https://github.com/paradigmxyz/reth/issues/12688 for more details - let persisting_kind = self.persisting_kind_for(block.header()); - // don't run parallel if state root fallback is set - let run_parallel_state_root = - persisting_kind.can_run_parallel_state_root() && !self.config.state_root_fallback(); - - // Use state root task only if: - // 1. No persistence is in progress - // 2. Config allows it - // 3. No ancestors with missing trie updates. If any exist, it will mean that every state - // root task proof calculation will include a lot of unrelated paths in the prefix sets. - // It's cheaper to run a parallel state root that does one walk over trie tables while - // accounting for the prefix sets. 
- let has_ancestors_with_missing_trie_updates = - self.has_ancestors_with_missing_trie_updates(block.sealed_header()); - let mut use_state_root_task = run_parallel_state_root && - self.config.use_state_root_task() && - !has_ancestors_with_missing_trie_updates; - - debug!( - target: "engine::tree", - block=?block_num_hash, - run_parallel_state_root, - has_ancestors_with_missing_trie_updates, - use_state_root_task, - config_allows_state_root_task=self.config.use_state_root_task(), - "Deciding which state root algorithm to run" - ); - - // use prewarming background task - let header = block.clone_sealed_header(); - let txs = block.clone_transactions_recovered().collect(); - let mut handle = if use_state_root_task { - // use background tasks for state root calc - let consistent_view = - ensure_ok!(ConsistentDbView::new_with_latest_tip(self.provider.clone())); - - // Compute trie input - let trie_input_start = Instant::now(); - let res = self.compute_trie_input( - persisting_kind, - ensure_ok!(consistent_view.provider_ro()), - block.header().parent_hash(), - ); - let trie_input = match res { - Ok(val) => val, - Err(e) => return Err((InsertBlockErrorKind::Other(Box::new(e)), block)), - }; + block_id: BlockWithParent, + input: Input, + execute: impl FnOnce( + &mut V, + Input, + TreeCtx<'_, N>, + ) -> Result, Err>, + convert_to_block: impl FnOnce(&mut Self, Input) -> Result, Err>, + ) -> Result + where + Err: From>, + { + let block_num_hash = block_id.block; + debug!(target: "engine::tree", block=?block_num_hash, parent = ?block_id.parent, "Inserting new block into tree"); - self.metrics - .block_validation - .trie_input_duration - .record(trie_input_start.elapsed().as_secs_f64()); - - // Use state root task only if prefix sets are empty, otherwise proof generation is too - // expensive because it requires walking over the paths in the prefix set in every - // proof. - if trie_input.prefix_sets.is_empty() { - self.payload_processor.spawn( - header, - txs, - provider_builder, - consistent_view, - trie_input, - &self.config, - ) - } else { - debug!(target: "engine::tree", block=?block_num_hash, "Disabling state root task due to non-empty prefix sets"); - use_state_root_task = false; - self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder) + match self.block_by_hash(block_num_hash.hash) { + Err(err) => { + let block = convert_to_block(self, input)?; + return Err(InsertBlockError::new(block.into_sealed_block(), err.into()).into()); } - } else { - self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder) - }; - - // Use cached state provider before executing, used in execution after prewarming threads - // complete - let state_provider = CachedStateProvider::new_with_caches( - state_provider, - handle.caches(), - handle.cache_metrics(), - ); - - let (output, execution_finish) = if self.config.state_provider_metrics() { - let state_provider = InstrumentedStateProvider::from_state_provider(&state_provider); - let (output, execution_finish) = - ensure_ok!(self.execute_block(&state_provider, &block, &handle)); - state_provider.record_total_latency(); - (output, execution_finish) - } else { - let (output, execution_finish) = - ensure_ok!(self.execute_block(&state_provider, &block, &handle)); - (output, execution_finish) + Ok(Some(_)) => { + // We now assume that we already have this block in the tree. However, we need to + // run the conversion to ensure that the block hash is valid. 
+ convert_to_block(self, input)?; + return Ok(InsertPayloadOk::AlreadySeen(BlockStatus::Valid)) + } + _ => {} }; - // after executing the block we can stop executing transactions - handle.stop_prewarming_execution(); + // Ensure that the parent state is available. + match self.state_provider_builder(block_id.parent) { + Err(err) => { + let block = convert_to_block(self, input)?; + return Err(InsertBlockError::new(block.into_sealed_block(), err.into()).into()); + } + Ok(None) => { + let block = convert_to_block(self, input)?; - if let Err(err) = self.consensus.validate_block_post_execution(&block, &output) { - // call post-block hook - self.on_invalid_block(&parent_block, &block, &output, None); - return Err((err.into(), block)) - } + // we don't have the state required to execute this block, buffering it and find the + // missing parent block + let missing_ancestor = self + .state + .buffer + .lowest_ancestor(&block.parent_hash()) + .map(|block| block.parent_num_hash()) + .unwrap_or_else(|| block.parent_num_hash()); - let hashed_state = self.provider.hashed_post_state(&output.state); + self.state.buffer.insert_block(block); - if let Err(err) = self - .payload_validator - .validate_block_post_execution_with_hashed_state(&hashed_state, &block) - { - // call post-block hook - self.on_invalid_block(&parent_block, &block, &output, None); - return Err((err.into(), block)) - } - - debug!(target: "engine::tree", block=?block_num_hash, "Calculating block state root"); - - let root_time = Instant::now(); - - let mut maybe_state_root = None; - - if run_parallel_state_root { - // if we new payload extends the current canonical change we attempt to use the - // background task or try to compute it in parallel - if use_state_root_task { - debug!(target: "engine::tree", block=?block_num_hash, "Using sparse trie state root algorithm"); - match handle.state_root() { - Ok(StateRootComputeOutcome { state_root, trie_updates }) => { - let elapsed = execution_finish.elapsed(); - info!(target: "engine::tree", ?state_root, ?elapsed, "State root task finished"); - // we double check the state root here for good measure - if state_root == block.header().state_root() { - maybe_state_root = Some((state_root, trie_updates, elapsed)) - } else { - warn!( - target: "engine::tree", - ?state_root, - block_state_root = ?block.header().state_root(), - "State root task returned incorrect state root" - ); - } - } - Err(error) => { - debug!(target: "engine::tree", %error, "Background parallel state root computation failed"); - } - } - } else { - debug!(target: "engine::tree", block=?block_num_hash, "Using parallel state root algorithm"); - match self.compute_state_root_parallel( - persisting_kind, - block.header().parent_hash(), - &hashed_state, - ) { - Ok(result) => { - info!( - target: "engine::tree", - block = ?block_num_hash, - regular_state_root = ?result.0, - "Regular root task finished" - ); - maybe_state_root = Some((result.0, result.1, root_time.elapsed())); - } - Err(ParallelStateRootError::Provider(ProviderError::ConsistentView(error))) => { - debug!(target: "engine::tree", %error, "Parallel state root computation failed consistency check, falling back"); - } - Err(error) => return Err((InsertBlockErrorKind::Other(Box::new(error)), block)), - } + return Ok(InsertPayloadOk::Inserted(BlockStatus::Disconnected { + head: self.state.tree_state.current_canonical_head, + missing_ancestor, + })) } + Ok(Some(_)) => {} } - let (state_root, trie_output, root_elapsed) = if let Some(maybe_state_root) = - maybe_state_root - { - 
maybe_state_root - } else { - // fallback is to compute the state root regularly in sync - if self.config.state_root_fallback() { - debug!(target: "engine::tree", block=?block_num_hash, "Using state root fallback for testing"); - } else { - warn!(target: "engine::tree", block=?block_num_hash, ?persisting_kind, "Failed to compute state root in parallel"); - self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); + // determine whether we are on a fork chain + let is_fork = match self.is_fork(block_id) { + Err(err) => { + let block = convert_to_block(self, input)?; + return Err(InsertBlockError::new(block.into_sealed_block(), err.into()).into()); } - - let (root, updates) = - ensure_ok!(state_provider.state_root_with_updates(hashed_state.clone())); - (root, updates, root_time.elapsed()) + Ok(is_fork) => is_fork, }; - self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); - debug!(target: "engine::tree", ?root_elapsed, block=?block_num_hash, "Calculated state root"); - - // ensure state root matches - if state_root != block.header().state_root() { - // call post-block hook - self.on_invalid_block(&parent_block, &block, &output, Some((&trie_output, state_root))); - return Err(( - ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.header().state_root() }.into(), - ) - .into(), - block, - )) - } - - // terminate prewarming task with good state output - handle.terminate_caching(Some(output.state.clone())); + let ctx = TreeCtx::new( + &mut self.state, + &self.persistence_state, + &self.canonical_in_memory_state, + is_fork, + ); - let is_fork = ensure_ok!(self.is_fork(block.sealed_header())); + let start = Instant::now(); - // If the block is a fork, we don't save the trie updates, because they may be incorrect. - // Instead, they will be recomputed on persistence. 
- let trie_updates = if is_fork { - ExecutedTrieUpdates::Missing - } else { - ExecutedTrieUpdates::Present(Arc::new(trie_output)) - }; - let executed: ExecutedBlockWithTrieUpdates = ExecutedBlockWithTrieUpdates { - block: ExecutedBlock { - recovered_block: Arc::new(block), - execution_output: Arc::new(ExecutionOutcome::from((output, block_num_hash.number))), - hashed_state: Arc::new(hashed_state), - }, - trie: trie_updates, - }; + let executed = execute(&mut self.payload_validator, input, ctx)?; // if the parent is the canonical head, we can insert the block as the pending block if self.state.tree_state.canonical_block_hash() == executed.recovered_block().parent_hash() @@ -2427,73 +2157,6 @@ where Ok(InsertPayloadOk::Inserted(BlockStatus::Valid)) } - /// Executes a block with the given state provider - fn execute_block( - &mut self, - state_provider: S, - block: &RecoveredBlock, - handle: &PayloadHandle, - ) -> Result<(BlockExecutionOutput, Instant), InsertBlockErrorKind> { - debug!(target: "engine::tree", block=?block.num_hash(), "Executing block"); - let mut db = State::builder() - .with_database(StateProviderDatabase::new(&state_provider)) - .with_bundle_update() - .without_state_clear() - .build(); - let mut executor = self.evm_config.executor_for_block(&mut db, block); - - if !self.config.precompile_cache_disabled() { - executor.evm_mut().precompiles_mut().map_precompiles(|address, precompile| { - let metrics = self - .precompile_cache_metrics - .entry(*address) - .or_insert_with(|| CachedPrecompileMetrics::new_with_address(*address)) - .clone(); - CachedPrecompile::wrap( - precompile, - self.precompile_cache_map.cache_for_address(*address), - *self.evm_config.evm_env(block.header()).spec_id(), - Some(metrics), - ) - }); - } - - let execution_start = Instant::now(); - let output = self.metrics.executor.execute_metered( - executor, - block, - Box::new(handle.state_hook()), - )?; - let execution_finish = Instant::now(); - let execution_time = execution_finish.duration_since(execution_start); - debug!(target: "engine::tree", elapsed = ?execution_time, number=?block.number(), "Executed block"); - Ok((output, execution_finish)) - } - - /// Compute state root for the given hashed post state in parallel. - /// - /// # Returns - /// - /// Returns `Ok(_)` if computed successfully. - /// Returns `Err(_)` if error was encountered during computation. - /// `Err(ProviderError::ConsistentView(_))` can be safely ignored and fallback computation - /// should be used instead. - fn compute_state_root_parallel( - &self, - persisting_kind: PersistingKind, - parent_hash: B256, - hashed_state: &HashedPostState, - ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { - let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; - - let mut input = - self.compute_trie_input(persisting_kind, consistent_view.provider_ro()?, parent_hash)?; - // Extend with block we are validating root for. - input.append_ref(hashed_state); - - ParallelStateRoot::new(consistent_view, input).incremental_root_with_updates() - } - /// Computes the trie input at the provided parent hash. /// /// The goal of this function is to take in-memory blocks and generate a [`TrieInput`] that @@ -2632,6 +2295,29 @@ where )) } + /// Handles a [`NewPayloadError`] by converting it to a [`PayloadStatus`]. 
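+    ///
+    /// As a rough sketch (not normative; the exact rules live in the Engine API spec), the
+    /// resulting status is:
+    ///
+    /// ```text
+    /// block hash mismatch / invalid versioned hashes -> INVALID, latestValidHash: null
+    /// any other payload error                        -> INVALID, latestValidHash: last known
+    ///                                                   valid ancestor of the parent (if any)
+    /// ```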
+ fn on_new_payload_error( + &mut self, + error: NewPayloadError, + parent_hash: B256, + ) -> ProviderResult { + error!(target: "engine::tree", %error, "Invalid payload"); + // we need to convert the error to a payload status (response to the CL) + + let latest_valid_hash = + if error.is_block_hash_mismatch() || error.is_invalid_versioned_hashes() { + // Engine-API rules: + // > `latestValidHash: null` if the blockHash validation has failed () + // > `latestValidHash: null` if the expected and the actual arrays don't match () + None + } else { + self.latest_valid_hash_for_invalid_payload(parent_hash)? + }; + + let status = PayloadStatusEnum::from(error); + Ok(PayloadStatus::new(status, latest_valid_hash)) + } + /// Attempts to find the header for the given block hash if it is canonical. pub fn find_canonical_header( &self, diff --git a/crates/engine/tree/src/tree/payload_validator.rs b/crates/engine/tree/src/tree/payload_validator.rs index 440bef5ecba..3b3ef0c90d1 100644 --- a/crates/engine/tree/src/tree/payload_validator.rs +++ b/crates/engine/tree/src/tree/payload_validator.rs @@ -1,128 +1,64 @@ -//! Concrete implementation of the `PayloadValidator` trait. +//! Types and traits for validating blocks and payloads. use crate::tree::{ cached_state::CachedStateProvider, + error::{InsertBlockError, InsertBlockErrorKind, InsertPayloadError}, executor::WorkloadExecutor, instrumented_state::InstrumentedStateProvider, payload_processor::PayloadProcessor, + persistence_state::CurrentPersistenceAction, precompile_cache::{CachedPrecompile, CachedPrecompileMetrics, PrecompileCacheMap}, - ConsistentDbView, EngineApiMetrics, EngineApiTreeState, InvalidHeaderCache, PersistingKind, - StateProviderDatabase, TreeConfig, + sparse_trie::StateRootComputeOutcome, + ConsistentDbView, EngineApiMetrics, EngineApiTreeState, PayloadHandle, PersistenceState, + PersistingKind, StateProviderBuilder, StateProviderDatabase, TreeConfig, }; -use alloy_eips::BlockNumHash; use alloy_evm::{block::BlockExecutor, Evm}; use alloy_primitives::B256; -use reth_chain_state::CanonicalInMemoryState; +use reth_chain_state::{ + CanonicalInMemoryState, ExecutedBlock, ExecutedBlockWithTrieUpdates, ExecutedTrieUpdates, +}; use reth_consensus::{ConsensusError, FullConsensus}; use reth_engine_primitives::{InvalidBlockHook, PayloadValidator}; +use reth_errors::ProviderResult; use reth_evm::{ConfigureEvm, SpecFor}; use reth_payload_primitives::{ - BuiltPayload, EngineApiMessageVersion, EngineObjectValidationError, - InvalidPayloadAttributesError, NewPayloadError, PayloadAttributes, PayloadOrAttributes, - PayloadTypes, + BuiltPayload, InvalidPayloadAttributesError, NewPayloadError, PayloadTypes, }; use reth_primitives_traits::{ - AlloyBlockHeader, Block, BlockBody, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, + AlloyBlockHeader, BlockTy, GotExpected, NodePrimitives, RecoveredBlock, SealedHeader, }; use reth_provider::{ BlockExecutionOutput, BlockNumReader, BlockReader, DBProvider, DatabaseProviderFactory, - HashedPostStateProvider, HeaderProvider, ProviderError, StateCommitmentProvider, StateProvider, - StateProviderFactory, StateReader, + ExecutionOutcome, HashedPostStateProvider, ProviderError, StateCommitmentProvider, + StateProvider, StateProviderFactory, StateReader, StateRootProvider, }; use reth_revm::db::State; use reth_trie::{updates::TrieUpdates, HashedPostState, TrieInput}; use reth_trie_db::{DatabaseHashedPostState, StateCommitment}; use reth_trie_parallel::root::{ParallelStateRoot, ParallelStateRootError}; use 
std::{collections::HashMap, sync::Arc, time::Instant}; -use tracing::{debug, trace}; - -/// Outcome of validating a payload -#[derive(Debug)] -pub enum PayloadValidationOutcome { - /// Payload is valid and produced a block - Valid { - /// The block created from the payload - block: RecoveredBlock, - /// The trie updates from state root computation - trie_updates: reth_trie::updates::TrieUpdates, - }, - /// Payload is invalid but block construction succeeded - Invalid { - /// The block created from the payload - block: RecoveredBlock, - /// The validation error - error: NewPayloadError, - }, -} - -/// Information about the current persistence state for validation context -#[derive(Debug, Clone, Copy)] -pub struct PersistenceInfo { - /// The last persisted block - pub last_persisted_block: BlockNumHash, - /// The current persistence action, if any - pub current_action: Option, -} - -impl PersistenceInfo { - /// Creates a new persistence info with no current action - pub const fn new(last_persisted_block: BlockNumHash) -> Self { - Self { last_persisted_block, current_action: None } - } - - /// Creates persistence info with a saving blocks action - pub const fn with_saving_blocks( - last_persisted_block: BlockNumHash, - highest: BlockNumHash, - ) -> Self { - Self { - last_persisted_block, - current_action: Some(PersistenceAction::SavingBlocks { highest }), - } - } - - /// Creates persistence info with a removing blocks action - pub const fn with_removing_blocks( - last_persisted_block: BlockNumHash, - new_tip_num: u64, - ) -> Self { - Self { - last_persisted_block, - current_action: Some(PersistenceAction::RemovingBlocks { new_tip_num }), - } - } -} - -/// The type of persistence action currently in progress -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum PersistenceAction { - /// Saving blocks to disk - SavingBlocks { - /// The highest block being saved - highest: BlockNumHash, - }, - /// Removing blocks from disk - RemovingBlocks { - /// The new tip after removal - new_tip_num: u64, - }, -} +use tracing::{debug, error, info, trace, warn}; -/// Context providing access to tree state during validation +/// Context providing access to tree state during validation. +/// +/// This context is provided to the [`EngineValidator`] and includes the state of the tree's +/// internals pub struct TreeCtx<'a, N: NodePrimitives> { /// The engine API tree state - state: &'a EngineApiTreeState, + state: &'a mut EngineApiTreeState, /// Information about the current persistence state - persistence_info: PersistenceInfo, + persistence: &'a PersistenceState, /// Reference to the canonical in-memory state canonical_in_memory_state: &'a CanonicalInMemoryState, + /// Whether the currently validated block is on a fork chain. 
+    is_fork: bool,
 }

 impl<'a, N: NodePrimitives> std::fmt::Debug for TreeCtx<'a, N> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.debug_struct("TreeCtx")
             .field("state", &"EngineApiTreeState")
-            .field("persistence_info", &self.persistence_info)
+            .field("persistence_info", &self.persistence)
             .field("canonical_in_memory_state", &self.canonical_in_memory_state)
             .finish()
     }
@@ -131,36 +67,76 @@ impl<'a, N: NodePrimitives> std::fmt::Debug for TreeCtx<'a, N> {
 impl<'a, N: NodePrimitives> TreeCtx<'a, N> {
     /// Creates a new tree context
     pub const fn new(
-        state: &'a EngineApiTreeState<N>,
-        persistence_info: PersistenceInfo,
+        state: &'a mut EngineApiTreeState<N>,
+        persistence: &'a PersistenceState,
         canonical_in_memory_state: &'a CanonicalInMemoryState<N>,
+        is_fork: bool,
     ) -> Self {
-        Self { state, persistence_info, canonical_in_memory_state }
+        Self { state, persistence, canonical_in_memory_state, is_fork }
+    }
+
+    /// Returns a reference to the engine tree state
+    pub const fn state(&self) -> &EngineApiTreeState<N> {
+        &*self.state
     }

-    /// Returns a reference to the engine API tree state
-    pub const fn state(&self) -> &'a EngineApiTreeState<N> {
+    /// Returns a mutable reference to the engine tree state
+    pub const fn state_mut(&mut self) -> &mut EngineApiTreeState<N> {
         self.state
     }

     /// Returns a reference to the persistence info
-    pub const fn persistence_info(&self) -> &PersistenceInfo {
-        &self.persistence_info
+    pub const fn persistence(&self) -> &PersistenceState {
+        self.persistence
     }

     /// Returns a reference to the canonical in-memory state
     pub const fn canonical_in_memory_state(&self) -> &'a CanonicalInMemoryState<N> {
         self.canonical_in_memory_state
     }
+
+    /// Returns whether the currently validated block is on a fork chain.
+    pub const fn is_fork(&self) -> bool {
+        self.is_fork
+    }
+
+    /// Determines the persisting kind for the given block based on persistence info.
+    ///
+    /// Based on the given header it returns whether any conflicting persistence operation is
+    /// currently in progress.
+    ///
+    /// This is adapted from the `persisting_kind_for` method in `EngineApiTreeHandler`.
+    pub fn persisting_kind_for(&self, block: &N::BlockHeader) -> PersistingKind {
+        // Check that we're currently persisting.
+        let Some(action) = self.persistence().current_action() else {
+            return PersistingKind::NotPersisting
+        };
+        // Check that the persistence action is saving blocks, not removing them.
+        let CurrentPersistenceAction::SavingBlocks { highest } = action else {
+            return PersistingKind::PersistingNotDescendant
+        };
+
+        // The block being validated can only be a descendant if its number is higher than
+        // the highest block persisting. Otherwise, it's likely a fork of a lower block.
+        if block.number() > highest.number && self.state().tree_state.is_descendant(*highest, block)
+        {
+            return PersistingKind::PersistingDescendant
+        }
+
+        // In all other cases, the block is not a descendant.
+        PersistingKind::PersistingNotDescendant
+    }
 }

 /// A helper type that provides reusable payload validation logic for network-specific validators.
 ///
+/// This type satisfies [`EngineValidator`] and is responsible for executing blocks/payloads.
+///
 /// This type contains common validation, execution, and state root computation logic that can be
 /// used by network-specific payload validators (e.g., Ethereum, Optimism). It is not meant to be
 /// used as a standalone component, but rather as a building block for concrete implementations.
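+///
+/// ## Construction sketch
+///
+/// A rough sketch of wiring this type up; `provider`, `consensus`, `evm_config`,
+/// `payload_validator` and the invalid block hook are placeholders for whatever the node
+/// builder supplies (the test harness later in this diff constructs it the same way):
+///
+/// ```text
+/// let engine_validator = BasicEngineValidator::new(
+///     provider.clone(),
+///     consensus.clone(),
+///     evm_config.clone(),
+///     payload_validator,
+///     TreeConfig::default(),
+///     Box::new(NoopInvalidBlockHook::default()),
+/// );
+/// ```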
#[derive(derive_more::Debug)] -pub struct TreePayloadValidator +pub struct BasicEngineValidator where Evm: ConfigureEvm, { @@ -178,26 +154,24 @@ where precompile_cache_map: PrecompileCacheMap>, /// Precompile cache metrics. precompile_cache_metrics: HashMap, - /// Tracks invalid headers to prevent duplicate hook calls. - invalid_headers: InvalidHeaderCache, /// Hook to call when invalid blocks are encountered. #[debug(skip)] invalid_block_hook: Box>, /// Metrics for the engine api. metrics: EngineApiMetrics, + /// Validator for the payload. + validator: V, } -impl TreePayloadValidator +impl BasicEngineValidator where N: NodePrimitives, - P: DatabaseProviderFactory - + BlockReader - + BlockNumReader + P: DatabaseProviderFactory + + BlockReader

+ StateProviderFactory + StateReader + StateCommitmentProvider + HashedPostStateProvider - + HeaderProvider
+ Clone + 'static, Evm: ConfigureEvm + 'static, @@ -208,6 +182,7 @@ where provider: P, consensus: Arc>, evm_config: Evm, + validator: V, config: TreeConfig, invalid_block_hook: Box>, ) -> Self { @@ -225,10 +200,10 @@ where payload_processor, precompile_cache_map, precompile_cache_metrics: HashMap::new(), - invalid_headers: InvalidHeaderCache::new(config.max_invalid_header_cache_length()), config, invalid_block_hook, metrics: EngineApiMetrics::default(), + validator, } } @@ -239,394 +214,381 @@ where /// - Block execution /// - State root computation /// - Fork detection - pub fn validate_block_with_state( + pub fn validate_block_with_state>>( &mut self, block: RecoveredBlock, - ctx: TreeCtx<'_, N>, - ) -> Result, NewPayloadError> + mut ctx: TreeCtx<'_, N>, + ) -> ValidationOutcome)> where - N::Block: Block>, + V: PayloadValidator, { - // Helper macro to preserve block context when returning errors + /// A helper macro that returns the block in case there was an error macro_rules! ensure_ok { ($expr:expr) => { match $expr { Ok(val) => val, - Err(e) => { - let error = NewPayloadError::Other(Box::new(e)); - return Ok(PayloadValidationOutcome::Invalid { block, error }); - } + Err(e) => return Err((e.into(), block)), } }; } - // Extract references we need before moving ctx - let tree_state = ctx.state(); - let persistence_info = *ctx.persistence_info(); - - // Then validate the block using the validate_block method - if let Err(consensus_error) = self.validate_block(&block, ctx) { - trace!(target: "engine::tree", block=?block.num_hash(), ?consensus_error, "Block validation failed"); - let payload_error = NewPayloadError::Other(Box::new(consensus_error)); - return Ok(PayloadValidationOutcome::Invalid { block, error: payload_error }); - } + let block_num_hash = block.num_hash(); - // Get the parent block's state to execute against - let parent_hash = block.header().parent_hash(); + trace!(target: "engine::tree", block=?block_num_hash, "Validating block consensus"); + // validate block consensus rules + ensure_ok!(self.validate_block_inner(&block)); - // Get parent header for error context - let parent_header = ensure_ok!(self.get_parent_header(parent_hash, tree_state)); + trace!(target: "engine::tree", block=?block_num_hash, parent=?block.parent_hash(), "Fetching block state provider"); + let Some(provider_builder) = + ensure_ok!(self.state_provider_builder(block.parent_hash(), ctx.state())) + else { + // this is pre-validated in the tree + return Err(( + InsertBlockErrorKind::Provider(ProviderError::HeaderNotFound( + block.parent_hash().into(), + )), + block, + )) + }; - // Create StateProviderBuilder - let provider_builder = match self.create_state_provider_builder(parent_hash, tree_state) { - Ok(builder) => builder, - Err(e) => { - let error = NewPayloadError::Other(Box::new(e)); - return Ok(PayloadValidationOutcome::Invalid { block, error }); - } + // now validate against the parent + let Some(parent_block) = + ensure_ok!(self.sealed_header_by_hash(block.parent_hash(), ctx.state())) + else { + return Err(( + InsertBlockErrorKind::Provider(ProviderError::HeaderNotFound( + block.parent_hash().into(), + )), + block, + )) }; - // Determine persisting kind and state root task decision early for handle creation - let persisting_kind = - self.persisting_kind_for(block.header(), &persistence_info, tree_state); + if let Err(e) = + self.consensus.validate_header_against_parent(block.sealed_header(), &parent_block) + { + warn!(target: "engine::tree", ?block, "Failed to validate header {} against parent: 
{e}", block.hash()); + return Err((e.into(), block)) + } + + let state_provider = ensure_ok!(provider_builder.build()); + + // We only run the parallel state root if we are not currently persisting any blocks or + // persisting blocks that are all ancestors of the one we are executing. + // + // If we're committing ancestor blocks, then: any trie updates being committed are a subset + // of the in-memory trie updates collected before fetching reverts. So any diff in + // reverts (pre vs post commit) is already covered by the in-memory trie updates we + // collect in `compute_state_root_parallel`. + // + // See https://github.com/paradigmxyz/reth/issues/12688 for more details + let persisting_kind = ctx.persisting_kind_for(block.header()); + // don't run parallel if state root fallback is set let run_parallel_state_root = persisting_kind.can_run_parallel_state_root() && !self.config.state_root_fallback(); + + // Use state root task only if: + // 1. No persistence is in progress + // 2. Config allows it + // 3. No ancestors with missing trie updates. If any exist, it will mean that every state + // root task proof calculation will include a lot of unrelated paths in the prefix sets. + // It's cheaper to run a parallel state root that does one walk over trie tables while + // accounting for the prefix sets. let has_ancestors_with_missing_trie_updates = - self.has_ancestors_with_missing_trie_updates(block.sealed_header(), tree_state); - let use_state_root_task = run_parallel_state_root && + self.has_ancestors_with_missing_trie_updates(block.sealed_header(), ctx.state()); + let mut use_state_root_task = run_parallel_state_root && self.config.use_state_root_task() && !has_ancestors_with_missing_trie_updates; - // Build the state provider - let state_provider = ensure_ok!(provider_builder.build()); - - // Create a PayloadHandle for state hook support - let (mut handle, use_state_root_task) = self.spawn_payload_tasks( - &block, - provider_builder, + debug!( + target: "engine::tree", + block=?block_num_hash, + run_parallel_state_root, + has_ancestors_with_missing_trie_updates, use_state_root_task, - tree_state, - &persistence_info, + config_allows_state_root_task=self.config.use_state_root_task(), + "Deciding which state root algorithm to run" ); - // Execute the block with proper state provider wrapping - let (output, execution_time) = match self.execute_block_with_state_provider( - state_provider, - &block, - &handle, - ) { - Ok(result) => result, - Err(error) => { - trace!(target: "engine::tree", block=?block.num_hash(), ?error, "Block execution failed"); - return Ok(PayloadValidationOutcome::Invalid { block, error }); + // use prewarming background task + let header = block.clone_sealed_header(); + let txs = block.clone_transactions_recovered().collect(); + let mut handle = if use_state_root_task { + // use background tasks for state root calc + let consistent_view = + ensure_ok!(ConsistentDbView::new_with_latest_tip(self.provider.clone())); + + // Compute trie input + let trie_input_start = Instant::now(); + let res = self.compute_trie_input( + persisting_kind, + ensure_ok!(consistent_view.provider_ro()), + block.header().parent_hash(), + ctx.state(), + ); + let trie_input = match res { + Ok(val) => val, + Err(e) => return Err((InsertBlockErrorKind::Other(Box::new(e)), block)), + }; + + self.metrics + .block_validation + .trie_input_duration + .record(trie_input_start.elapsed().as_secs_f64()); + + // Use state root task only if prefix sets are empty, otherwise proof generation is too + // 
expensive because it requires walking over the paths in the prefix set in every + // proof. + if trie_input.prefix_sets.is_empty() { + self.payload_processor.spawn( + header, + txs, + provider_builder, + consistent_view, + trie_input, + &self.config, + ) + } else { + debug!(target: "engine::tree", block=?block_num_hash, "Disabling state root task due to non-empty prefix sets"); + use_state_root_task = false; + self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder) } + } else { + self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder) }; - debug!(target: "engine::tree", block=?block.num_hash(), ?execution_time, "Block executed"); + // Use cached state provider before executing, used in execution after prewarming threads + // complete + let state_provider = CachedStateProvider::new_with_caches( + state_provider, + handle.caches(), + handle.cache_metrics(), + ); + + let (output, execution_finish) = if self.config.state_provider_metrics() { + let state_provider = InstrumentedStateProvider::from_state_provider(&state_provider); + let (output, execution_finish) = + ensure_ok!(self.execute_block(&state_provider, &block, &handle)); + state_provider.record_total_latency(); + (output, execution_finish) + } else { + let (output, execution_finish) = + ensure_ok!(self.execute_block(&state_provider, &block, &handle)); + (output, execution_finish) + }; - // Stop prewarming after execution + // after executing the block we can stop executing transactions handle.stop_prewarming_execution(); - // Perform post-execution validation - if let Err(consensus_error) = self.consensus.validate_block_post_execution(&block, &output) - { - trace!(target: "engine::tree", block=?block.num_hash(), ?consensus_error, "Post-execution validation failed"); - let error = NewPayloadError::Other(Box::new(consensus_error)); - return Ok(PayloadValidationOutcome::Invalid { block, error }); + if let Err(err) = self.consensus.validate_block_post_execution(&block, &output) { + // call post-block hook + self.on_invalid_block(&parent_block, &block, &output, None, ctx.state_mut()); + return Err((err.into(), block)) } - // Compute hashed post state let hashed_state = self.provider.hashed_post_state(&output.state); - debug!(target: "engine::tree", block=?block.num_hash(), "Calculating block state root"); + if let Err(err) = + self.validator.validate_block_post_execution_with_hashed_state(&hashed_state, &block) + { + // call post-block hook + self.on_invalid_block(&parent_block, &block, &output, None, ctx.state_mut()); + return Err((err.into(), block)) + } - debug!( - target: "engine::tree", - block=?block.num_hash(), - ?persisting_kind, - run_parallel_state_root, - has_ancestors_with_missing_trie_updates, - use_state_root_task, - config_allows_state_root_task=self.config.use_state_root_task(), - "Deciding which state root algorithm to run" - ); + debug!(target: "engine::tree", block=?block_num_hash, "Calculating block state root"); + + let root_time = Instant::now(); + + let mut maybe_state_root = None; + + if run_parallel_state_root { + // if we new payload extends the current canonical change we attempt to use the + // background task or try to compute it in parallel + if use_state_root_task { + debug!(target: "engine::tree", block=?block_num_hash, "Using sparse trie state root algorithm"); + match handle.state_root() { + Ok(StateRootComputeOutcome { state_root, trie_updates }) => { + let elapsed = execution_finish.elapsed(); + info!(target: "engine::tree", ?state_root, ?elapsed, "State root task 
finished"); + // we double check the state root here for good measure + if state_root == block.header().state_root() { + maybe_state_root = Some((state_root, trie_updates, elapsed)) + } else { + warn!( + target: "engine::tree", + ?state_root, + block_state_root = ?block.header().state_root(), + "State root task returned incorrect state root" + ); + } + } + Err(error) => { + debug!(target: "engine::tree", %error, "Background parallel state root computation failed"); + } + } + } else { + debug!(target: "engine::tree", block=?block_num_hash, "Using parallel state root algorithm"); + match self.compute_state_root_parallel( + persisting_kind, + block.header().parent_hash(), + &hashed_state, + ctx.state(), + ) { + Ok(result) => { + info!( + target: "engine::tree", + block = ?block_num_hash, + regular_state_root = ?result.0, + "Regular root task finished" + ); + maybe_state_root = Some((result.0, result.1, root_time.elapsed())); + } + Err(ParallelStateRootError::Provider(ProviderError::ConsistentView(error))) => { + debug!(target: "engine::tree", %error, "Parallel state root computation failed consistency check, falling back"); + } + Err(error) => return Err((InsertBlockErrorKind::Other(Box::new(error)), block)), + } + } + } - let state_root_start = Instant::now(); - let (state_root, trie_updates) = match self.compute_state_root_with_strategy( - &block, - &hashed_state, - tree_state, - persisting_kind, - run_parallel_state_root, - use_state_root_task, - &mut handle, - execution_time, - ) { - Ok(result) => result, - Err(error) => return Ok(PayloadValidationOutcome::Invalid { block, error }), - }; + let (state_root, trie_output, root_elapsed) = if let Some(maybe_state_root) = + maybe_state_root + { + maybe_state_root + } else { + // fallback is to compute the state root regularly in sync + if self.config.state_root_fallback() { + debug!(target: "engine::tree", block=?block_num_hash, "Using state root fallback for testing"); + } else { + warn!(target: "engine::tree", block=?block_num_hash, ?persisting_kind, "Failed to compute state root in parallel"); + self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); + } - let state_root_elapsed = state_root_start.elapsed(); - self.metrics - .block_validation - .record_state_root(&trie_updates, state_root_elapsed.as_secs_f64()); + let (root, updates) = + ensure_ok!(state_provider.state_root_with_updates(hashed_state.clone())); + (root, updates, root_time.elapsed()) + }; - debug!(target: "engine::tree", ?state_root, ?state_root_elapsed, block=?block.num_hash(), "Calculated state root"); + self.metrics.block_validation.record_state_root(&trie_output, root_elapsed.as_secs_f64()); + debug!(target: "engine::tree", ?root_elapsed, block=?block_num_hash, "Calculated state root"); - // Ensure state root matches + // ensure state root matches if state_root != block.header().state_root() { // call post-block hook self.on_invalid_block( - &parent_header, + &parent_block, &block, &output, - Some((&trie_updates, state_root)), + Some((&trie_output, state_root)), + ctx.state_mut(), ); - let error = NewPayloadError::Other(Box::new(ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.header().state_root() }.into(), - ))); - return Ok(PayloadValidationOutcome::Invalid { block, error }); + return Err(( + ConsensusError::BodyStateRootDiff( + GotExpected { got: state_root, expected: block.header().state_root() }.into(), + ) + .into(), + block, + )) } - Ok(PayloadValidationOutcome::Valid { block, trie_updates }) - } + // 
terminate prewarming task with good state output + handle.terminate_caching(Some(output.state.clone())); - /// Validates a block according to consensus rules. - /// - /// This method performs: - /// - Header validation - /// - Pre-execution validation - /// - Parent header validation - /// - /// This method is intended to be used by network-specific validators as part of their - /// block validation flow. - pub fn validate_block( - &self, - block: &RecoveredBlock, - ctx: TreeCtx<'_, N>, - ) -> Result<(), ConsensusError> - where - N::Block: Block, - { - let block_num_hash = block.num_hash(); - debug!(target: "engine::tree", block=?block_num_hash, parent = ?block.header().parent_hash(), "Validating downloaded block"); + // If the block is a fork, we don't save the trie updates, because they may be incorrect. + // Instead, they will be recomputed on persistence. + let trie_updates = if ctx.is_fork() { + ExecutedTrieUpdates::Missing + } else { + ExecutedTrieUpdates::Present(Arc::new(trie_output)) + }; - // Validate block consensus rules - trace!(target: "engine::tree", block=?block_num_hash, "Validating block header"); - self.consensus.validate_header(block.sealed_header())?; + Ok(ExecutedBlockWithTrieUpdates { + block: ExecutedBlock { + recovered_block: Arc::new(block), + execution_output: Arc::new(ExecutionOutcome::from((output, block_num_hash.number))), + hashed_state: Arc::new(hashed_state), + }, + trie: trie_updates, + }) + } - trace!(target: "engine::tree", block=?block_num_hash, "Validating block pre-execution"); - self.consensus.validate_block_pre_execution(block)?; + /// Return sealed block from database or in-memory state by hash. + fn sealed_header_by_hash( + &self, + hash: B256, + state: &EngineApiTreeState, + ) -> ProviderResult>> { + // check memory first + let block = + state.tree_state.block_by_hash(hash).map(|block| block.as_ref().clone_sealed_header()); + + if block.is_some() { + Ok(block) + } else { + self.provider.sealed_header_by_hash(hash) + } + } - // Get parent header for validation - let parent_hash = block.header().parent_hash(); - let parent_header = self - .get_parent_header(parent_hash, ctx.state()) - .map_err(|e| ConsensusError::Other(e.to_string()))?; + /// Validate if block is correct and satisfies all the consensus rules that concern the header + /// and block body itself. + fn validate_block_inner(&self, block: &RecoveredBlock) -> Result<(), ConsensusError> { + if let Err(e) = self.consensus.validate_header(block.sealed_header()) { + error!(target: "engine::tree", ?block, "Failed to validate header {}: {e}", block.hash()); + return Err(e) + } - // Validate against parent - trace!(target: "engine::tree", block=?block_num_hash, "Validating block against parent"); - self.consensus.validate_header_against_parent(block.sealed_header(), &parent_header)?; + if let Err(e) = self.consensus.validate_block_pre_execution(block.sealed_block()) { + error!(target: "engine::tree", ?block, "Failed to validate block {}: {e}", block.hash()); + return Err(e) + } - debug!(target: "engine::tree", block=?block_num_hash, "Block validation complete"); Ok(()) } - /// Executes the given block using the provided state provider. 
- fn execute_block( + /// Executes a block with the given state provider + fn execute_block( &mut self, - state_provider: &S, + state_provider: S, block: &RecoveredBlock, - handle: &crate::tree::PayloadHandle, - ) -> Result<(BlockExecutionOutput, Instant), NewPayloadError> - where - S: StateProvider, - { - trace!(target: "engine::tree", block = ?block.num_hash(), "Executing block"); - - // Create state database + handle: &PayloadHandle, + ) -> Result<(BlockExecutionOutput, Instant), InsertBlockErrorKind> { + debug!(target: "engine::tree", block=?block.num_hash(), "Executing block"); let mut db = State::builder() - .with_database(StateProviderDatabase::new(state_provider)) + .with_database(StateProviderDatabase::new(&state_provider)) .with_bundle_update() .without_state_clear() .build(); - - // Configure executor for the block let mut executor = self.evm_config.executor_for_block(&mut db, block); - // Configure precompile caching if enabled if !self.config.precompile_cache_disabled() { - // Get the spec id before the closure - let spec_id = *self.evm_config.evm_env(block.header()).spec_id(); - executor.evm_mut().precompiles_mut().map_precompiles(|address, precompile| { let metrics = self .precompile_cache_metrics .entry(*address) .or_insert_with(|| CachedPrecompileMetrics::new_with_address(*address)) .clone(); - let cache = self.precompile_cache_map.cache_for_address(*address); - CachedPrecompile::wrap(precompile, cache, spec_id, Some(metrics)) + CachedPrecompile::wrap( + precompile, + self.precompile_cache_map.cache_for_address(*address), + *self.evm_config.evm_env(block.header()).spec_id(), + Some(metrics), + ) }); } - // Execute the block - let start = Instant::now(); - let output = self - .metrics - .executor - .execute_metered(executor, block, Box::new(handle.state_hook())) - .map_err(|e| NewPayloadError::Other(Box::new(e)))?; - - Ok((output, start)) - } - - /// Executes a block with proper state provider wrapping and optional instrumentation. - /// - /// This method wraps the base state provider with: - /// 1. `CachedStateProvider` for cache support - /// 2. `InstrumentedStateProvider` for metrics (if enabled) - fn execute_block_with_state_provider( - &mut self, - state_provider: S, - block: &RecoveredBlock, - handle: &crate::tree::PayloadHandle, - ) -> Result<(BlockExecutionOutput, Instant), NewPayloadError> - where - S: StateProvider, - { - // Wrap state provider with cached state provider for execution - let cached_state_provider = CachedStateProvider::new_with_caches( - state_provider, - handle.caches(), - handle.cache_metrics(), - ); - - // Execute the block with optional instrumentation - if self.config.state_provider_metrics() { - let instrumented_provider = - InstrumentedStateProvider::from_state_provider(&cached_state_provider); - let result = self.execute_block(&instrumented_provider, block, handle); - instrumented_provider.record_total_latency(); - result - } else { - self.execute_block(&cached_state_provider, block, handle) - } - } - - /// Computes the state root for the given block. - /// - /// This method attempts to compute the state root in parallel if configured and conditions - /// allow, otherwise falls back to synchronous computation. 
- fn compute_state_root( - &self, - parent_hash: B256, - hashed_state: &HashedPostState, - ) -> Result<(B256, TrieUpdates), NewPayloadError> { - // Get the state provider for the parent block - let state_provider = self - .provider - .history_by_block_hash(parent_hash) - .map_err(|e| NewPayloadError::Other(Box::new(e)))?; - - // Compute the state root with trie updates - let (state_root, trie_updates) = state_provider - .state_root_with_updates(hashed_state.clone()) - .map_err(|e| NewPayloadError::Other(Box::new(e)))?; - - Ok((state_root, trie_updates)) - } - - /// Attempts to get the state root from the background task. - fn try_state_root_from_task( - &self, - handle: &mut crate::tree::PayloadHandle, - block: &RecoveredBlock, - execution_time: Instant, - ) -> Option<(B256, TrieUpdates)> { - match handle.state_root() { - Ok(crate::tree::payload_processor::sparse_trie::StateRootComputeOutcome { - state_root, - trie_updates, - }) => { - let elapsed = execution_time.elapsed(); - debug!(target: "engine::tree", ?state_root, ?elapsed, "State root task finished"); - - // Double check the state root matches what we expect - if state_root == block.header().state_root() { - Some((state_root, trie_updates)) - } else { - debug!( - target: "engine::tree", - ?state_root, - block_state_root = ?block.header().state_root(), - "State root task returned incorrect state root" - ); - None - } - } - Err(error) => { - debug!(target: "engine::tree", %error, "Background state root computation failed"); - None - } - } - } - - /// Computes state root with appropriate strategy based on configuration. - #[allow(clippy::too_many_arguments)] - fn compute_state_root_with_strategy( - &self, - block: &RecoveredBlock, - hashed_state: &HashedPostState, - tree_state: &EngineApiTreeState, - persisting_kind: PersistingKind, - run_parallel_state_root: bool, - use_state_root_task: bool, - handle: &mut crate::tree::PayloadHandle, - execution_time: Instant, - ) -> Result<(B256, TrieUpdates), NewPayloadError> { - let parent_hash = block.header().parent_hash(); - - if !run_parallel_state_root { - // Use synchronous computation - return self.compute_state_root(parent_hash, hashed_state); - } - - // Parallel state root is enabled - if use_state_root_task { - debug!(target: "engine::tree", block=?block.num_hash(), "Using sparse trie state root algorithm"); - - // Try to get state root from background task first - if let Some((state_root, trie_updates)) = - self.try_state_root_from_task(handle, block, execution_time) - { - return Ok((state_root, trie_updates)); - } - - // Background task failed or returned incorrect root, fall back to parallel - debug!(target: "engine::tree", "Falling back to parallel state root computation"); - } else { - debug!(target: "engine::tree", block=?block.num_hash(), "Using parallel state root algorithm"); - } - - // Try parallel computation - match self.compute_state_root_parallel( - parent_hash, - hashed_state, - tree_state, - persisting_kind, - ) { - Ok(result) => Ok(result), - Err(ParallelStateRootError::Provider(ProviderError::ConsistentView(error))) => { - debug!(target: "engine::tree", %error, "Parallel state root computation failed consistency check, falling back to synchronous"); - self.metrics.block_validation.state_root_parallel_fallback_total.increment(1); - self.compute_state_root(parent_hash, hashed_state) - } - Err(error) => Err(NewPayloadError::Other(Box::new(error))), - } + let execution_start = Instant::now(); + let output = self.metrics.executor.execute_metered( + executor, + block, + 
Box::new(handle.state_hook()), + )?; + let execution_finish = Instant::now(); + let execution_time = execution_finish.duration_since(execution_start); + debug!(target: "engine::tree", elapsed = ?execution_time, number=?block.number(), "Executed block"); + Ok((output, execution_finish)) } - /// Computes state root in parallel. + /// Compute state root for the given hashed post state in parallel. /// /// # Returns /// @@ -636,216 +598,87 @@ where /// should be used instead. fn compute_state_root_parallel( &self, + persisting_kind: PersistingKind, parent_hash: B256, hashed_state: &HashedPostState, - tree_state: &EngineApiTreeState, - persisting_kind: PersistingKind, + state: &EngineApiTreeState, ) -> Result<(B256, TrieUpdates), ParallelStateRootError> { let consistent_view = ConsistentDbView::new_with_latest_tip(self.provider.clone())?; - // Compute trie input using the tree state let mut input = self.compute_trie_input( + persisting_kind, consistent_view.provider_ro()?, parent_hash, - tree_state, - persisting_kind, + state, )?; - - // Extend with block we are validating root for + // Extend with block we are validating root for. input.append_ref(hashed_state); ParallelStateRoot::new(consistent_view, input).incremental_root_with_updates() } /// Check if the given block has any ancestors with missing trie updates. - /// - /// This walks back through the chain starting from the parent of the target block - /// and checks if any ancestor blocks are missing trie updates. fn has_ancestors_with_missing_trie_updates( &self, target_header: &SealedHeader, - tree_state: &EngineApiTreeState, + state: &EngineApiTreeState, ) -> bool { // Walk back through the chain starting from the parent of the target block let mut current_hash = target_header.parent_hash(); - while let Some(block) = tree_state.tree_state.executed_block_by_hash(current_hash) { + while let Some(block) = state.tree_state.blocks_by_hash.get(¤t_hash) { // Check if this block is missing trie updates if block.trie.is_missing() { return true; } // Move to the parent block - current_hash = block.block.recovered_block.header().parent_hash(); + current_hash = block.recovered_block().parent_hash(); } false } - /// Determines the persisting kind for the given block based on persistence info. - /// - /// This is adapted from the `persisting_kind_for` method in `EngineApiTreeHandler`. - fn persisting_kind_for( - &self, - block: &N::BlockHeader, - persistence_info: &PersistenceInfo, - tree_state: &EngineApiTreeState, - ) -> PersistingKind { - // Check that we're currently persisting - let Some(action) = &persistence_info.current_action else { - return PersistingKind::NotPersisting; - }; - - // Check that the persistence action is saving blocks, not removing them - let PersistenceAction::SavingBlocks { highest } = action else { - return PersistingKind::PersistingNotDescendant; - }; - - // The block being validated can only be a descendant if its number is higher than - // the highest block persisting. Otherwise, it's likely a fork of a lower block. - if block.number() > highest.number && tree_state.tree_state.is_descendant(*highest, block) { - PersistingKind::PersistingDescendant - } else { - PersistingKind::PersistingNotDescendant - } - } - - /// Creates a payload handle for the given block. - /// - /// This method decides whether to use full spawn (with background state root tasks) - /// or cache-only spawn based on the current conditions. 
- /// - /// Returns a tuple of (`PayloadHandle`, `use_state_root_task`) where `use_state_root_task` - /// indicates whether the state root task was actually enabled (it may be disabled - /// if prefix sets are non-empty). - fn spawn_payload_tasks( - &mut self, - block: &RecoveredBlock, - provider_builder: crate::tree::StateProviderBuilder, - use_state_root_task: bool, - tree_state: &EngineApiTreeState, - persistence_info: &PersistenceInfo, - ) -> (crate::tree::PayloadHandle, bool) { - let header = block.clone_sealed_header(); - let txs = block.clone_transactions_recovered().collect(); - - if !use_state_root_task { - // Use cache-only spawn when state root tasks are not needed - let handle = - self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder); - return (handle, false); - } - - // Try to use full spawn with background state root computation support - let Ok(consistent_view) = ConsistentDbView::new_with_latest_tip(self.provider.clone()) - else { - // Fall back to cache-only spawn if consistent view fails - let handle = - self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder); - return (handle, false); - }; - - let Ok(provider_ro) = consistent_view.provider_ro() else { - // Fall back to cache-only spawn if provider creation fails - let handle = - self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder); - return (handle, false); - }; - - // For the handle creation, we need to determine persisting kind again - // This could be optimized by passing it from validate_payload - let persisting_kind = - self.persisting_kind_for(block.header(), persistence_info, tree_state); - - let trie_input_start = Instant::now(); - let Ok(trie_input) = self.compute_trie_input( - provider_ro, - block.header().parent_hash(), - tree_state, - persisting_kind, - ) else { - // Fall back to cache-only spawn if trie input computation fails - let handle = - self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder); - return (handle, false); - }; - let trie_input_elapsed = trie_input_start.elapsed(); - self.metrics.block_validation.trie_input_duration.record(trie_input_elapsed.as_secs_f64()); - - // Use state root task only if prefix sets are empty, otherwise proof generation is too - // expensive because it requires walking over the paths in the prefix set in every - // proof. - if trie_input.prefix_sets.is_empty() { - let handle = self.payload_processor.spawn( - header, - txs, - provider_builder, - consistent_view, - trie_input, - &self.config, - ); - (handle, true) - } else { - debug!(target: "engine::tree", block=?block.num_hash(), "Disabling state root task due to non-empty prefix sets"); - let handle = - self.payload_processor.spawn_cache_exclusive(header, txs, provider_builder); - (handle, false) - } - } - - /// Retrieves the parent header from tree state or database. - fn get_parent_header( - &self, - parent_hash: B256, - tree_state: &EngineApiTreeState, - ) -> Result, ProviderError> { - // First try to get from tree state - if let Some(parent_block) = tree_state.tree_state.executed_block_by_hash(parent_hash) { - Ok(parent_block.block.recovered_block.sealed_header().clone()) - } else { - // Fallback to database - let header = self - .provider - .header(&parent_hash)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_hash.into()))?; - Ok(SealedHeader::seal_slow(header)) - } - } - /// Creates a `StateProviderBuilder` for the given parent hash. 
/// /// This method checks if the parent is in the tree state (in-memory) or persisted to disk, /// and creates the appropriate provider builder. - fn create_state_provider_builder( + fn state_provider_builder( &self, - parent_hash: B256, - tree_state: &EngineApiTreeState, - ) -> Result, ProviderError> { - if let Some((historical, blocks)) = tree_state.tree_state.blocks_by_hash(parent_hash) { - // Parent is in memory, create builder with overlay - Ok(crate::tree::StateProviderBuilder::new( + hash: B256, + state: &EngineApiTreeState, + ) -> ProviderResult>> { + if let Some((historical, blocks)) = state.tree_state.blocks_by_hash(hash) { + debug!(target: "engine::tree", %hash, %historical, "found canonical state for block in memory, creating provider builder"); + // the block leads back to the canonical chain + return Ok(Some(StateProviderBuilder::new( self.provider.clone(), historical, Some(blocks), - )) - } else { - // Parent is not in memory, check if it's persisted - self.provider - .header(&parent_hash)? - .ok_or_else(|| ProviderError::HeaderNotFound(parent_hash.into()))?; - // Parent is persisted, create builder without overlay - Ok(crate::tree::StateProviderBuilder::new(self.provider.clone(), parent_hash, None)) + ))) } + + // Check if the block is persisted + if let Some(header) = self.provider.header(&hash)? { + debug!(target: "engine::tree", %hash, number = %header.number(), "found canonical state for block in database, creating provider builder"); + // For persisted blocks, we create a builder that will fetch state directly from the + // database + return Ok(Some(StateProviderBuilder::new(self.provider.clone(), hash, None))) + } + + debug!(target: "engine::tree", %hash, "no canonical state found for block"); + Ok(None) } /// Called when an invalid block is encountered during validation. fn on_invalid_block( - &mut self, + &self, parent_header: &SealedHeader, block: &RecoveredBlock, output: &BlockExecutionOutput, trie_updates: Option<(&TrieUpdates, B256)>, + state: &mut EngineApiTreeState, ) { - if self.invalid_headers.get(&block.hash()).is_some() { + if state.invalid_headers.get(&block.hash()).is_some() { // we already marked this block as invalid return; } @@ -853,40 +686,61 @@ where } /// Computes the trie input at the provided parent hash. - fn compute_trie_input( + /// + /// The goal of this function is to take in-memory blocks and generate a [`TrieInput`] that + /// serves as an overlay to the database blocks. + /// + /// It works as follows: + /// 1. Collect in-memory blocks that are descendants of the provided parent hash using + /// [`crate::tree::TreeState::blocks_by_hash`]. + /// 2. If the persistence is in progress, and the block that we're computing the trie input for + /// is a descendant of the currently persisting blocks, we need to be sure that in-memory + /// blocks are not overlapping with the database blocks that may have been already persisted. + /// To do that, we're filtering out in-memory blocks that are lower than the highest database + /// block. + /// 3. Once in-memory blocks are collected and optionally filtered, we compute the + /// [`HashedPostState`] from them. 
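+    ///
+    /// A rough sketch, assuming the database tip is block `B` and persistence is currently
+    /// saving blocks up to `B`:
+    ///
+    /// ```text
+    /// in-memory blocks:    [B-1] [B] [B+1] [B+2]   (descendants of `parent_hash`)
+    /// after filtering:               [B+1] [B+2]   (blocks <= B are already on disk)
+    /// anchor (historical): B                       (parent of the oldest kept block)
+    /// TrieInput = revert state down to the anchor + hashed state of the kept blocks
+    /// ```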
+    fn compute_trie_input<TP: DBProvider + BlockNumReader>(
         &self,
+        persisting_kind: PersistingKind,
         provider: TP,
         parent_hash: B256,
-        tree_state: &EngineApiTreeState<N>,
-        persisting_kind: PersistingKind,
-    ) -> Result<TrieInput, ParallelStateRootError>
-    where
-        TP: DBProvider + BlockNumReader,
-    {
+        state: &EngineApiTreeState<N>,
+    ) -> ProviderResult<TrieInput> {
         let mut input = TrieInput::default();

-        let best_block_number =
-            provider.best_block_number().map_err(ParallelStateRootError::Provider)?;
+        let best_block_number = provider.best_block_number()?;

-        // Get blocks from tree state
-        let (historical, mut blocks) = tree_state
+        let (mut historical, mut blocks) = state
             .tree_state
             .blocks_by_hash(parent_hash)
             .map_or_else(|| (parent_hash.into(), vec![]), |(hash, blocks)| (hash.into(), blocks));

-        // Filter blocks based on persisting kind
-        if matches!(persisting_kind, PersistingKind::PersistingDescendant) {
-            // If we are persisting a descendant, filter out upto the last persisted block
-            let last_persisted_block_number = provider
-                .convert_hash_or_number(historical)
-                .map_err(ParallelStateRootError::Provider)?
-                .ok_or_else(|| {
-                    ParallelStateRootError::Provider(ProviderError::BlockHashNotFound(
-                        historical.as_hash().unwrap(),
-                    ))
-                })?;
-
-            blocks.retain(|b| b.recovered_block().header().number() > last_persisted_block_number);
+        // If the current block is a descendant of the currently persisting blocks, then we need to
+        // filter in-memory blocks, so that none of them are already persisted in the database.
+        if persisting_kind.is_descendant() {
+            // Iterate over the blocks from oldest to newest.
+            while let Some(block) = blocks.last() {
+                let recovered_block = block.recovered_block();
+                if recovered_block.number() <= best_block_number {
+                    // Remove those blocks that are lower than or equal to the highest database
+                    // block.
+                    blocks.pop();
+                } else {
+                    // If the block is higher than the best block number, stop filtering, as it's
+                    // the first block that's not in the database.
+                    break
+                }
+            }
+
+            historical = if let Some(block) = blocks.last() {
+                // If there are any in-memory blocks left after filtering, set the anchor to the
+                // parent of the oldest block.
+                (block.recovered_block().number() - 1).into()
+            } else {
+                // Otherwise, set the anchor to the original provided parent hash.
+                parent_hash.into()
+            };
        }

        if blocks.is_empty() {
@@ -895,26 +749,22 @@ where
            debug!(target: "engine::tree", %parent_hash, %historical, blocks = blocks.len(), "Parent found in memory");
        }

-        // Convert the historical block to the block number
+        // Convert the historical block to the block number.
        let block_number = provider
-            .convert_hash_or_number(historical)
-            .map_err(ParallelStateRootError::Provider)?
-            .ok_or_else(|| {
-                ParallelStateRootError::Provider(ProviderError::BlockHashNotFound(
-                    historical.as_hash().unwrap(),
-                ))
-            })?;
-
-        // Retrieve revert state for historical block
+            .convert_hash_or_number(historical)?
+            .ok_or_else(|| ProviderError::BlockHashNotFound(historical.as_hash().unwrap()))?;
+
+        // Retrieve revert state for historical block.
        let revert_state = if block_number == best_block_number {
-            // No revert state needed if we're at the best block
+            // We do not check against the `last_block_number` here because
+            // `HashedPostState::from_reverts` only uses the database tables, and not static files.
debug!(target: "engine::tree", block_number, best_block_number, "Empty revert state"); HashedPostState::default() } else { let revert_state = HashedPostState::from_reverts::< ::KeyHasher, >(provider.tx_ref(), block_number + 1) - .map_err(|e| ParallelStateRootError::Provider(ProviderError::from(e)))?; + .map_err(ProviderError::from)?; debug!( target: "engine::tree", block_number, @@ -927,7 +777,7 @@ where }; input.append(revert_state); - // Extend with contents of parent in-memory blocks + // Extend with contents of parent in-memory blocks. input.extend_with_blocks( blocks.iter().rev().map(|block| (block.hashed_state(), block.trie_updates())), ); @@ -936,29 +786,18 @@ where } } +/// Output of block or payload validation. +pub type ValidationOutcome>> = + Result, E>; + /// Type that validates the payloads processed by the engine. -pub trait EngineValidator: - PayloadValidator +/// +/// This provides the necessary functions for validating/executing payloads/blocks. +pub trait EngineValidator< + Types: PayloadTypes, + N: NodePrimitives = <::BuiltPayload as BuiltPayload>::Primitives, +>: Send + Sync + 'static { - /// Validates the presence or exclusion of fork-specific fields based on the payload attributes - /// and the message version. - fn validate_version_specific_fields( - &self, - version: EngineApiMessageVersion, - payload_or_attrs: PayloadOrAttributes< - '_, - Types::ExecutionData, - ::PayloadAttributes, - >, - ) -> Result<(), EngineObjectValidationError>; - - /// Ensures that the payload attributes are valid for the given [`EngineApiMessageVersion`]. - fn ensure_well_formed_attributes( - &self, - version: EngineApiMessageVersion, - attributes: &::PayloadAttributes, - ) -> Result<(), EngineObjectValidationError>; - /// Validates the payload attributes with respect to the header. /// /// By default, this enforces that the payload attributes timestamp is greater than the @@ -970,37 +809,84 @@ pub trait EngineValidator: /// See also: fn validate_payload_attributes_against_header( &self, - attr: &::PayloadAttributes, - header: &::Header, - ) -> Result<(), InvalidPayloadAttributesError> { - if attr.timestamp() <= header.timestamp() { - return Err(InvalidPayloadAttributesError::InvalidTimestamp); - } - Ok(()) - } + attr: &Types::PayloadAttributes, + header: &N::BlockHeader, + ) -> Result<(), InvalidPayloadAttributesError>; + + /// Ensures that the given payload does not violate any consensus rules that concern the block's + /// layout. + /// + /// This function must convert the payload into the executable block and pre-validate its + /// fields. + /// + /// Implementers should ensure that the checks are done in the order that conforms with the + /// engine-API specification. + fn ensure_well_formed_payload( + &self, + payload: Types::ExecutionData, + ) -> Result, NewPayloadError>; /// Validates a payload received from engine API. fn validate_payload( &mut self, - payload: Self::ExecutionData, - _ctx: TreeCtx<'_, ::Primitives>, - ) -> Result, NewPayloadError> { - // Default implementation: try to convert using existing method - match self.ensure_well_formed_payload(payload) { - Ok(block) => { - Ok(PayloadValidationOutcome::Valid { block, trie_updates: TrieUpdates::default() }) - } - Err(error) => Err(error), - } - } + payload: Types::ExecutionData, + ctx: TreeCtx<'_, N>, + ) -> ValidationOutcome>; /// Validates a block downloaded from the network. 
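+    ///
+    /// The flow implemented by [`BasicEngineValidator`] for such a block is roughly (sketch):
+    ///
+    /// ```text
+    /// consensus checks -> execute -> post-execution checks -> state root -> ExecutedBlockWithTrieUpdates
+    /// ```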
fn validate_block( + &mut self, + block: RecoveredBlock, + ctx: TreeCtx<'_, N>, + ) -> ValidationOutcome; +} + +impl EngineValidator for BasicEngineValidator +where + P: DatabaseProviderFactory + + BlockReader
+ + StateProviderFactory + + StateReader + + StateCommitmentProvider + + HashedPostStateProvider + + Clone + + 'static, + N: NodePrimitives, + Evm: ConfigureEvm + 'static, + Types: PayloadTypes>, + V: PayloadValidator, +{ + fn validate_payload_attributes_against_header( &self, - _block: &RecoveredBlock, - _ctx: TreeCtx<'_, ::Primitives>, - ) -> Result<(), ConsensusError> { - // Default implementation: accept all blocks - Ok(()) + attr: &Types::PayloadAttributes, + header: &N::BlockHeader, + ) -> Result<(), InvalidPayloadAttributesError> { + self.validator.validate_payload_attributes_against_header(attr, header) + } + + fn ensure_well_formed_payload( + &self, + payload: Types::ExecutionData, + ) -> Result, NewPayloadError> { + let block = self.validator.ensure_well_formed_payload(payload)?; + Ok(block) + } + + fn validate_payload( + &mut self, + payload: Types::ExecutionData, + ctx: TreeCtx<'_, N>, + ) -> ValidationOutcome> { + let block = self.validator.ensure_well_formed_payload(payload)?; + Ok(EngineValidator::::validate_block(self, block, ctx)?) + } + + fn validate_block( + &mut self, + block: RecoveredBlock, + ctx: TreeCtx<'_, N>, + ) -> ValidationOutcome { + self.validate_block_with_state(block, ctx) + .map_err(|(kind, block)| InsertBlockError::new(block.into_sealed_block(), kind)) } } diff --git a/crates/engine/tree/src/tree/tests.rs b/crates/engine/tree/src/tree/tests.rs index d6e4babfeaf..fde19023ece 100644 --- a/crates/engine/tree/src/tree/tests.rs +++ b/crates/engine/tree/src/tree/tests.rs @@ -1,5 +1,5 @@ use super::*; -use crate::{persistence::PersistenceAction, tree::EngineValidator}; +use crate::persistence::PersistenceAction; use alloy_consensus::Header; use alloy_primitives::{ map::{HashMap, HashSet}, @@ -10,13 +10,13 @@ use alloy_rpc_types_engine::{ExecutionData, ExecutionPayloadSidecar, ExecutionPa use assert_matches::assert_matches; use reth_chain_state::{test_utils::TestBlockBuilder, BlockState}; use reth_chainspec::{ChainSpec, HOLESKY, MAINNET}; -use reth_engine_primitives::ForkchoiceStatus; +use reth_engine_primitives::{EngineValidator, ForkchoiceStatus}; use reth_ethereum_consensus::EthBeaconConsensus; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_ethereum_primitives::{Block, EthPrimitives}; use reth_evm_ethereum::MockEvmConfig; use reth_primitives_traits::Block as _; -use reth_provider::test_utils::MockEthProvider; +use reth_provider::{test_utils::MockEthProvider, ExecutionOutcome}; use reth_trie::HashedPostState; use std::{ collections::BTreeMap, @@ -28,13 +28,12 @@ use std::{ #[derive(Debug, Clone)] struct MockEngineValidator; -impl reth_engine_primitives::PayloadValidator for MockEngineValidator { +impl reth_engine_primitives::PayloadValidator for MockEngineValidator { type Block = Block; - type ExecutionData = alloy_rpc_types_engine::ExecutionData; fn ensure_well_formed_payload( &self, - payload: Self::ExecutionData, + payload: ExecutionData, ) -> Result< reth_primitives_traits::RecoveredBlock, reth_payload_primitives::NewPayloadError, @@ -130,7 +129,7 @@ struct TestHarness { EthPrimitives, MockEthProvider, EthEngineTypes, - MockEngineValidator, + BasicEngineValidator, MockEvmConfig, >, to_tree_tx: Sender, Block>>, @@ -178,11 +177,19 @@ impl TestHarness { let payload_builder = PayloadBuilderHandle::new(to_payload_service); let evm_config = MockEvmConfig::default(); + let engine_validator = BasicEngineValidator::new( + provider.clone(), + consensus.clone(), + evm_config.clone(), + payload_validator, + TreeConfig::default(), + 
Box::new(NoopInvalidBlockHook::default()), + ); let tree = EngineApiTreeHandler::new( provider.clone(), consensus, - payload_validator, + engine_validator, from_tree_tx, engine_api_tree_state, canonical_in_memory_state, diff --git a/crates/engine/util/src/reorg.rs b/crates/engine/util/src/reorg.rs index 050a384c446..269c9eb1500 100644 --- a/crates/engine/util/src/reorg.rs +++ b/crates/engine/util/src/reorg.rs @@ -103,7 +103,7 @@ where + StateProviderFactory + ChainSpecProvider, Evm: ConfigureEvm, - Validator: PayloadValidator>, + Validator: PayloadValidator>, { type Item = S::Item; @@ -236,19 +236,20 @@ where } } -fn create_reorg_head( +fn create_reorg_head( provider: &Provider, evm_config: &Evm, payload_validator: &Validator, mut depth: usize, - next_payload: Validator::ExecutionData, + next_payload: T::ExecutionData, ) -> RethResult>> where Provider: BlockReader
, Block = BlockTy> + StateProviderFactory + ChainSpecProvider, Evm: ConfigureEvm, - Validator: PayloadValidator>, + T: PayloadTypes, + Validator: PayloadValidator>, { // Ensure next payload is valid. let next_block = diff --git a/crates/ethereum/node/Cargo.toml b/crates/ethereum/node/Cargo.toml index d1ac937e6db..f709fd62837 100644 --- a/crates/ethereum/node/Cargo.toml +++ b/crates/ethereum/node/Cargo.toml @@ -32,7 +32,6 @@ reth-rpc-builder.workspace = true reth-rpc-server-types.workspace = true reth-node-api.workspace = true reth-chainspec.workspace = true -reth-engine-tree.workspace = true reth-revm = { workspace = true, features = ["std"] } reth-trie-db.workspace = true reth-rpc-eth-types.workspace = true @@ -95,5 +94,4 @@ test-utils = [ "reth-evm/test-utils", "reth-primitives-traits/test-utils", "reth-evm-ethereum/test-utils", - "reth-engine-tree/test-utils", ] diff --git a/crates/ethereum/node/src/engine.rs b/crates/ethereum/node/src/engine.rs index 1c4ea2ce404..34cda0e9d60 100644 --- a/crates/ethereum/node/src/engine.rs +++ b/crates/ethereum/node/src/engine.rs @@ -6,8 +6,7 @@ pub use alloy_rpc_types_engine::{ ExecutionPayloadV1, PayloadAttributes as EthPayloadAttributes, }; use reth_chainspec::{EthChainSpec, EthereumHardforks}; -use reth_engine_primitives::PayloadValidator; -use reth_engine_tree::tree::EngineValidator; +use reth_engine_primitives::{EngineValidator, PayloadValidator}; use reth_ethereum_payload_builder::EthereumExecutionPayloadValidator; use reth_ethereum_primitives::Block; use reth_node_api::PayloadTypes; @@ -37,12 +36,12 @@ impl EthereumEngineValidator { } } -impl PayloadValidator for EthereumEngineValidator +impl PayloadValidator for EthereumEngineValidator where ChainSpec: EthChainSpec + EthereumHardforks + 'static, + Types: PayloadTypes, { type Block = Block; - type ExecutionData = ExecutionData; fn ensure_well_formed_payload( &self, @@ -61,7 +60,7 @@ where fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, - payload_or_attrs: PayloadOrAttributes<'_, Self::ExecutionData, EthPayloadAttributes>, + payload_or_attrs: PayloadOrAttributes<'_, Types::ExecutionData, EthPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { payload_or_attrs .execution_requests() @@ -79,7 +78,7 @@ where validate_version_specific_fields( self.chain_spec(), version, - PayloadOrAttributes::::PayloadAttributes( + PayloadOrAttributes::::PayloadAttributes( attributes, ), ) diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index 202c496d33a..36dec1a2192 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -260,7 +260,7 @@ where self, ctx: reth_node_api::AddOnsContext<'_, N>, ) -> eyre::Result { - let validation_api = ValidationApi::new( + let validation_api = ValidationApi::<_, _, ::Payload>::new( ctx.node.provider().clone(), Arc::new(ctx.node.consensus().clone()), ctx.node.evm_config().clone(), diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 4dcf1107278..c3a4a9c7d0e 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -15,7 +15,7 @@ use reth_db_api::{database_metrics::DatabaseMetrics, Database}; use reth_engine_service::service::{ChainEvent, EngineService}; use reth_engine_tree::{ engine::{EngineApiRequest, EngineRequestHandler}, - tree::TreeConfig, + tree::{BasicEngineValidator, TreeConfig}, }; use reth_engine_util::EngineMessageStreamExt; use reth_exex::ExExManagerHandle; @@ -207,6 
+207,15 @@ where // during this run. .maybe_store_messages(node_config.debug.engine_api_store.clone()); + let engine_validator = BasicEngineValidator::new( + ctx.blockchain_db().clone(), + consensus.clone(), + ctx.components().evm_config().clone(), + engine_payload_validator, + engine_tree_config.clone(), + ctx.invalid_block_hook().await?, + ); + let mut engine_service = EngineService::new( consensus.clone(), ctx.chain_spec(), @@ -218,9 +227,8 @@ where ctx.blockchain_db().clone(), pruner, ctx.components().payload_builder_handle().clone(), - engine_payload_validator, + engine_validator, engine_tree_config, - ctx.invalid_block_hook().await?, ctx.sync_metrics_tx(), ctx.components().evm_config().clone(), ); diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 021e30b6dcb..0a5c31f7ab1 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -9,10 +9,9 @@ use alloy_rpc_types_engine::ExecutionData; use jsonrpsee::{core::middleware::layer::Either, RpcModule}; use reth_chain_state::CanonStateSubscriptions; use reth_chainspec::{ChainSpecProvider, EthereumHardforks}; -use reth_engine_tree::tree::EngineValidator; use reth_node_api::{ - AddOnsContext, BlockTy, EngineTypes, FullNodeComponents, FullNodeTypes, NodeAddOns, NodeTypes, - PayloadTypes, PrimitivesTy, + AddOnsContext, BlockTy, EngineTypes, EngineValidator, FullNodeComponents, FullNodeTypes, + NodeAddOns, NodeTypes, PayloadTypes, PrimitivesTy, }; use reth_node_core::{ node_config::NodeConfig, diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index db5be42a998..9ef5e5f7a78 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -30,7 +30,7 @@ reth-tasks = { workspace = true, optional = true } reth-trie-common.workspace = true reth-node-core.workspace = true reth-rpc-engine-api.workspace = true -reth-engine-tree.workspace = true +reth-engine-primitives.workspace = true reth-engine-local = { workspace = true, features = ["op"] } reth-rpc-api.workspace = true @@ -113,7 +113,6 @@ test-utils = [ "reth-optimism-primitives/arbitrary", "reth-primitives-traits/test-utils", "reth-trie-common/test-utils", - "reth-engine-tree/test-utils", ] reth-codec = ["reth-optimism-primitives/reth-codec"] diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index 8e6e466d037..75012d34374 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -6,7 +6,7 @@ use op_alloy_rpc_types_engine::{ OpPayloadAttributes, }; use reth_consensus::ConsensusError; -use reth_engine_tree::tree::EngineValidator; +use reth_engine_primitives::EngineValidator; use reth_node_api::{ payload::{ validate_parent_beacon_block_root_presence, EngineApiMessageVersion, @@ -112,18 +112,18 @@ where } } -impl PayloadValidator for OpEngineValidator +impl PayloadValidator for OpEngineValidator where P: StateProviderFactory + Unpin + 'static, Tx: SignedTransaction + Unpin + 'static, ChainSpec: OpHardforks + Send + Sync + 'static, + Types: PayloadTypes, { type Block = alloy_consensus::Block; - type ExecutionData = OpExecutionData; fn ensure_well_formed_payload( &self, - payload: Self::ExecutionData, + payload: OpExecutionData, ) -> Result, NewPayloadError> { let sealed_block = self.inner.ensure_well_formed_payload(payload).map_err(NewPayloadError::other)?; @@ -165,7 +165,7 @@ impl EngineValidator for OpEngineValidator::ExecutionData, + ExecutionData = OpExecutionData, BuiltPayload: BuiltPayload>, >, P: StateProviderFactory + Unpin 
+ 'static, @@ -205,7 +205,7 @@ where validate_version_specific_fields( self.chain_spec(), version, - PayloadOrAttributes::::PayloadAttributes( + PayloadOrAttributes::::PayloadAttributes( attributes, ), )?; @@ -290,7 +290,7 @@ mod test { use alloy_primitives::{b64, Address, B256, B64}; use alloy_rpc_types_engine::PayloadAttributes; use reth_chainspec::ChainSpec; - use reth_engine_tree::tree::EngineValidator; + use reth_engine_primitives::EngineValidator; use reth_optimism_chainspec::{OpChainSpec, BASE_SEPOLIA}; use reth_provider::noop::NoopProvider; use reth_trie_common::KeccakKeyHasher; diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 233f3dd134c..97f598628ef 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -27,7 +27,6 @@ reth-node-api.workspace = true reth-node-builder.workspace = true reth-chainspec.workspace = true reth-rpc-engine-api.workspace = true -reth-engine-tree.workspace = true # op-reth reth-optimism-evm.workspace = true diff --git a/crates/optimism/rpc/src/engine.rs b/crates/optimism/rpc/src/engine.rs index 523f997e002..ac2cb7fcb2c 100644 --- a/crates/optimism/rpc/src/engine.rs +++ b/crates/optimism/rpc/src/engine.rs @@ -14,8 +14,7 @@ use op_alloy_rpc_types_engine::{ SuperchainSignal, }; use reth_chainspec::EthereumHardforks; -use reth_engine_tree::tree::EngineValidator; -use reth_node_api::EngineTypes; +use reth_node_api::{EngineTypes, EngineValidator}; use reth_rpc_api::IntoEngineApiRpcModule; use reth_rpc_engine_api::EngineApi; use reth_storage_api::{BlockReader, HeaderProvider, StateProviderFactory}; diff --git a/crates/payload/primitives/src/payload.rs b/crates/payload/primitives/src/payload.rs index 9648a5675c0..709a37768f4 100644 --- a/crates/payload/primitives/src/payload.rs +++ b/crates/payload/primitives/src/payload.rs @@ -2,7 +2,7 @@ use crate::{MessageValidationKind, PayloadAttributes}; use alloc::vec::Vec; -use alloy_eips::{eip4895::Withdrawal, eip7685::Requests}; +use alloy_eips::{eip1898::BlockWithParent, eip4895::Withdrawal, eip7685::Requests, BlockNumHash}; use alloy_primitives::B256; use alloy_rpc_types_engine::ExecutionData; use core::fmt::Debug; @@ -25,6 +25,16 @@ pub trait ExecutionPayload: /// Returns this block's number (height). fn block_number(&self) -> u64; + /// Returns this block's number hash. + fn num_hash(&self) -> BlockNumHash { + BlockNumHash::new(self.block_number(), self.block_hash()) + } + + /// Returns a [`BlockWithParent`] for this block. + fn block_with_parent(&self) -> BlockWithParent { + BlockWithParent::new(self.parent_hash(), self.num_hash()) + } + /// Returns the withdrawals included in this payload. /// /// Returns `None` for pre-Shanghai blocks. 
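[editor's note: the `num_hash`/`block_with_parent` defaults added to `ExecutionPayload` above are pure derivations from the existing `block_number`, `block_hash`, and `parent_hash` accessors, so every implementation gets them for free. A minimal, hypothetical sketch of how they compose — `DummyPayload` and its field values are illustrative stand-ins, not types from the codebase:]

```rust
// Sketch only: mirrors the default-method bodies from the diff above.
use alloy_eips::{eip1898::BlockWithParent, BlockNumHash};
use alloy_primitives::B256;

struct DummyPayload {
    number: u64,
    hash: B256,
    parent: B256,
}

impl DummyPayload {
    /// Mirrors the `num_hash` default: pairs the block number with its hash.
    fn num_hash(&self) -> BlockNumHash {
        BlockNumHash::new(self.number, self.hash)
    }

    /// Mirrors the `block_with_parent` default: records the parent hash
    /// alongside the number/hash pair, which is what reorg tracking needs.
    fn block_with_parent(&self) -> BlockWithParent {
        BlockWithParent::new(self.parent, self.num_hash())
    }
}

fn main() {
    let p = DummyPayload {
        number: 100,
        hash: B256::repeat_byte(1),
        parent: B256::repeat_byte(2),
    };
    println!("{:?}", p.block_with_parent());
}
```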
diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 9185b6d5b8e..825eb485fc2 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -21,7 +21,6 @@ reth-payload-builder-primitives.workspace = true reth-payload-primitives.workspace = true reth-tasks.workspace = true reth-engine-primitives.workspace = true -reth-engine-tree.workspace = true reth-transaction-pool.workspace = true reth-primitives-traits.workspace = true diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 9ed34c5a1e6..8738e94abe9 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -18,8 +18,7 @@ use async_trait::async_trait; use jsonrpsee_core::{server::RpcModule, RpcResult}; use parking_lot::Mutex; use reth_chainspec::EthereumHardforks; -use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineTypes}; -use reth_engine_tree::tree::EngineValidator; +use reth_engine_primitives::{BeaconConsensusEngineHandle, EngineTypes, EngineValidator}; use reth_payload_builder::PayloadStore; use reth_payload_primitives::{ validate_payload_timestamp, EngineApiMessageVersion, ExecutionPayload, diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 9cdc20e3ca8..0b484fd13a8 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -26,7 +26,7 @@ use reth_metrics::{ metrics::{gauge, Gauge}, Metrics, }; -use reth_node_api::NewPayloadError; +use reth_node_api::{NewPayloadError, PayloadTypes}; use reth_primitives_traits::{ constants::GAS_LIMIT_BOUND_DIVISOR, BlockBody, GotExpected, NodePrimitives, RecoveredBlock, SealedBlock, SealedHeaderFor, @@ -45,14 +45,15 @@ use tracing::warn; /// The type that implements the `validation` rpc namespace trait #[derive(Clone, Debug, derive_more::Deref)] -pub struct ValidationApi { +pub struct ValidationApi { #[deref] - inner: Arc>, + inner: Arc>, } -impl ValidationApi +impl ValidationApi where E: ConfigureEvm, + T: PayloadTypes, { /// Create a new instance of the [`ValidationApi`] pub fn new( @@ -62,10 +63,7 @@ where config: ValidationApiConfig, task_spawner: Box, payload_validator: Arc< - dyn PayloadValidator< - Block = ::Block, - ExecutionData = ExecutionData, - >, + dyn PayloadValidator::Block>, >, ) -> Self { let ValidationApiConfig { disallow, validation_window } = config; @@ -112,13 +110,14 @@ where } } -impl ValidationApi +impl ValidationApi where Provider: BlockReaderIdExt
::BlockHeader> + ChainSpecProvider + StateProviderFactory + 'static, E: ConfigureEvm + 'static, + T: PayloadTypes, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( @@ -465,7 +464,7 @@ where } #[async_trait] -impl BlockSubmissionValidationApiServer for ValidationApi +impl BlockSubmissionValidationApiServer for ValidationApi where Provider: BlockReaderIdExt
::BlockHeader> + ChainSpecProvider @@ -473,6 +472,7 @@ where + Clone + 'static, E: ConfigureEvm + 'static, + T: PayloadTypes, { async fn validate_builder_submission_v1( &self, @@ -545,18 +545,14 @@ where } } -pub struct ValidationApiInner { +pub struct ValidationApiInner { /// The provider that can interact with the chain. provider: Provider, /// Consensus implementation. consensus: Arc>, /// Execution payload validator. - payload_validator: Arc< - dyn PayloadValidator< - Block = ::Block, - ExecutionData = ExecutionData, - >, - >, + payload_validator: + Arc::Block>>, /// Block executor factory. evm_config: E, /// Set of disallowed addresses @@ -590,7 +586,7 @@ fn hash_disallow_list(disallow: &HashSet
) -> String { format!("{:x}", hasher.finalize()) } -impl fmt::Debug for ValidationApiInner { +impl fmt::Debug for ValidationApiInner { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ValidationApiInner").finish_non_exhaustive() } diff --git a/examples/custom-engine-types/Cargo.toml b/examples/custom-engine-types/Cargo.toml index 1c1144d1bb9..50bd58620e3 100644 --- a/examples/custom-engine-types/Cargo.toml +++ b/examples/custom-engine-types/Cargo.toml @@ -10,7 +10,6 @@ reth-payload-builder.workspace = true reth-basic-payload-builder.workspace = true reth-ethereum-payload-builder.workspace = true reth-ethereum = { workspace = true, features = ["test-utils", "node", "node-api", "pool"] } -reth-engine-tree.workspace = true reth-tracing.workspace = true reth-trie-db.workspace = true alloy-genesis.workspace = true diff --git a/examples/custom-engine-types/src/main.rs b/examples/custom-engine-types/src/main.rs index d339019d167..06da2f3263e 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -29,15 +29,14 @@ use alloy_rpc_types::{ Withdrawal, }; use reth_basic_payload_builder::{BuildArguments, BuildOutcome, PayloadBuilder, PayloadConfig}; -use reth_engine_tree::tree::EngineValidator; use reth_ethereum::{ chainspec::{Chain, ChainSpec, ChainSpecProvider}, node::{ api::{ payload::{EngineApiMessageVersion, EngineObjectValidationError, PayloadOrAttributes}, - validate_version_specific_fields, AddOnsContext, EngineTypes, FullNodeComponents, - FullNodeTypes, InvalidPayloadAttributesError, NewPayloadError, NodeTypes, - PayloadAttributes, PayloadBuilderAttributes, PayloadTypes, PayloadValidator, + validate_version_specific_fields, AddOnsContext, EngineTypes, EngineValidator, + FullNodeComponents, FullNodeTypes, InvalidPayloadAttributesError, NewPayloadError, + NodeTypes, PayloadAttributes, PayloadBuilderAttributes, PayloadTypes, PayloadValidator, }, builder::{ components::{BasicPayloadServiceBuilder, ComponentsBuilder, PayloadBuilderBuilder}, @@ -52,11 +51,11 @@ use reth_ethereum::{ EthEvmConfig, EthereumEthApiBuilder, }, pool::{PoolTransaction, TransactionPool}, - primitives::{RecoveredBlock, SealedBlock}, + primitives::{Block, RecoveredBlock, SealedBlock}, provider::{EthStorage, StateProviderFactory}, rpc::types::engine::ExecutionPayload, tasks::TaskManager, - Block, EthPrimitives, TransactionSigned, + EthPrimitives, TransactionSigned, }; use reth_ethereum_payload_builder::{EthereumBuilderConfig, EthereumExecutionPayloadValidator}; use reth_payload_builder::{EthBuiltPayload, EthPayloadBuilderAttributes, PayloadBuilderError}; @@ -192,9 +191,8 @@ impl CustomEngineValidator { } } -impl PayloadValidator for CustomEngineValidator { - type Block = Block; - type ExecutionData = ExecutionData; +impl PayloadValidator for CustomEngineValidator { + type Block = reth_ethereum::Block; fn ensure_well_formed_payload( &self, @@ -203,16 +201,22 @@ impl PayloadValidator for CustomEngineValidator { let sealed_block = self.inner.ensure_well_formed_payload(payload)?; sealed_block.try_recover().map_err(|e| NewPayloadError::Other(e.into())) } + + fn validate_payload_attributes_against_header( + &self, + _attr: &CustomPayloadAttributes, + _header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + // skip default timestamp validation + Ok(()) + } } -impl EngineValidator for CustomEngineValidator -where - T: PayloadTypes, -{ +impl EngineValidator for CustomEngineValidator { fn validate_version_specific_fields( &self, version: 
EngineApiMessageVersion, - payload_or_attrs: PayloadOrAttributes<'_, Self::ExecutionData, T::PayloadAttributes>, + payload_or_attrs: PayloadOrAttributes<'_, ExecutionData, CustomPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) } @@ -220,12 +224,12 @@ where fn ensure_well_formed_attributes( &self, version: EngineApiMessageVersion, - attributes: &T::PayloadAttributes, + attributes: &CustomPayloadAttributes, ) -> Result<(), EngineObjectValidationError> { validate_version_specific_fields( self.chain_spec(), version, - PayloadOrAttributes::::PayloadAttributes( + PayloadOrAttributes::::PayloadAttributes( attributes, ), )?; @@ -239,15 +243,6 @@ where Ok(()) } - - fn validate_payload_attributes_against_header( - &self, - _attr: &::PayloadAttributes, - _header: &::Header, - ) -> Result<(), InvalidPayloadAttributesError> { - // skip default timestamp validation - Ok(()) - } } /// Custom engine validator builder diff --git a/examples/custom-node/Cargo.toml b/examples/custom-node/Cargo.toml index 9722919f7d8..203860bc2e1 100644 --- a/examples/custom-node/Cargo.toml +++ b/examples/custom-node/Cargo.toml @@ -16,8 +16,8 @@ reth-db-api.workspace = true reth-op = { workspace = true, features = ["node", "pool", "rpc"] } reth-payload-builder.workspace = true reth-rpc-api.workspace = true +reth-engine-primitives.workspace = true reth-rpc-engine-api.workspace = true -reth-engine-tree.workspace = true reth-ethereum = { workspace = true, features = ["node-api", "network", "evm", "pool", "trie", "storage-api"] } # revm diff --git a/examples/custom-node/src/engine.rs b/examples/custom-node/src/engine.rs index d441b94afa5..4c8bff3a1fd 100644 --- a/examples/custom-node/src/engine.rs +++ b/examples/custom-node/src/engine.rs @@ -5,22 +5,23 @@ use crate::{ }; use op_alloy_rpc_types_engine::{OpExecutionData, OpExecutionPayload}; use reth_chain_state::ExecutedBlockWithTrieUpdates; -use reth_engine_tree::tree::EngineValidator; +use reth_engine_primitives::EngineValidator; use reth_ethereum::{ node::api::{ validate_version_specific_fields, AddOnsContext, BuiltPayload, EngineApiMessageVersion, - EngineObjectValidationError, ExecutionPayload, FullNodeComponents, - InvalidPayloadAttributesError, NewPayloadError, NodePrimitives, PayloadAttributes, - PayloadBuilderAttributes, PayloadOrAttributes, PayloadTypes, PayloadValidator, + EngineObjectValidationError, ExecutionPayload, FullNodeComponents, NewPayloadError, + NodePrimitives, PayloadAttributes, PayloadBuilderAttributes, PayloadOrAttributes, + PayloadTypes, PayloadValidator, }, primitives::{RecoveredBlock, SealedBlock}, storage::StateProviderFactory, trie::{KeccakKeyHasher, KeyHasher}, }; -use reth_node_builder::rpc::EngineValidatorBuilder; +use reth_node_builder::{rpc::EngineValidatorBuilder, InvalidPayloadAttributesError}; use reth_op::{ node::{ - engine::OpEngineValidator, OpBuiltPayload, OpPayloadAttributes, OpPayloadBuilderAttributes, + engine::OpEngineValidator, OpBuiltPayload, OpEngineTypes, OpPayloadAttributes, + OpPayloadBuilderAttributes, }, OpTransactionSigned, }; @@ -216,18 +217,20 @@ where } } -impl
<P> PayloadValidator for CustomEngineValidator<P>
+impl<P> PayloadValidator<CustomPayloadTypes> for CustomEngineValidator<P>
where P: StateProviderFactory + Send + Sync + Unpin + 'static, { type Block = crate::primitives::block::Block; - type ExecutionData = CustomExecutionData; fn ensure_well_formed_payload( &self, payload: CustomExecutionData, ) -> Result, NewPayloadError> { - let sealed_block = self.inner.ensure_well_formed_payload(payload.inner)?; + let sealed_block = PayloadValidator::::ensure_well_formed_payload( + &self.inner, + payload.inner, + )?; let (block, senders) = sealed_block.split_sealed(); let (header, body) = block.split_sealed_header_body(); let header = CustomHeader { inner: header.into_header(), extension: payload.extension }; @@ -236,6 +239,15 @@ where Ok(block.with_senders(senders)) } + + fn validate_payload_attributes_against_header( + &self, + _attr: &OpPayloadAttributes, + _header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + // skip default timestamp validation + Ok(()) + } } impl
<P> EngineValidator<CustomPayloadTypes> for CustomEngineValidator<P>
@@ -245,7 +257,7 @@ where fn validate_version_specific_fields( &self, version: EngineApiMessageVersion, - payload_or_attrs: PayloadOrAttributes<'_, Self::ExecutionData, OpPayloadAttributes>, + payload_or_attrs: PayloadOrAttributes<'_, CustomExecutionData, OpPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { validate_version_specific_fields(self.chain_spec(), version, payload_or_attrs) } @@ -258,7 +270,7 @@ where validate_version_specific_fields( self.chain_spec(), version, - PayloadOrAttributes::::PayloadAttributes(attributes), + PayloadOrAttributes::::PayloadAttributes(attributes), )?; // custom validation logic - ensure that the custom field is not zero @@ -270,15 +282,6 @@ where Ok(()) } - - fn validate_payload_attributes_against_header( - &self, - _attr: &OpPayloadAttributes, - _header: &::Header, - ) -> Result<(), InvalidPayloadAttributesError> { - // skip default timestamp validation - Ok(()) - } } /// Custom error type used in payload attributes validation From b1ae2175db1a48c4c6ee723b13d5f3471ec03b9c Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Tue, 29 Jul 2025 13:17:06 +0200 Subject: [PATCH 292/305] feat: update ScrollAddOns Signed-off-by: Gregory Edison --- Cargo.lock | 962 +++++++++++++++---- Cargo.toml | 32 +- crates/e2e-test-utils/src/testsuite/setup.rs | 3 + crates/ethereum/node/tests/e2e/dev.rs | 4 +- crates/net/network-api/Cargo.toml | 2 + crates/node/builder/src/builder/states.rs | 6 +- crates/node/builder/src/launch/debug.rs | 2 +- crates/node/builder/src/launch/engine.rs | 3 - crates/scroll/cli/Cargo.toml | 2 + crates/scroll/evm/Cargo.toml | 1 + crates/scroll/node/Cargo.toml | 2 + crates/scroll/node/src/addons.rs | 118 ++- crates/scroll/node/src/builder/payload.rs | 2 +- crates/scroll/node/src/node.rs | 34 +- crates/scroll/rpc/Cargo.toml | 8 - examples/custom-node/Cargo.toml | 2 + 16 files changed, 945 insertions(+), 238 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 362c0f4387d..17cdb408cd3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -851,7 +851,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d3615ec64d775fec840f4e9d5c8e1f739eb1854d8d28db093fb3d4805e0cb53" dependencies = [ "alloy-json-rpc", + "alloy-rpc-types-engine", "alloy-transport", + "http-body-util", + "hyper", + "hyper-tls", + "hyper-util", + "jsonwebtoken", "reqwest", "serde_json", "tower", @@ -2297,7 +2303,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ "lazy_static", - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -2419,6 +2425,16 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation" version = "0.10.1" @@ -3189,6 +3205,17 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "enumn" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f9ed6b3789237c8a0c1c505af1c7eb2c560df6186f01b098c3a1064ea532f38" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "equivalent" version = "1.0.2" @@ -3202,7 +3229,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" 
dependencies = [ "libc", - "windows-sys 0.60.2", + "windows-sys 0.52.0", ] [[package]] @@ -3441,6 +3468,7 @@ dependencies = [ "reth-op", "reth-optimism-forks", "reth-payload-builder", + "reth-primitives-traits", "reth-rpc-api", "reth-rpc-engine-api", "revm", @@ -3801,6 +3829,21 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -4428,6 +4471,22 @@ dependencies = [ "webpki-roots 1.0.2", ] +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper", + "hyper-util", + "native-tls", + "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" version = "0.1.16" @@ -4447,9 +4506,11 @@ dependencies = [ "percent-encoding", "pin-project-lite", "socket2 0.6.0", + "system-configuration", "tokio", "tower-service", "tracing", + "windows-registry", ] [[package]] @@ -4464,7 +4525,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.61.2", + "windows-core 0.57.0", ] [[package]] @@ -4920,7 +4981,7 @@ checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -5276,7 +5337,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" dependencies = [ "cfg-if", - "windows-targets 0.53.2", + "windows-targets 0.48.5", ] [[package]] @@ -5791,6 +5852,23 @@ dependencies = [ "unsigned-varint", ] +[[package]] +name = "native-tls" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework 2.11.1", + "security-framework-sys", + "tempfile", +] + [[package]] name = "nom" version = "7.1.3" @@ -6127,8 +6205,7 @@ dependencies = [ [[package]] name = "op-revm" version = "8.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce1dc7533f4e5716c55cd3d62488c6200cb4dfda96e0c75a7e484652464343b" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "auto_impl", "once_cell", @@ -6142,12 +6219,50 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" +[[package]] +name = "openssl" +version = "0.10.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "foreign-types", + "libc", + 
"once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.104", +] + [[package]] name = "openssl-probe" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-sys" +version = "0.9.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "opentelemetry" version = "0.29.1" @@ -6823,7 +6938,7 @@ dependencies = [ "once_cell", "socket2 0.5.10", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -7623,6 +7738,7 @@ dependencies = [ "reth-optimism-primitives", "reth-primitives-traits", "reth-prune-types", + "reth-scroll-primitives", "reth-stages-types", "reth-storage-errors", "reth-trie-common", @@ -7905,6 +8021,7 @@ dependencies = [ "reth-payload-primitives", "reth-provider", "reth-transaction-pool", + "scroll-alloy-rpc-types-engine", "tokio", "tokio-stream", "tracing", @@ -8393,6 +8510,7 @@ dependencies = [ "reth-storage-errors", "reth-trie-common", "revm", + "scroll-alloy-evm", ] [[package]] @@ -8737,6 +8855,7 @@ dependencies = [ "reth-network-p2p", "reth-network-peers", "reth-network-types", + "reth-primitives-traits", "reth-tokio-util", "serde", "thiserror 2.0.12", @@ -9557,6 +9676,7 @@ dependencies = [ "reth-chainspec", "reth-errors", "reth-primitives-traits", + "scroll-alloy-rpc-types-engine", "serde", "thiserror 2.0.12", "tokio", @@ -9632,6 +9752,7 @@ dependencies = [ "revm-bytecode", "revm-primitives", "revm-state", + "scroll-alloy-consensus", "secp256k1 0.30.0", "serde", "serde_json", @@ -10007,8 +10128,13 @@ dependencies = [ "reth-evm", "reth-optimism-primitives", "reth-primitives-traits", + "reth-scroll-primitives", "reth-storage-api", "revm-context", + "revm-scroll", + "scroll-alloy-consensus", + "scroll-alloy-evm", + "scroll-alloy-rpc-types", "thiserror 2.0.12", ] @@ -10188,153 +10314,453 @@ dependencies = [ ] [[package]] -name = "reth-stages" +name = "reth-scroll-chainspec" version = "1.6.0" dependencies = [ + "alloy-chains", "alloy-consensus", "alloy-eips", + "alloy-genesis", "alloy-primitives", - "alloy-rlp", - "assert_matches", - "bincode 1.3.3", - "blake3", - "codspeed-criterion-compat", - "eyre", - "futures-util", - "itertools 0.14.0", - "num-traits", - "paste", - "rand 0.9.2", - "rayon", - "reqwest", + "alloy-serde", + "auto_impl", + "derive_more", + "once_cell", "reth-chainspec", - "reth-codecs", - "reth-config", - "reth-consensus", - "reth-db", - "reth-db-api", - "reth-downloaders", - "reth-era", - "reth-era-downloader", - "reth-era-utils", - "reth-ethereum-consensus", - "reth-ethereum-primitives", - "reth-etl", - "reth-evm", - "reth-evm-ethereum", - "reth-execution-errors", - "reth-execution-types", - "reth-exex", - "reth-fs-util", - "reth-network-p2p", + "reth-ethereum-forks", "reth-network-peers", "reth-primitives-traits", - "reth-provider", - "reth-prune", - "reth-prune-types", - "reth-revm", - "reth-stages-api", - "reth-static-file", - "reth-static-file-types", - "reth-storage-errors", - "reth-testing-utils", - "reth-tracing", - "reth-trie", - "reth-trie-db", + "reth-scroll-forks", + 
"reth-trie-common", + "scroll-alloy-hardforks", "serde", - "tempfile", - "thiserror 2.0.12", - "tokio", + "serde_json", +] + +[[package]] +name = "reth-scroll-cli" +version = "1.6.0" +dependencies = [ + "clap", + "eyre", + "proptest", + "reth-cli", + "reth-cli-commands", + "reth-cli-runner", + "reth-consensus", + "reth-db", + "reth-node-builder", + "reth-node-core", + "reth-node-metrics", + "reth-scroll-chainspec", + "reth-scroll-consensus", + "reth-scroll-evm", + "reth-scroll-node", + "reth-scroll-primitives", + "reth-tracing", + "scroll-alloy-consensus", "tracing", ] [[package]] -name = "reth-stages-api" +name = "reth-scroll-consensus" version = "1.6.0" dependencies = [ - "alloy-eips", + "alloy-consensus", "alloy-primitives", - "aquamarine", - "assert_matches", - "auto_impl", - "futures-util", - "metrics", + "reth-chainspec", "reth-consensus", - "reth-errors", - "reth-metrics", - "reth-network-p2p", + "reth-consensus-common", + "reth-ethereum-consensus", + "reth-execution-types", "reth-primitives-traits", - "reth-provider", - "reth-prune", - "reth-stages-types", - "reth-static-file", - "reth-static-file-types", - "reth-testing-utils", - "reth-tokio-util", + "reth-scroll-primitives", + "scroll-alloy-hardforks", "thiserror 2.0.12", - "tokio", - "tokio-stream", "tracing", ] [[package]] -name = "reth-stages-types" +name = "reth-scroll-engine-primitives" version = "1.6.0" dependencies = [ + "alloy-consensus", + "alloy-eips", "alloy-primitives", + "alloy-rlp", + "alloy-rpc-types-engine", "arbitrary", - "bytes", - "modular-bitfield", - "proptest", - "proptest-arbitrary-interop", + "eyre", "rand 0.9.2", - "reth-codecs", - "reth-trie-common", + "reth-chain-state", + "reth-chainspec", + "reth-engine-primitives", + "reth-payload-builder", + "reth-payload-primitives", + "reth-primitives", + "reth-primitives-traits", + "reth-scroll-chainspec", + "reth-scroll-primitives", + "scroll-alloy-hardforks", + "scroll-alloy-rpc-types-engine", "serde", - "test-fuzz", + "sha2 0.10.9", ] [[package]] -name = "reth-stateless" +name = "reth-scroll-evm" version = "1.6.0" dependencies = [ "alloy-consensus", + "alloy-eips", + "alloy-evm", "alloy-primitives", - "alloy-rlp", - "alloy-rpc-types-debug", - "alloy-trie", - "itertools 0.14.0", + "derive_more", + "eyre", "reth-chainspec", - "reth-consensus", - "reth-errors", - "reth-ethereum-consensus", - "reth-ethereum-primitives", "reth-evm", + "reth-execution-types", + "reth-primitives", "reth-primitives-traits", - "reth-revm", - "reth-trie-common", - "reth-trie-sparse", - "serde", - "serde_with", + "reth-rpc-eth-api", + "reth-scroll-chainspec", + "reth-scroll-forks", + "reth-scroll-primitives", + "reth-storage-api", + "revm", + "revm-primitives", + "revm-scroll", + "scroll-alloy-consensus", + "scroll-alloy-evm", + "scroll-alloy-hardforks", "thiserror 2.0.12", + "tracing", ] [[package]] -name = "reth-static-file" +name = "reth-scroll-forks" version = "1.6.0" dependencies = [ + "alloy-chains", "alloy-primitives", - "assert_matches", - "parking_lot", - "rayon", - "reth-codecs", - "reth-db", - "reth-db-api", - "reth-primitives-traits", - "reth-provider", - "reth-prune-types", - "reth-stages", - "reth-stages-types", + "auto_impl", + "once_cell", + "reth-ethereum-forks", + "scroll-alloy-hardforks", + "serde", +] + +[[package]] +name = "reth-scroll-node" +version = "1.6.0" +dependencies = [ + "alloy-consensus", + "alloy-genesis", + "alloy-primitives", + "alloy-rpc-types-engine", + "alloy-rpc-types-eth", + "clap", + "eyre", + "reth-chainspec", + "reth-db", + "reth-e2e-test-utils", 
+ "reth-engine-local", + "reth-eth-wire-types", + "reth-evm", + "reth-network", + "reth-node-api", + "reth-node-builder", + "reth-node-core", + "reth-node-types", + "reth-payload-builder", + "reth-primitives", + "reth-primitives-traits", + "reth-provider", + "reth-revm", + "reth-rpc-eth-types", + "reth-rpc-server-types", + "reth-scroll-chainspec", + "reth-scroll-consensus", + "reth-scroll-engine-primitives", + "reth-scroll-evm", + "reth-scroll-node", + "reth-scroll-payload", + "reth-scroll-primitives", + "reth-scroll-rpc", + "reth-scroll-txpool", + "reth-tasks", + "reth-tracing", + "reth-transaction-pool", + "reth-trie-db", + "revm", + "scroll-alloy-consensus", + "scroll-alloy-evm", + "scroll-alloy-hardforks", + "scroll-alloy-network", + "scroll-alloy-rpc-types-engine", + "serde_json", + "tokio", + "tracing", +] + +[[package]] +name = "reth-scroll-payload" +version = "1.6.0" +dependencies = [ + "alloy-consensus", + "alloy-primitives", + "alloy-rlp", + "futures-util", + "reth-basic-payload-builder", + "reth-chain-state", + "reth-chainspec", + "reth-evm", + "reth-execution-types", + "reth-payload-builder", + "reth-payload-primitives", + "reth-payload-util", + "reth-primitives-traits", + "reth-revm", + "reth-scroll-chainspec", + "reth-scroll-engine-primitives", + "reth-scroll-evm", + "reth-scroll-primitives", + "reth-storage-api", + "reth-transaction-pool", + "revm", + "scroll-alloy-hardforks", + "thiserror 2.0.12", + "tracing", +] + +[[package]] +name = "reth-scroll-primitives" +version = "1.6.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "arbitrary", + "bytes", + "modular-bitfield", + "once_cell", + "rand 0.9.2", + "reth-codecs", + "reth-primitives-traits", + "reth-zstd-compressors", + "rstest", + "scroll-alloy-consensus", + "serde", +] + +[[package]] +name = "reth-scroll-rpc" +version = "1.6.0" +dependencies = [ + "alloy-consensus", + "alloy-json-rpc", + "alloy-primitives", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-transport", + "alloy-transport-http", + "eyre", + "jsonrpsee-types", + "reqwest", + "reth-evm", + "reth-node-api", + "reth-node-builder", + "reth-primitives-traits", + "reth-provider", + "reth-rpc", + "reth-rpc-convert", + "reth-rpc-eth-api", + "reth-rpc-eth-types", + "reth-scroll-primitives", + "reth-tasks", + "reth-transaction-pool", + "revm", + "scroll-alloy-consensus", + "scroll-alloy-network", + "scroll-alloy-rpc-types", + "thiserror 2.0.12", + "tokio", + "tracing", +] + +[[package]] +name = "reth-scroll-txpool" +version = "1.6.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "c-kzg", + "derive_more", + "parking_lot", + "reth-chainspec", + "reth-primitives-traits", + "reth-provider", + "reth-revm", + "reth-scroll-chainspec", + "reth-scroll-evm", + "reth-scroll-forks", + "reth-scroll-primitives", + "reth-storage-api", + "reth-transaction-pool", + "revm-scroll", + "scroll-alloy-consensus", +] + +[[package]] +name = "reth-stages" +version = "1.6.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "assert_matches", + "bincode 1.3.3", + "blake3", + "codspeed-criterion-compat", + "eyre", + "futures-util", + "itertools 0.14.0", + "num-traits", + "paste", + "rand 0.9.2", + "rayon", + "reqwest", + "reth-chainspec", + "reth-codecs", + "reth-config", + "reth-consensus", + "reth-db", + "reth-db-api", + "reth-downloaders", + "reth-era", + "reth-era-downloader", + "reth-era-utils", + "reth-ethereum-consensus", + "reth-ethereum-primitives", + 
"reth-etl", + "reth-evm", + "reth-evm-ethereum", + "reth-execution-errors", + "reth-execution-types", + "reth-exex", + "reth-fs-util", + "reth-network-p2p", + "reth-network-peers", + "reth-primitives-traits", + "reth-provider", + "reth-prune", + "reth-prune-types", + "reth-revm", + "reth-stages-api", + "reth-static-file", + "reth-static-file-types", + "reth-storage-errors", + "reth-testing-utils", + "reth-tracing", + "reth-trie", + "reth-trie-db", + "serde", + "tempfile", + "thiserror 2.0.12", + "tokio", + "tracing", +] + +[[package]] +name = "reth-stages-api" +version = "1.6.0" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "aquamarine", + "assert_matches", + "auto_impl", + "futures-util", + "metrics", + "reth-consensus", + "reth-errors", + "reth-metrics", + "reth-network-p2p", + "reth-primitives-traits", + "reth-provider", + "reth-prune", + "reth-stages-types", + "reth-static-file", + "reth-static-file-types", + "reth-testing-utils", + "reth-tokio-util", + "thiserror 2.0.12", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "reth-stages-types" +version = "1.6.0" +dependencies = [ + "alloy-primitives", + "arbitrary", + "bytes", + "modular-bitfield", + "proptest", + "proptest-arbitrary-interop", + "rand 0.9.2", + "reth-codecs", + "reth-trie-common", + "serde", + "test-fuzz", +] + +[[package]] +name = "reth-stateless" +version = "1.6.0" +dependencies = [ + "alloy-consensus", + "alloy-primitives", + "alloy-rlp", + "alloy-rpc-types-debug", + "alloy-trie", + "itertools 0.14.0", + "reth-chainspec", + "reth-consensus", + "reth-errors", + "reth-ethereum-consensus", + "reth-ethereum-primitives", + "reth-evm", + "reth-primitives-traits", + "reth-revm", + "reth-trie-common", + "reth-trie-sparse", + "serde", + "serde_with", + "thiserror 2.0.12", +] + +[[package]] +name = "reth-static-file" +version = "1.6.0" +dependencies = [ + "alloy-primitives", + "assert_matches", + "parking_lot", + "rayon", + "reth-codecs", + "reth-db", + "reth-db-api", + "reth-primitives-traits", + "reth-provider", + "reth-prune-types", + "reth-stages", + "reth-stages-types", "reth-static-file-types", "reth-storage-errors", "reth-testing-utils", @@ -10522,7 +10948,7 @@ dependencies = [ "reth-storage-api", "reth-tasks", "reth-tracing", - "revm-interpreter 23.0.2", + "revm-interpreter", "revm-primitives", "rustc-hash 2.1.1", "schnellru", @@ -10724,17 +11150,16 @@ dependencies = [ [[package]] name = "revm" version = "27.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6bf82101a1ad8a2b637363a37aef27f88b4efc8a6e24c72bf5f64923dc5532" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "revm-bytecode", "revm-context", - "revm-context-interface 9.0.0", + "revm-context-interface", "revm-database", "revm-database-interface", "revm-handler", "revm-inspector", - "revm-interpreter 24.0.0", + "revm-interpreter", "revm-precompile", "revm-primitives", "revm-state", @@ -10743,8 +11168,7 @@ dependencies = [ [[package]] name = "revm-bytecode" version = "6.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6922f7f4fbc15ca61ea459711ff75281cc875648c797088c34e4e064de8b8a7c" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "bitvec", "once_cell", @@ -10756,29 +11180,12 @@ dependencies = [ [[package]] name = "revm-context" version = "8.0.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd508416a35a4d8a9feaf5ccd06ac6d6661cd31ee2dc0252f9f7316455d71f9" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "cfg-if", "derive-where", "revm-bytecode", - "revm-context-interface 9.0.0", - "revm-database-interface", - "revm-primitives", - "revm-state", - "serde", -] - -[[package]] -name = "revm-context-interface" -version = "8.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a303a93102fceccec628265efd550ce49f2817b38ac3a492c53f7d524f18a1ca" -dependencies = [ - "alloy-eip2930", - "alloy-eip7702", - "auto_impl", - "either", + "revm-context-interface", "revm-database-interface", "revm-primitives", "revm-state", @@ -10788,8 +11195,7 @@ dependencies = [ [[package]] name = "revm-context-interface" version = "9.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc90302642d21c8f93e0876e201f3c5f7913c4fcb66fb465b0fd7b707dfe1c79" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -10804,8 +11210,7 @@ dependencies = [ [[package]] name = "revm-database" version = "7.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61495e01f01c343dd90e5cb41f406c7081a360e3506acf1be0fc7880bfb04eb" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "alloy-eips", "revm-bytecode", @@ -10818,8 +11223,7 @@ dependencies = [ [[package]] name = "revm-database-interface" version = "7.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20628d6cd62961a05f981230746c16854f903762d01937f13244716530bf98f" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "auto_impl", "either", @@ -10831,16 +11235,15 @@ dependencies = [ [[package]] name = "revm-handler" version = "8.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1529c8050e663be64010e80ec92bf480315d21b1f2dbf65540028653a621b27d" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "auto_impl", "derive-where", "revm-bytecode", "revm-context", - "revm-context-interface 9.0.0", + "revm-context-interface", "revm-database-interface", - "revm-interpreter 24.0.0", + "revm-interpreter", "revm-precompile", "revm-primitives", "revm-state", @@ -10850,15 +11253,14 @@ dependencies = [ [[package]] name = "revm-inspector" version = "8.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f78db140e332489094ef314eaeb0bd1849d6d01172c113ab0eb6ea8ab9372926" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "auto_impl", "either", "revm-context", "revm-database-interface", "revm-handler", - "revm-interpreter 24.0.0", + "revm-interpreter", "revm-primitives", "revm-state", "serde", @@ -10885,26 +11287,13 @@ dependencies = [ "thiserror 2.0.12", ] -[[package]] -name = "revm-interpreter" -version = "23.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95c4a9a1662d10b689b66b536ddc2eb1e89f5debfcabc1a2d7b8417a2fa47cd" -dependencies = [ - "revm-bytecode", - "revm-context-interface 8.0.1", - "revm-primitives", - "serde", -] 
- [[package]] name = "revm-interpreter" version = "24.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff9d7d9d71e8a33740b277b602165b6e3d25fff091ba3d7b5a8d373bf55f28a7" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "revm-bytecode", - "revm-context-interface 9.0.0", + "revm-context-interface", "revm-primitives", "serde", ] @@ -10912,8 +11301,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "25.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cee3f336b83621294b4cfe84d817e3eef6f3d0fce00951973364cc7f860424d" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -10939,19 +11327,31 @@ dependencies = [ [[package]] name = "revm-primitives" version = "20.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66145d3dc61c0d6403f27fc0d18e0363bb3b7787e67970a05c71070092896599" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "alloy-primitives", "num_enum", "serde", ] +[[package]] +name = "revm-scroll" +version = "0.1.0" +source = "git+https://github.com/scroll-tech/scroll-revm?branch=feat%2Fv82#151eab7b5772a95b9d3279c44ff638e0119361cf" +dependencies = [ + "auto_impl", + "enumn", + "once_cell", + "revm", + "revm-inspector", + "revm-primitives", + "serde", +] + [[package]] name = "revm-state" version = "7.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc830a0fd2600b91e371598e3d123480cd7bb473dd6def425a51213aa6c6d57" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "bitflags 2.9.1", "revm-bytecode", @@ -11195,7 +11595,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -11208,7 +11608,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.9.4", - "windows-sys 0.60.2", + "windows-sys 0.52.0", ] [[package]] @@ -11235,7 +11635,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 3.2.0", ] [[package]] @@ -11254,7 +11654,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19787cda76408ec5404443dc8b31795c87cd8fec49762dc75fa727740d34acc1" dependencies = [ - "core-foundation", + "core-foundation 0.10.1", "core-foundation-sys", "jni", "log", @@ -11263,10 +11663,10 @@ dependencies = [ "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki", - "security-framework", + "security-framework 3.2.0", "security-framework-sys", "webpki-root-certs 0.26.11", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -11381,6 +11781,155 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "scroll-alloy-consensus" +version = "1.6.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "arbitrary", + "bincode 1.3.3", + "derive_more", + "modular-bitfield", + "proptest", + "proptest-arbitrary-interop", + "rand 0.9.2", + "reth-codecs", + "reth-codecs-derive", + "serde", + "serde_json", + "serde_with", + "test-fuzz", +] + +[[package]] +name 
= "scroll-alloy-evm" +version = "1.6.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-evm", + "alloy-hardforks", + "alloy-primitives", + "auto_impl", + "eyre", + "reth-evm", + "reth-scroll-chainspec", + "reth-scroll-evm", + "revm", + "revm-scroll", + "scroll-alloy-consensus", + "scroll-alloy-hardforks", + "serde", + "zstd", +] + +[[package]] +name = "scroll-alloy-hardforks" +version = "1.6.0" +dependencies = [ + "alloy-hardforks", + "auto_impl", + "serde", +] + +[[package]] +name = "scroll-alloy-network" +version = "1.6.0" +dependencies = [ + "alloy-consensus", + "alloy-network", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-signer", + "scroll-alloy-consensus", + "scroll-alloy-rpc-types", +] + +[[package]] +name = "scroll-alloy-provider" +version = "1.6.0" +dependencies = [ + "alloy-primitives", + "alloy-provider", + "alloy-rpc-client", + "alloy-rpc-types-engine", + "alloy-transport", + "alloy-transport-http", + "async-trait", + "derive_more", + "eyre", + "futures-util", + "http-body-util", + "jsonrpsee", + "reqwest", + "reth-engine-primitives", + "reth-payload-builder", + "reth-payload-primitives", + "reth-primitives", + "reth-primitives-traits", + "reth-provider", + "reth-rpc-api", + "reth-rpc-builder", + "reth-rpc-engine-api", + "reth-scroll-chainspec", + "reth-scroll-engine-primitives", + "reth-scroll-node", + "reth-scroll-payload", + "reth-tasks", + "reth-tracing", + "reth-transaction-pool", + "scroll-alloy-network", + "scroll-alloy-rpc-types-engine", + "thiserror 2.0.12", + "tokio", + "tower", +] + +[[package]] +name = "scroll-alloy-rpc-types" +version = "1.6.0" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-types-eth", + "alloy-serde", + "arbitrary", + "derive_more", + "scroll-alloy-consensus", + "serde", + "serde_json", + "similar-asserts", +] + +[[package]] +name = "scroll-alloy-rpc-types-engine" +version = "1.6.0" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types-engine", + "arbitrary", + "serde", + "serde_json", +] + +[[package]] +name = "scroll-reth" +version = "1.6.0" +dependencies = [ + "clap", + "reth-cli-util", + "reth-scroll-cli", + "reth-scroll-node", + "tracing", +] + [[package]] name = "sec1" version = "0.7.3" @@ -11437,6 +11986,19 @@ dependencies = [ "cc", ] +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.9.1", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + [[package]] name = "security-framework" version = "3.2.0" @@ -11444,7 +12006,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ "bitflags 2.9.1", - "core-foundation", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -12021,6 +12583,27 @@ dependencies = [ "windows 0.57.0", ] +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.9.1", + "core-foundation 0.9.4", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tagptr" version = "0.2.0" @@ -12066,7 +12649,7 @@ dependencies = [ "getrandom 0.3.3", "once_cell", "rustix 1.0.8", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -12368,6 +12951,16 @@ dependencies = [ "syn 2.0.104", ] +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.2" @@ -12713,7 +13306,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "319c70195101a93f56db4c74733e272d720768e13471f400c78406a326b172b0" dependencies = [ "cc", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -13259,7 +13852,7 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -13441,6 +14034,17 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-registry" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e" +dependencies = [ + "windows-link", + "windows-result 0.3.4", + "windows-strings 0.4.2", +] + [[package]] name = "windows-result" version = "0.1.2" diff --git a/Cargo.toml b/Cargo.toml index e6f45b5c00c..c2d50fa9118 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -473,19 +473,19 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false, features = ["enable_eip7702"] } -revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78", default-features = false } -revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", branch = "main", default-features = false } -revm-inspectors = "0.25.0" +revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false, features = ["enable_eip7702"] } +revm-bytecode 
= { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } +revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } +revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } +revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } +revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } +revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } +revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } +revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } +revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } +op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } +revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", branch = "feat/v82", default-features = false } +revm-inspectors = "0.27.1" # eth alloy-chains = { version = "0.2.5", default-features = false } @@ -548,8 +548,6 @@ reth-scroll-primitives = { path = "crates/scroll/primitives", default-features = reth-scroll-rpc = { path = "crates/scroll/rpc" } reth-scroll-trie = { path = "crates/scroll/trie" } reth-scroll-txpool = { path = "crates/scroll/txpool" } -# TODO (scroll): point to crates.io/tag once the crate is published/a tag is created. -poseidon-bn254 = { git = "https://github.com/scroll-tech/poseidon-bn254", rev = "526a64a", features = ["bn254"] } # op alloy-op-evm = { version = "0.16", default-features = false } @@ -756,8 +754,8 @@ walkdir = "2.3.3" vergen-git2 = "1.0.5" [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" } -op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" } +revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83" } +op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83" } # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } diff --git a/crates/e2e-test-utils/src/testsuite/setup.rs b/crates/e2e-test-utils/src/testsuite/setup.rs index 63c70c9b663..c91a50d3436 100644 --- a/crates/e2e-test-utils/src/testsuite/setup.rs +++ b/crates/e2e-test-utils/src/testsuite/setup.rs @@ -145,6 +145,7 @@ where LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Payload as PayloadTypes>::PayloadAttributes, >, + TmpNodeAddOnsHandle: RpcHandleProvider, TmpNodeEthApi>, { // Create nodes with imported chain data let import_result = self.create_nodes_with_import::(rlp_path).await?; @@ -251,6 +252,7 @@ where LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Payload as PayloadTypes>::PayloadAttributes, >, + TmpNodeAddOnsHandle: RpcHandleProvider, TmpNodeEthApi>, { let chain_spec = self.chain_spec.clone().ok_or_else(|| eyre!("Chain specification is required"))?; @@ -286,6 +288,7 @@ where LocalPayloadAttributesBuilder: PayloadAttributesBuilder< <::Payload as PayloadTypes>::PayloadAttributes, >, + TmpNodeAddOnsHandle: RpcHandleProvider, TmpNodeEthApi>, { move 
|timestamp| { let attributes = PayloadAttributes { diff --git a/crates/ethereum/node/tests/e2e/dev.rs b/crates/ethereum/node/tests/e2e/dev.rs index 4f88d504176..3b14c240102 100644 --- a/crates/ethereum/node/tests/e2e/dev.rs +++ b/crates/ethereum/node/tests/e2e/dev.rs @@ -5,8 +5,8 @@ use futures::StreamExt; use reth_chainspec::ChainSpec; use reth_node_api::{BlockBody, FullNodeComponents, FullNodePrimitives, NodeAddOns, NodeTypes}; use reth_node_builder::{ - rpc::RethRpcAddOns, DebugNodeLauncher, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, - NodeHandle, + rpc::{RethRpcAddOns, RpcHandleProvider}, + DebugNodeLauncher, EngineNodeLauncher, FullNode, NodeBuilder, NodeConfig, NodeHandle, }; use reth_node_core::args::DevArgs; use reth_node_ethereum::{node::EthereumAddOns, EthereumNode}; diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index b0ebed8bcfb..2bb54aa0d6b 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -19,6 +19,7 @@ reth-network-p2p.workspace = true reth-eth-wire-types.workspace = true reth-tokio-util.workspace = true reth-ethereum-forks.workspace = true +reth-primitives-traits.workspace = true # ethereum alloy-consensus.workspace = true @@ -48,4 +49,5 @@ serde = [ "reth-ethereum-forks/serde", "alloy-consensus/serde", "alloy-rpc-types-eth/serde", + "reth-primitives-traits/serde", ] diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index d2b1139f1dd..140802aafb2 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -9,7 +9,7 @@ use crate::{ components::{NodeComponents, NodeComponentsBuilder}, hooks::NodeHooks, launch::LaunchNode, - rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext}, + rpc::{RethRpcAddOns, RethRpcServerHandles, RpcContext, RpcHandleProvider}, AddOns, ComponentsFor, FullNode, }; @@ -249,7 +249,7 @@ where T: FullNodeTypes, CB: NodeComponentsBuilder, AO: RethRpcAddOns>, - >::Components>, >>::Handle: RpcHandleProvider< NodeAdapter>::Components>, @@ -304,7 +304,7 @@ mod test { use reth_evm::noop::NoopEvmConfig; use reth_evm_ethereum::MockEvmConfig; use reth_network::EthNetworkPrimitives; - use reth_network_api::noop::NoopNetwork; + use reth_network_api::{self, noop::NoopNetwork}; use reth_node_api::FullNodeTypesAdapter; use reth_node_ethereum::EthereumNode; use reth_payload_builder::PayloadBuilderHandle; diff --git a/crates/node/builder/src/launch/debug.rs b/crates/node/builder/src/launch/debug.rs index f2a10f0415d..687717d2705 100644 --- a/crates/node/builder/src/launch/debug.rs +++ b/crates/node/builder/src/launch/debug.rs @@ -184,7 +184,7 @@ where let blockchain_db = handle.node.provider.clone(); let chain_spec = config.chain.clone(); - let beacon_engine_handle = handle.node.add_ons_handle.beacon_engine_handle.clone(); + let beacon_engine_handle = handle.node.rpc_handle().beacon_engine_handle.clone(); let pool = handle.node.pool.clone(); let payload_builder_handle = handle.node.payload_builder_handle.clone(); diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index 1edbd09aeac..f0cb7a4c085 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -75,9 +75,6 @@ where CB: NodeComponentsBuilder, AO: RethRpcAddOns> + EngineValidatorAddOn>, - LocalPayloadAttributesBuilder: PayloadAttributesBuilder< - <::Payload as PayloadTypes>::PayloadAttributes, - >, >::Components>, >>::Handle: RpcHandleProvider< 
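
The `debug.rs` hunk above replaces direct field access (`handle.node.add_ons_handle`) with the `rpc_handle()` accessor that the new `RpcHandleProvider` bound guarantees on the launched node. A minimal sketch of the resulting access pattern, assuming `handle` is the `NodeHandle` returned by a launcher; only `rpc_handle()` and `beacon_engine_handle` come from the diff, the surrounding names are illustrative:

// Sketch only: with the `RpcHandleProvider` bound satisfied, engine/RPC
// internals are reached through the handle accessor rather than the raw
// `add_ons_handle` field.
let rpc = handle.node.rpc_handle();
let beacon_engine_handle = rpc.beacon_engine_handle.clone();
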
diff --git a/crates/scroll/cli/Cargo.toml b/crates/scroll/cli/Cargo.toml index 88e102d194e..d2dd4e51eea 100644 --- a/crates/scroll/cli/Cargo.toml +++ b/crates/scroll/cli/Cargo.toml @@ -25,7 +25,9 @@ reth-tracing.workspace = true # scroll reth-scroll-chainspec.workspace = true +reth-scroll-consensus.workspace = true reth-scroll-evm.workspace = true +reth-scroll-node.workspace = true reth-scroll-primitives = { workspace = true, features = ["reth-codec"] } scroll-alloy-consensus = { workspace = true, optional = true } diff --git a/crates/scroll/evm/Cargo.toml b/crates/scroll/evm/Cargo.toml index 1874d574578..bda6cf9e40a 100644 --- a/crates/scroll/evm/Cargo.toml +++ b/crates/scroll/evm/Cargo.toml @@ -18,6 +18,7 @@ reth-evm = { workspace = true, features = ["scroll-alloy-traits"] } reth-execution-types.workspace = true reth-primitives = { workspace = true, features = ["serde-bincode-compat"] } reth-primitives-traits.workspace = true +reth-rpc-eth-api.workspace = true reth-storage-api.workspace = true # revm diff --git a/crates/scroll/node/Cargo.toml b/crates/scroll/node/Cargo.toml index 58e2e4901c5..01c5794bb5c 100644 --- a/crates/scroll/node/Cargo.toml +++ b/crates/scroll/node/Cargo.toml @@ -59,9 +59,11 @@ reth-scroll-txpool.workspace = true scroll-alloy-consensus.workspace = true scroll-alloy-evm.workspace = true scroll-alloy-hardforks.workspace = true +scroll-alloy-network.workspace = true scroll-alloy-rpc-types-engine.workspace = true # misc +clap.workspace = true eyre.workspace = true serde_json = { workspace = true, optional = true } tracing.workspace = true diff --git a/crates/scroll/node/src/addons.rs b/crates/scroll/node/src/addons.rs index 7e4a75b3396..cd3dcd0429f 100644 --- a/crates/scroll/node/src/addons.rs +++ b/crates/scroll/node/src/addons.rs @@ -1,10 +1,13 @@ -use crate::{ScrollEngineValidator, ScrollEngineValidatorBuilder, ScrollStorage}; +use crate::{ + builder::payload::SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT, ScrollEngineValidator, + ScrollEngineValidatorBuilder, ScrollStorage, +}; use reth_evm::{ConfigureEvm, EvmFactory, EvmFactoryFor}; use reth_node_api::{AddOnsContext, NodeAddOns}; use reth_node_builder::{ rpc::{ BasicEngineApiBuilder, EngineValidatorAddOn, EngineValidatorBuilder, EthApiBuilder, - RethRpcAddOns, RpcAddOns, RpcHandle, + Identity, RethRpcAddOns, RethRpcMiddleware, RpcAddOns, RpcHandle, }, FullNodeComponents, }; @@ -14,13 +17,15 @@ use reth_scroll_chainspec::ScrollChainSpec; use reth_scroll_engine_primitives::ScrollEngineTypes; use reth_scroll_evm::ScrollNextBlockEnvAttributes; use reth_scroll_primitives::ScrollPrimitives; -use reth_scroll_rpc::{eth::ScrollEthApiBuilder, ScrollEthApi, ScrollEthApiError}; +use reth_scroll_rpc::{eth::ScrollEthApiBuilder, ScrollEthApiError}; use revm::context::TxEnv; use scroll_alloy_evm::ScrollTransactionIntoTxEnv; +use scroll_alloy_network::Scroll; +use std::marker::PhantomData; /// Add-ons for the Scroll follower node. 
#[derive(Debug)]
-pub struct ScrollAddOns
+pub struct ScrollAddOns
where
    N: FullNodeComponents,
    ScrollEthApiBuilder: EthApiBuilder,
@@ -32,31 +37,32 @@
        ScrollEthApiBuilder,
        ScrollEngineValidatorBuilder,
        BasicEngineApiBuilder,
+       RpcMiddleware,
    >,
}

-impl Default for ScrollAddOns
+impl Default for ScrollAddOns
where
    N: FullNodeComponents>,
    ScrollEthApiBuilder: EthApiBuilder,
{
    fn default() -> Self {
-        Self::builder().build()
+        Self::builder::().build()
    }
}

-impl ScrollAddOns
+impl ScrollAddOns
where
    N: FullNodeComponents>,
    ScrollEthApiBuilder: EthApiBuilder,
{
    /// Build a [`ScrollAddOns`] using [`ScrollAddOnsBuilder`].
-    pub fn builder() -> ScrollAddOnsBuilder {
+    pub fn builder() -> ScrollAddOnsBuilder {
        ScrollAddOnsBuilder::default()
    }
}

-impl NodeAddOns for ScrollAddOns
+impl NodeAddOns for ScrollAddOns
where
    N: FullNodeComponents<
        Types: NodeTypes<
@@ -69,8 +75,9 @@ where
    >,
    ScrollEthApiError: FromEvmError,
    EvmFactoryFor: EvmFactory>,
+   RpcMiddleware: RethRpcMiddleware,
{
-    type Handle = RpcHandle>;
+    type Handle = RpcHandle>::EthApi>;

    async fn launch_add_ons(self, ctx: AddOnsContext<'_, N>) -> eyre::Result {
        let Self { rpc_add_ons } = self;
@@ -78,7 +85,7 @@ where
    }
}

-impl RethRpcAddOns for ScrollAddOns
+impl RethRpcAddOns for ScrollAddOns
where
    N: FullNodeComponents<
        Types: NodeTypes<
@@ -91,15 +98,16 @@ where
    >,
    ScrollEthApiError: FromEvmError,
    EvmFactoryFor: EvmFactory>,
+   RpcMiddleware: RethRpcMiddleware,
{
-    type EthApi = ScrollEthApi;
+    type EthApi = >::EthApi;

    fn hooks_mut(&mut self) -> &mut reth_node_builder::rpc::RpcHooks {
        self.rpc_add_ons.hooks_mut()
    }
}

-impl EngineValidatorAddOn for ScrollAddOns
+impl EngineValidatorAddOn for ScrollAddOns
where
    N: FullNodeComponents<
        Types: NodeTypes<
@@ -109,6 +117,7 @@ where
        >,
    >,
    ScrollEthApiBuilder: EthApiBuilder,
+   RpcMiddleware: Send,
{
    type Validator = ScrollEngineValidator;

@@ -118,23 +127,94 @@ where
    }
}

/// A regular scroll evm and executor builder.
-#[derive(Debug, Default, Clone)]
+#[derive(Debug, Clone)]
#[non_exhaustive]
-pub struct ScrollAddOnsBuilder {}
+pub struct ScrollAddOnsBuilder {
+    /// Sequencer client, configured to forward submitted transactions to the sequencer of the
+    /// given Scroll network.
+    sequencer_url: Option,
+    /// Minimum suggested priority fee (tip).
+    min_suggested_priority_fee: u64,
+    /// Maximum payload size.
+    payload_size_limit: u64,
+    /// Marker for network types.
+    _nt: PhantomData,
+    /// RPC middleware to use.
+    rpc_middleware: RpcMiddleware,
+}
+
+impl Default for ScrollAddOnsBuilder {
+    fn default() -> Self {
+        Self {
+            sequencer_url: None,
+            payload_size_limit: SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT,
+            // TODO (scroll): update with default values.
+            min_suggested_priority_fee: 1_000_000,
+            _nt: PhantomData,
+            rpc_middleware: Identity::new(),
+        }
+    }
+}

-impl ScrollAddOnsBuilder {
+impl ScrollAddOnsBuilder {
+    /// With a [`SequencerClient`].
+    pub fn with_sequencer(mut self, sequencer_client: Option) -> Self {
+        self.sequencer_url = sequencer_client;
+        self
+    }
+
+    /// With minimum suggested priority fee.
+    pub const fn with_min_suggested_priority_fee(
+        mut self,
+        min_suggested_priority_fee: u64,
+    ) -> Self {
+        self.min_suggested_priority_fee = min_suggested_priority_fee;
+        self
+    }
+
+    /// With maximum payload size limit.
+ pub const fn with_payload_size_limit(mut self, payload_size_limit: u64) -> Self { + self.payload_size_limit = payload_size_limit; + self + } + + /// Configure the RPC middleware to use + pub fn with_rpc_middleware(self, rpc_middleware: T) -> ScrollAddOnsBuilder { + let Self { sequencer_url, min_suggested_priority_fee, payload_size_limit, _nt, .. } = self; + ScrollAddOnsBuilder { + sequencer_url, + payload_size_limit, + min_suggested_priority_fee, + _nt, + rpc_middleware, + } + } +} + +impl ScrollAddOnsBuilder { /// Builds an instance of [`ScrollAddOns`]. - pub fn build(self) -> ScrollAddOns + pub fn build(self) -> ScrollAddOns where N: FullNodeComponents>, ScrollEthApiBuilder: EthApiBuilder, { + let Self { + sequencer_url, + payload_size_limit, + min_suggested_priority_fee, + rpc_middleware, + .. + } = self; + ScrollAddOns { rpc_add_ons: RpcAddOns::new( - ScrollEthApi::::builder(), - Default::default(), + ScrollEthApiBuilder::new() + .with_sequencer(sequencer_url) + .with_payload_size_limit(payload_size_limit) + .with_min_suggested_priority_fee(min_suggested_priority_fee), Default::default(), Default::default(), + rpc_middleware, ), } } diff --git a/crates/scroll/node/src/builder/payload.rs b/crates/scroll/node/src/builder/payload.rs index 4ea061efebc..c4c2624d8d7 100644 --- a/crates/scroll/node/src/builder/payload.rs +++ b/crates/scroll/node/src/builder/payload.rs @@ -35,7 +35,7 @@ impl Default for ScrollPayloadBuilderBuilder { const SCROLL_GAS_LIMIT: u64 = 20_000_000; const SCROLL_PAYLOAD_BUILDING_DURATION: Duration = Duration::from_secs(1); -const SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT: u64 = 122_880; +pub(crate) const SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT: u64 = 122_880; impl ScrollPayloadBuilderBuilder { /// A helper method to initialize [`reth_scroll_payload::ScrollPayloadBuilder`] with the diff --git a/crates/scroll/node/src/node.rs b/crates/scroll/node/src/node.rs index 93f7a0901e7..17832c91290 100644 --- a/crates/scroll/node/src/node.rs +++ b/crates/scroll/node/src/node.rs @@ -1,10 +1,12 @@ //! Node specific implementations for Scroll. use crate::{ - ScrollAddOns, ScrollConsensusBuilder, ScrollExecutorBuilder, ScrollNetworkBuilder, - ScrollPayloadBuilderBuilder, ScrollPoolBuilder, ScrollStorage, + args::ScrollRollupArgs, ScrollAddOns, ScrollAddOnsBuilder, ScrollConsensusBuilder, + ScrollExecutorBuilder, ScrollNetworkBuilder, ScrollPayloadBuilderBuilder, ScrollPoolBuilder, + ScrollStorage, }; -use reth_node_api::FullNodeComponents; +use reth_engine_local::LocalPayloadAttributesBuilder; +use reth_node_api::{FullNodeComponents, PayloadAttributesBuilder, PayloadTypes}; use reth_node_builder::{ components::{BasicPayloadServiceBuilder, ComponentsBuilder}, node::{FullNodeTypes, NodeTypes}, @@ -14,12 +16,23 @@ use reth_scroll_chainspec::ScrollChainSpec; use reth_scroll_engine_primitives::ScrollEngineTypes; use reth_scroll_primitives::ScrollPrimitives; use reth_trie_db::MerklePatriciaTrie; +use scroll_alloy_network::Scroll; +use std::sync::Arc; /// The Scroll node implementation. #[derive(Clone, Debug, Default)] -pub struct ScrollNode; +#[non_exhaustive] +pub struct ScrollNode { + /// Additional Scroll args. + pub args: ScrollRollupArgs, +} impl ScrollNode { + /// Creates a new instance of the Scroll node type. + pub const fn new(args: ScrollRollupArgs) -> Self { + Self { args } + } + /// Returns a [`ComponentsBuilder`] configured for a regular Ethereum node. 
pub fn components() -> ComponentsBuilder< Node, @@ -71,7 +84,11 @@ where } fn add_ons(&self) -> Self::AddOns { - ScrollAddOns::default() + ScrollAddOnsBuilder::::default() + .with_sequencer(self.args.sequencer.clone()) + .with_min_suggested_priority_fee(self.args.min_suggested_priority_fee) + .with_payload_size_limit(self.args.payload_size_limit) + .build() } } @@ -84,6 +101,13 @@ where fn rpc_to_primitive_block(rpc_block: Self::RpcBlock) -> reth_node_api::BlockTy { rpc_block.into_consensus() } + + fn local_payload_attributes_builder( + chain_spec: &Self::ChainSpec, + ) -> impl PayloadAttributesBuilder<<::Payload as PayloadTypes>::PayloadAttributes> + { + LocalPayloadAttributesBuilder::new(Arc::new(chain_spec.clone())) + } } impl NodeTypes for ScrollNode { diff --git a/crates/scroll/rpc/Cargo.toml b/crates/scroll/rpc/Cargo.toml index a7203c92fb5..a0b84a8367c 100644 --- a/crates/scroll/rpc/Cargo.toml +++ b/crates/scroll/rpc/Cargo.toml @@ -14,7 +14,6 @@ workspace = true [dependencies] # reth reth-evm.workspace = true -reth-primitives.workspace = true reth-primitives-traits.workspace = true reth-provider.workspace = true reth-rpc-eth-api.workspace = true @@ -25,16 +24,10 @@ reth-rpc.workspace = true reth-rpc-convert = { workspace = true, features = ["scroll"] } reth-node-api.workspace = true reth-node-builder.workspace = true -reth-network-api.workspace = true -reth-chainspec.workspace = true # scroll -reth-scroll-chainspec.workspace = true -reth-scroll-evm.workspace = true reth-scroll-primitives = { workspace = true, features = ["serde", "serde-bincode-compat", "reth-codec"] } scroll-alloy-consensus.workspace = true -scroll-alloy-evm.workspace = true -scroll-alloy-hardforks.workspace = true scroll-alloy-network.workspace = true scroll-alloy-rpc-types.workspace = true @@ -55,7 +48,6 @@ reqwest = { workspace = true, default-features = false, features = ["rustls-tls- tracing.workspace = true # async -parking_lot.workspace = true tokio.workspace = true # rpc diff --git a/examples/custom-node/Cargo.toml b/examples/custom-node/Cargo.toml index 203860bc2e1..54a68d98abe 100644 --- a/examples/custom-node/Cargo.toml +++ b/examples/custom-node/Cargo.toml @@ -15,6 +15,7 @@ reth-optimism-forks.workspace = true reth-db-api.workspace = true reth-op = { workspace = true, features = ["node", "pool", "rpc"] } reth-payload-builder.workspace = true +reth-primitives-traits.workspace = true reth-rpc-api.workspace = true reth-engine-primitives.workspace = true reth-rpc-engine-api.workspace = true @@ -70,5 +71,6 @@ arbitrary = [ "reth-db-api/arbitrary", "alloy-rpc-types-eth/arbitrary", "op-alloy-rpc-types/arbitrary", + "reth-primitives-traits/arbitrary", ] default = [] From 485d7abbd095d7c947ba601f02f69e4f24a9b257 Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Tue, 29 Jul 2025 13:23:29 +0200 Subject: [PATCH 293/305] feat: update bin Signed-off-by: Gregory Edison --- crates/scroll/bin/scroll-reth/src/main.rs | 11 ++- crates/scroll/cli/src/app.rs | 115 ++++++++++++++++++++++ crates/scroll/cli/src/args.rs | 3 - crates/scroll/cli/src/commands/mod.rs | 26 ++++- crates/scroll/cli/src/lib.rs | 82 +++++---------- crates/scroll/node/src/args.rs | 27 +++++ crates/scroll/node/src/lib.rs | 4 + 7 files changed, 202 insertions(+), 66 deletions(-) create mode 100644 crates/scroll/cli/src/app.rs delete mode 100644 crates/scroll/cli/src/args.rs create mode 100644 crates/scroll/node/src/args.rs diff --git a/crates/scroll/bin/scroll-reth/src/main.rs b/crates/scroll/bin/scroll-reth/src/main.rs index b7f49e80903..97daff973af 
100644 --- a/crates/scroll/bin/scroll-reth/src/main.rs +++ b/crates/scroll/bin/scroll-reth/src/main.rs @@ -5,8 +5,8 @@ static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::ne fn main() { use clap::Parser; - use reth_scroll_cli::{Cli, ScrollChainSpecParser, ScrollRollupArgs}; - use reth_scroll_node::ScrollNode; + use reth_scroll_cli::{Cli, ScrollChainSpecParser}; + use reth_scroll_node::{ScrollNode, ScrollRollupArgs}; use tracing::info; reth_cli_util::sigsegv_handler::install(); @@ -16,10 +16,11 @@ fn main() { std::env::set_var("RUST_BACKTRACE", "1"); } - if let Err(err) = Cli::::parse() - .run::<_, _, ScrollNode>(|builder, _| async move { + if let Err(err) = + Cli::::parse().run(|builder, args| async move { info!(target: "reth::cli", "Launching node"); - let handle = builder.node(ScrollNode).launch_with_debug_capabilities().await?; + let handle = + builder.node(ScrollNode::new(args)).launch_with_debug_capabilities().await?; handle.node_exit_future.await }) { diff --git a/crates/scroll/cli/src/app.rs b/crates/scroll/cli/src/app.rs new file mode 100644 index 00000000000..fc6caf33844 --- /dev/null +++ b/crates/scroll/cli/src/app.rs @@ -0,0 +1,115 @@ +use crate::{Cli, Commands}; +use eyre::{eyre, Result}; +use reth_cli::chainspec::ChainSpecParser; +use reth_cli_commands::launcher::Launcher; +use reth_cli_runner::CliRunner; +use reth_node_metrics::recorder::install_prometheus_recorder; +use reth_scroll_chainspec::ScrollChainSpec; +use reth_scroll_consensus::ScrollBeaconConsensus; +use reth_scroll_evm::ScrollExecutorProvider; +use reth_scroll_node::ScrollNode; +use reth_tracing::{FileWorkerGuard, Layers}; +use std::{fmt, sync::Arc}; +use tracing::info; + +/// A wrapper around a parsed CLI that handles command execution. +#[derive(Debug)] +pub struct CliApp { + cli: Cli, + runner: Option, + layers: Option, + guard: Option, +} + +impl CliApp +where + C: ChainSpecParser, + Ext: clap::Args + fmt::Debug, +{ + pub(crate) fn new(cli: Cli) -> Self { + Self { cli, runner: None, layers: Some(Layers::new()), guard: None } + } + + /// Sets the runner for the CLI commander. + /// + /// This replaces any existing runner with the provided one. + pub fn set_runner(&mut self, runner: CliRunner) { + self.runner = Some(runner); + } + + /// Access to tracing layers. + /// + /// Returns a mutable reference to the tracing layers, or error + /// if tracing initialized and layers have detached already. + pub fn access_tracing_layers(&mut self) -> Result<&mut Layers> { + self.layers.as_mut().ok_or_else(|| eyre!("Tracing already initialized")) + } + + /// Execute the configured cli command. + /// + /// This accepts a closure that is used to launch the node via the + /// [`NodeCommand`](reth_cli_commands::node::NodeCommand). 
+    pub fn run(mut self, launcher: impl Launcher) -> Result<()> {
+        let runner = match self.runner.take() {
+            Some(runner) => runner,
+            None => CliRunner::try_default_runtime()?,
+        };
+
+        // Add network name if available to the logs dir
+        if let Some(chain_spec) = self.cli.command.chain_spec() {
+            self.cli.logs.log_file_directory =
+                self.cli.logs.log_file_directory.join(chain_spec.chain.to_string());
+        }
+
+        self.init_tracing()?;
+
+        // Install the prometheus recorder to be sure to record all metrics
+        let _ = install_prometheus_recorder();
+
+        let components = |spec: Arc| {
+            (ScrollExecutorProvider::scroll(spec.clone()), ScrollBeaconConsensus::new(spec))
+        };
+
+        match self.cli.command {
+            Commands::Node(command) => {
+                runner.run_command_until_exit(|ctx| command.execute(ctx, launcher))
+            }
+            Commands::Import(command) => {
+                runner.run_blocking_until_ctrl_c(command.execute::(components))
+            }
+            Commands::Init(command) => {
+                runner.run_blocking_until_ctrl_c(command.execute::())
+            }
+            Commands::InitState(command) => {
+                runner.run_blocking_until_ctrl_c(command.execute::())
+            }
+            Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()),
+            Commands::Db(command) => {
+                runner.run_blocking_until_ctrl_c(command.execute::())
+            }
+            Commands::Stage(command) => runner
+                .run_command_until_exit(|ctx| command.execute::(ctx, components)),
+            Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::()),
+            Commands::Config(command) => runner.run_until_ctrl_c(command.execute()),
+            Commands::Recover(command) => {
+                runner.run_command_until_exit(|ctx| command.execute::(ctx))
+            }
+            Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()),
+            #[cfg(feature = "dev")]
+            Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()),
+        }
+    }
+
+    /// Initializes tracing with the configured options.
+    ///
+    /// If file logging is enabled, this function stores the guard in the struct.
+    pub fn init_tracing(&mut self) -> Result<()> {
+        if self.guard.is_none() {
+            let layers = self.layers.take().unwrap_or_default();
+            self.guard = self.cli.logs.init_tracing_with_layers(layers)?;
+            info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.cli.logs.log_file_directory);
+        }
+        Ok(())
+    }
+}
diff --git a/crates/scroll/cli/src/args.rs b/crates/scroll/cli/src/args.rs
deleted file mode 100644
index d51f83146a0..00000000000
--- a/crates/scroll/cli/src/args.rs
+++ /dev/null
@@ -1,3 +0,0 @@
-/// Rollup arguments for the Scroll node.
-#[derive(Debug, clap::Args)] -pub struct ScrollRollupArgs; diff --git a/crates/scroll/cli/src/commands/mod.rs b/crates/scroll/cli/src/commands/mod.rs index 0690fe3a312..1bd4c500164 100644 --- a/crates/scroll/cli/src/commands/mod.rs +++ b/crates/scroll/cli/src/commands/mod.rs @@ -8,7 +8,8 @@ use reth_cli_commands::{ config_cmd, db, dump_genesis, import, init_cmd, init_state, node, node::NoArgs, p2p, prune, recover, stage, }; -use std::fmt; +use reth_scroll_chainspec::ScrollChainSpec; +use std::{fmt, sync::Arc}; /// Commands to be executed #[derive(Debug, Subcommand)] @@ -54,3 +55,26 @@ pub enum Commands< #[command(name = "test-vectors")] TestVectors(test_vectors::Command), } + +impl, Ext: clap::Args + fmt::Debug> + Commands +{ + /// Returns the underlying chain being used for commands + pub fn chain_spec(&self) -> Option<&Arc> { + match self { + Self::Node(cmd) => cmd.chain_spec(), + Self::Init(cmd) => cmd.chain_spec(), + Self::InitState(cmd) => cmd.chain_spec(), + Self::Import(cmd) => cmd.chain_spec(), + Self::DumpGenesis(cmd) => cmd.chain_spec(), + Self::Db(cmd) => cmd.chain_spec(), + Self::Stage(cmd) => cmd.chain_spec(), + Self::P2P(cmd) => cmd.chain_spec(), + Self::Config(_) => None, + Self::Recover(cmd) => cmd.chain_spec(), + Self::Prune(cmd) => cmd.chain_spec(), + #[cfg(feature = "dev")] + Self::TestVectors(_) => None, + } + } +} diff --git a/crates/scroll/cli/src/lib.rs b/crates/scroll/cli/src/lib.rs index 4313f44bcaf..e81f6f07481 100644 --- a/crates/scroll/cli/src/lib.rs +++ b/crates/scroll/cli/src/lib.rs @@ -1,6 +1,7 @@ //! Scroll CLI implementation. -mod args; -pub use args::ScrollRollupArgs; + +mod app; +pub use app::CliApp; mod commands; pub use commands::Commands; @@ -10,22 +11,16 @@ pub use spec::ScrollChainSpecParser; use clap::{value_parser, Parser}; use reth_cli::chainspec::ChainSpecParser; -use reth_cli_commands::{common::CliNodeTypes, launcher::FnLauncher, node::NoArgs}; +use reth_cli_commands::{launcher::FnLauncher, node::NoArgs}; use reth_cli_runner::CliRunner; -use reth_consensus::noop::NoopConsensus; use reth_db::DatabaseEnv; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ args::LogArgs, version::{LONG_VERSION, SHORT_VERSION}, }; -use reth_node_metrics::recorder::install_prometheus_recorder; use reth_scroll_chainspec::ScrollChainSpec; -use reth_scroll_evm::ScrollExecutorProvider; -use reth_scroll_primitives::ScrollPrimitives; -use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, future::Future, sync::Arc}; -use tracing::info; /// The main scroll cli interface. /// @@ -94,63 +89,36 @@ where C: ChainSpecParser, Ext: clap::Args + fmt::Debug, { + /// Configures the CLI and returns a [`CliApp`] instance. + /// + /// This method is used to prepare the CLI for execution by wrapping it in a + /// [`CliApp`] that can be further configured before running. + pub fn configure(self) -> CliApp { + CliApp::new(self) + } + /// Execute the configured cli command. /// /// This accepts a closure that is used to launch the node via the /// [`NodeCommand`](reth_cli_commands::node::NodeCommand). 
- pub fn run(mut self, launcher: L) -> eyre::Result<()> + pub fn run(self, launcher: L) -> eyre::Result<()> where L: FnOnce(WithLaunchContext, C::ChainSpec>>, Ext) -> Fut, Fut: Future>, - Types: CliNodeTypes, { - // add network name to logs dir - self.logs.log_file_directory = - self.logs.log_file_directory.join(self.chain.chain().to_string()); - - let _guard = self.init_tracing()?; - info!(target: "reth::cli", "Initialized tracing, debug log directory: {}", self.logs.log_file_directory); - - // Install the prometheus recorder to be sure to record all metrics - let _ = install_prometheus_recorder(); - let components = |spec: Arc| { - (ScrollExecutorProvider::scroll(spec), NoopConsensus::default()) - }; - - let runner = CliRunner::try_default_runtime()?; - match self.command { - Commands::Node(command) => runner.run_command_until_exit(|ctx| { - command.execute(ctx, FnLauncher::new::(launcher)) - }), - Commands::Init(command) => runner.run_blocking_until_ctrl_c(command.execute::()), - Commands::InitState(command) => { - runner.run_blocking_until_ctrl_c(command.execute::()) - } - Commands::Import(command) => { - runner.run_blocking_until_ctrl_c(command.execute::(components)) - } - Commands::DumpGenesis(command) => runner.run_blocking_until_ctrl_c(command.execute()), - Commands::Db(command) => runner.run_blocking_until_ctrl_c(command.execute::()), - Commands::Stage(command) => { - runner.run_command_until_exit(|ctx| command.execute::(ctx, components)) - } - Commands::P2P(command) => runner.run_until_ctrl_c(command.execute::()), - Commands::Config(command) => runner.run_until_ctrl_c(command.execute()), - Commands::Recover(command) => { - runner.run_command_until_exit(|ctx| command.execute::(ctx)) - } - Commands::Prune(command) => runner.run_until_ctrl_c(command.execute::()), - #[cfg(feature = "dev")] - Commands::TestVectors(command) => runner.run_until_ctrl_c(command.execute()), - } + self.with_runner(CliRunner::try_default_runtime()?, launcher) } - /// Initializes tracing with the configured options. - /// - /// If file logging is enabled, this function returns a guard that must be kept alive to ensure - /// that all logs are flushed to disk. - pub fn init_tracing(&self) -> eyre::Result> { - let guard = self.logs.init_tracing()?; - Ok(guard) + /// Execute the configured cli command with the provided [`CliRunner`]. + pub fn with_runner(self, runner: CliRunner, launcher: L) -> eyre::Result<()> + where + L: FnOnce(WithLaunchContext, C::ChainSpec>>, Ext) -> Fut, + Fut: Future>, + { + let mut this = self.configure(); + this.set_runner(runner); + this.run(FnLauncher::new::(async move |builder, chain_spec| { + launcher(builder, chain_spec).await + })) } } diff --git a/crates/scroll/node/src/args.rs b/crates/scroll/node/src/args.rs new file mode 100644 index 00000000000..b02e2726e91 --- /dev/null +++ b/crates/scroll/node/src/args.rs @@ -0,0 +1,27 @@ +use crate::builder::payload::SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT; + +/// Rollup arguments for the Scroll node. +#[derive(Debug, Clone, clap::Args)] +pub struct ScrollRollupArgs { + /// Endpoint for the sequencer mempool (can be both HTTP and WS) + #[arg(long = "scroll.sequencer")] + pub sequencer: Option, + + /// Minimum suggested priority fee (tip) in wei, default `1_000_000` + #[arg(long = "scroll.min-suggested-priority-fee", default_value_t = 1_000_000)] + pub min_suggested_priority_fee: u64, + + /// Payload size limit, default to [`SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT`]. 
+    #[arg(long = "scroll.payload-size-limit", default_value_t = SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT)]
+    pub payload_size_limit: u64,
+}
+
+impl Default for ScrollRollupArgs {
+    fn default() -> Self {
+        Self {
+            sequencer: None,
+            min_suggested_priority_fee: 1_000_000,
+            payload_size_limit: SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT,
+        }
+    }
+}
diff --git a/crates/scroll/node/src/lib.rs b/crates/scroll/node/src/lib.rs
index 2d262086661..099a252bde4 100644
--- a/crates/scroll/node/src/lib.rs
+++ b/crates/scroll/node/src/lib.rs
@@ -1,4 +1,8 @@
 //! Node specific implementations for Scroll.
+
+mod args;
+pub use args::ScrollRollupArgs;
+
 mod builder;
 pub use builder::{
     consensus::ScrollConsensusBuilder,

From 2ae382d482b3ed7cc3ce5b8874615f080011c3b0 Mon Sep 17 00:00:00 2001
From: Gregory Edison
Date: Tue, 29 Jul 2025 13:27:43 +0200
Subject: [PATCH 294/305] test: update

Signed-off-by: Gregory Edison
---
 crates/net/network-api/src/noop.rs | 13 ++++++++++-
 crates/scroll/alloy/evm/src/block/curie.rs | 24 ++++----------------
 crates/scroll/alloy/evm/src/block/feynman.rs | 24 ++++----------------
 crates/scroll/alloy/evm/src/block/mod.rs | 12 ----------
 crates/scroll/evm/src/execute.rs | 9 ++++----
 5 files changed, 24 insertions(+), 58 deletions(-)

diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs
index c650db0afc4..3f38559524a 100644
--- a/crates/net/network-api/src/noop.rs
+++ b/crates/net/network-api/src/noop.rs
@@ -7,6 +7,7 @@ use core::{fmt, marker::PhantomData};
 use std::net::{IpAddr, SocketAddr};

 use crate::{
+    block::{EthWireBlockListenerProvider, NewBlockWithPeer},
     events::{NetworkPeersEvents, PeerEventStream},
     test_utils::{PeersHandle, PeersHandleProvider},
     BlockDownloaderProvider, DiscoveryEvent, NetworkError, NetworkEvent,
@@ -22,7 +23,7 @@ use reth_network_p2p::{sync::NetworkSyncUpdater, NoopFullBlockClient};
 use reth_network_peers::NodeRecord;
 use reth_network_types::{PeerKind, Reputation, ReputationChangeKind};
 use reth_tokio_util::{EventSender, EventStream};
-use tokio::sync::{mpsc, oneshot};
+use tokio::sync::{mpsc, oneshot, oneshot::error::RecvError};
 use tokio_stream::wrappers::UnboundedReceiverStream;

 /// A type that implements all network traits and does nothing.
@@ -200,6 +201,16 @@ where } } +impl EthWireBlockListenerProvider for NoopNetwork { + type Block = N::Block; + + async fn eth_wire_block_listener( + &self, + ) -> Result>, RecvError> { + unreachable!() + } +} + impl NetworkPeersEvents for NoopNetwork where Net: NetworkPrimitives, diff --git a/crates/scroll/alloy/evm/src/block/curie.rs b/crates/scroll/alloy/evm/src/block/curie.rs index 7e2e853ad42..3c65e9cc04d 100644 --- a/crates/scroll/alloy/evm/src/block/curie.rs +++ b/crates/scroll/alloy/evm/src/block/curie.rs @@ -102,7 +102,7 @@ pub(super) fn apply_curie_hard_fork(state: &mut State) -> Resu #[cfg(test)] mod tests { - use super::{super::assert_bytecode_eq, *}; + use super::*; use revm::{ database::{ states::{bundle_state::BundleRetention, plain_account::PlainStorage, StorageSlot}, @@ -155,24 +155,8 @@ mod tests { let expected_oracle_info = AccountInfo { code_hash, code: Some(bytecode.clone()), ..Default::default() }; - // TODO: revert back to performing equality check on `AccountInfo` once we bump revm > v78 - let oracle_original_info = oracle.original_info.unwrap(); - assert_bytecode_eq( - oracle_original_info.code.as_ref().unwrap(), - oracle_pre_fork.code.as_ref().unwrap(), - ); - assert_eq!(oracle_original_info.balance, oracle_pre_fork.balance); - assert_eq!(oracle_original_info.nonce, oracle_pre_fork.nonce); - assert_eq!(oracle_original_info.code_hash, oracle_pre_fork.code_hash); - - let oracle_post_info = oracle.info.unwrap(); - assert_bytecode_eq( - oracle_post_info.code.as_ref().unwrap(), - expected_oracle_info.code.as_ref().unwrap(), - ); - assert_eq!(oracle_post_info.balance, expected_oracle_info.balance); - assert_eq!(oracle_post_info.nonce, expected_oracle_info.nonce); - assert_eq!(oracle_post_info.code_hash, expected_oracle_info.code_hash); + assert_eq!(oracle.original_info.unwrap(), oracle_pre_fork); + assert_eq!(oracle.info.unwrap(), expected_oracle_info); // check oracle storage changeset let mut storage = oracle.storage.into_iter().collect::>(); @@ -188,7 +172,7 @@ mod tests { } // check deployed contract - assert_bytecode_eq(bundle.contracts.get(&code_hash).unwrap(), &bytecode); + assert_eq!(bundle.contracts.get(&code_hash).unwrap(), &bytecode); Ok(()) } diff --git a/crates/scroll/alloy/evm/src/block/feynman.rs b/crates/scroll/alloy/evm/src/block/feynman.rs index 0b44cd245c7..c739d4a3abd 100644 --- a/crates/scroll/alloy/evm/src/block/feynman.rs +++ b/crates/scroll/alloy/evm/src/block/feynman.rs @@ -90,7 +90,7 @@ pub(super) fn apply_feynman_hard_fork( #[cfg(test)] mod tests { - use super::{super::assert_bytecode_eq, *}; + use super::*; use revm::{ database::{ states::{bundle_state::BundleRetention, plain_account::PlainStorage, StorageSlot}, @@ -158,24 +158,8 @@ mod tests { let expected_oracle_info = AccountInfo { code_hash, code: Some(bytecode.clone()), ..Default::default() }; - // TODO: revert back to performing equality check on `AccountInfo` once we bump revm > v78 - let oracle_original_info = oracle.original_info.unwrap(); - assert_bytecode_eq( - oracle_original_info.code.as_ref().unwrap(), - oracle_pre_fork.code.as_ref().unwrap(), - ); - assert_eq!(oracle_original_info.balance, oracle_pre_fork.balance); - assert_eq!(oracle_original_info.nonce, oracle_pre_fork.nonce); - assert_eq!(oracle_original_info.code_hash, oracle_pre_fork.code_hash); - - let oracle_post_info = oracle.info.unwrap(); - assert_bytecode_eq( - oracle_post_info.code.as_ref().unwrap(), - expected_oracle_info.code.as_ref().unwrap(), - ); - assert_eq!(oracle_post_info.balance, 
expected_oracle_info.balance); - assert_eq!(oracle_post_info.nonce, expected_oracle_info.nonce); - assert_eq!(oracle_post_info.code_hash, expected_oracle_info.code_hash); + assert_eq!(oracle.original_info.unwrap(), oracle_pre_fork); + assert_eq!(oracle.info.unwrap(), expected_oracle_info); // check oracle storage changeset let mut storage = oracle.storage.into_iter().collect::>(); @@ -191,7 +175,7 @@ mod tests { } // check deployed contract - assert_bytecode_eq(bundle.contracts.get(&code_hash).unwrap(), &bytecode); + assert_eq!(bundle.contracts.get(&code_hash).unwrap(), &bytecode); Ok(()) } diff --git a/crates/scroll/alloy/evm/src/block/mod.rs b/crates/scroll/alloy/evm/src/block/mod.rs index 054b5a5b96d..ec613d5cd72 100644 --- a/crates/scroll/alloy/evm/src/block/mod.rs +++ b/crates/scroll/alloy/evm/src/block/mod.rs @@ -403,15 +403,3 @@ where ScrollBlockExecutor::new(evm, ctx, &self.spec, &self.receipt_builder) } } - -// TODO: remove this when we bump revm > v78 -/// A helper function that compares asserts that two bytecode instances are equal. -#[cfg(test)] -fn assert_bytecode_eq(expected: &revm::bytecode::Bytecode, actual: &revm::bytecode::Bytecode) { - assert_eq!(expected.legacy_jump_table().unwrap().len, actual.legacy_jump_table().unwrap().len); - assert_eq!( - expected.legacy_jump_table().unwrap().table, - actual.legacy_jump_table().unwrap().table - ); - assert_eq!(expected.bytecode(), actual.bytecode()); -} diff --git a/crates/scroll/evm/src/execute.rs b/crates/scroll/evm/src/execute.rs index 8648e3d8453..9a944ca3199 100644 --- a/crates/scroll/evm/src/execute.rs +++ b/crates/scroll/evm/src/execute.rs @@ -407,16 +407,15 @@ mod tests { let oracle_bytecode = oracle.info.unwrap().code.unwrap(); let bytecode = Bytecode::new_raw(CURIE_L1_GAS_PRICE_ORACLE_BYTECODE); - // TODO: update when we bump to revm > v78 // Note: Eq operator fails due to the presence of `table_ptr` in the `JumpTable` struct // therefore we do a manual comparison. 
assert_eq!( - bytecode.legacy_jump_table().unwrap().len, - oracle_bytecode.legacy_jump_table().unwrap().len + bytecode.legacy_jump_table().unwrap().len(), + oracle_bytecode.legacy_jump_table().unwrap().len() ); assert_eq!( - bytecode.legacy_jump_table().unwrap().table, - oracle_bytecode.legacy_jump_table().unwrap().table + bytecode.legacy_jump_table().unwrap().as_slice(), + oracle_bytecode.legacy_jump_table().unwrap().as_slice() ); assert_eq!(bytecode.bytecode(), oracle_bytecode.bytecode()); From 6446b46ebf320e57f38e7ef090fa40a840fa935e Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Tue, 29 Jul 2025 13:35:49 +0200 Subject: [PATCH 295/305] feat: evm and attributes update Signed-off-by: Gregory Edison --- crates/scroll/alloy/evm/src/lib.rs | 87 ++++++------------------ crates/scroll/evm/src/lib.rs | 13 ++++ crates/scroll/node/src/builder/engine.rs | 30 ++++---- 3 files changed, 49 insertions(+), 81 deletions(-) diff --git a/crates/scroll/alloy/evm/src/lib.rs b/crates/scroll/alloy/evm/src/lib.rs index a3a0ff6fbfd..5ec8dc38009 100644 --- a/crates/scroll/alloy/evm/src/lib.rs +++ b/crates/scroll/alloy/evm/src/lib.rs @@ -20,9 +20,8 @@ mod system_caller; extern crate alloc; -use alloc::vec::Vec; use alloy_evm::{precompiles::PrecompilesMap, Database, Evm, EvmEnv, EvmFactory}; -use alloy_primitives::{Address, Bytes, TxKind, U256}; +use alloy_primitives::{Address, Bytes}; use core::{ fmt, ops::{Deref, DerefMut}, @@ -33,7 +32,7 @@ use revm::{ handler::PrecompileProvider, inspector::NoOpInspector, interpreter::{interpreter::EthInterpreter, InterpreterResult}, - Context, ExecuteEvm, InspectEvm, Inspector, + Context, ExecuteEvm, InspectEvm, Inspector, SystemCallEvm, }; use revm_scroll::{ builder::{ @@ -42,7 +41,7 @@ use revm_scroll::{ }, instructions::ScrollInstructions, precompile::ScrollPrecompileProvider, - ScrollSpecId, ScrollTransaction, + ScrollSpecId, }; /// Re-export `TX_L1_FEE_PRECISION_U256` from `revm-scroll` for convenience. @@ -140,69 +139,7 @@ where contract: Address, data: Bytes, ) -> Result, Self::Error> { - let tx = ScrollTransaction { - base: TxEnv { - caller, - kind: TxKind::Call(contract), - // Explicitly set nonce to 0 so revm does not do any nonce checks - nonce: 0, - gas_limit: 30_000_000, - value: U256::ZERO, - data, - // Setting the gas price to zero enforces that no value is transferred as part of - // the call, and that the call will not count against the block's - // gas limit - gas_price: 0, - // The chain ID check is not relevant here and is disabled if set to None - chain_id: None, - // Setting the gas priority fee to None ensures the effective gas price is derived - // from the `gas_price` field, which we need to be zero - gas_priority_fee: None, - access_list: Default::default(), - // blob fields can be None for this tx - blob_hashes: Vec::new(), - max_fee_per_blob_gas: 0, - tx_type: 0, - authorization_list: Default::default(), - }, - rlp_bytes: Some(Default::default()), - // System transactions (similar to L1MessageTx) do not pay a rollup fee, - // so this field is not used; we just set it to the default value. 
- compression_ratio: Some(TX_L1_FEE_PRECISION_U256), - }; - - let mut gas_limit = tx.base.gas_limit; - let mut basefee = 0; - let mut disable_nonce_check = true; - - // ensure the block gas limit is >= the tx - core::mem::swap(&mut self.block.gas_limit, &mut gas_limit); - // disable the base fee check for this call by setting the base fee to zero - core::mem::swap(&mut self.block.basefee, &mut basefee); - // disable the nonce check - core::mem::swap(&mut self.cfg.disable_nonce_check, &mut disable_nonce_check); - - let mut res = self.transact(ScrollTransactionIntoTxEnv::from(tx)); - - // swap back to the previous gas limit - core::mem::swap(&mut self.block.gas_limit, &mut gas_limit); - // swap back to the previous base fee - core::mem::swap(&mut self.block.basefee, &mut basefee); - // swap back to the previous nonce check flag - core::mem::swap(&mut self.cfg.disable_nonce_check, &mut disable_nonce_check); - - // NOTE: We assume that only the contract storage is modified. Revm currently marks the - // caller and block beneficiary accounts as "touched" when we do the above transact calls, - // and includes them in the result. - // - // We're doing this state cleanup to make sure that changeset only includes the changed - // contract storage. - // Specifically prevents incorrect nonce increment for system contract caller. - if let Ok(res) = &mut res { - res.state.retain(|addr, _| *addr == contract); - } - - res + self.inner.transact_system_call_with_caller_finalize(caller, contract, data) } fn db_mut(&mut self) -> &mut Self::DB { @@ -237,6 +174,22 @@ where fn inspector_mut(&mut self) -> &mut Self::Inspector { &mut self.inner.0.inspector } + + fn components(&self) -> (&Self::DB, &Self::Inspector, &Self::Precompiles) { + ( + &self.inner.0.ctx.journaled_state.database, + &self.inner.0.inspector, + &self.inner.0.precompiles, + ) + } + + fn components_mut(&mut self) -> (&mut Self::DB, &mut Self::Inspector, &mut Self::Precompiles) { + ( + &mut self.inner.0.ctx.journaled_state.database, + &mut self.inner.0.inspector, + &mut self.inner.0.precompiles, + ) + } } /// Factory producing [`ScrollEvm`]s. diff --git a/crates/scroll/evm/src/lib.rs b/crates/scroll/evm/src/lib.rs index 2268a1d7a03..daf887af4ce 100644 --- a/crates/scroll/evm/src/lib.rs +++ b/crates/scroll/evm/src/lib.rs @@ -158,3 +158,16 @@ pub struct ScrollNextBlockEnvAttributes { /// The base fee of the next block. 
pub base_fee: u64, } + +impl reth_rpc_eth_api::helpers::pending_block::BuildPendingEnv + for ScrollNextBlockEnvAttributes +{ + fn build_pending_env(parent: &reth_primitives_traits::SealedHeader) -> Self { + Self { + timestamp: parent.timestamp().saturating_add(1), + suggested_fee_recipient: parent.beneficiary(), + gas_limit: parent.gas_limit(), + base_fee: parent.base_fee_per_gas().unwrap_or_default(), + } + } +} diff --git a/crates/scroll/node/src/builder/engine.rs b/crates/scroll/node/src/builder/engine.rs index c91fe467d01..97085fdecfb 100644 --- a/crates/scroll/node/src/builder/engine.rs +++ b/crates/scroll/node/src/builder/engine.rs @@ -65,7 +65,7 @@ where fn validate_version_specific_fields( &self, _version: EngineApiMessageVersion, - payload_or_attrs: PayloadOrAttributes<'_, Self::ExecutionData, ScrollPayloadAttributes>, + payload_or_attrs: PayloadOrAttributes<'_, Types::ExecutionData, ScrollPayloadAttributes>, ) -> Result<(), EngineObjectValidationError> { validate_scroll_payload_or_attributes( &payload_or_attrs, @@ -95,17 +95,6 @@ where Ok(()) } - - fn validate_payload_attributes_against_header( - &self, - attr: &::PayloadAttributes, - header: &::Header, - ) -> Result<(), InvalidPayloadAttributesError> { - if attr.timestamp() < header.timestamp() { - return Err(InvalidPayloadAttributesError::InvalidTimestamp); - } - Ok(()) - } } /// Validates the payload or attributes for Scroll. @@ -125,9 +114,11 @@ fn validate_scroll_payload_or_attributes( Ok(()) } -impl PayloadValidator for ScrollEngineValidator { +impl PayloadValidator for ScrollEngineValidator +where + Types: PayloadTypes, +{ type Block = ScrollBlock; - type ExecutionData = ExecutionData; fn ensure_well_formed_payload( &self, @@ -163,4 +154,15 @@ impl PayloadValidator for ScrollEngineValidator { Err(PayloadError::BlockHash { execution: block_hash_no_turn, consensus: expected_hash } .into()) } + + fn validate_payload_attributes_against_header( + &self, + attr: &Types::PayloadAttributes, + header: &::Header, + ) -> Result<(), InvalidPayloadAttributesError> { + if attr.timestamp() < header.timestamp() { + return Err(InvalidPayloadAttributesError::InvalidTimestamp); + } + Ok(()) + } } From daa0600b4be550db77d604552b1506ae859f42af Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Tue, 29 Jul 2025 13:39:34 +0200 Subject: [PATCH 296/305] feat: eth api update Signed-off-by: Gregory Edison --- .../src/transaction/signed.rs | 30 -- crates/rpc/rpc-convert/src/rpc.rs | 32 ++ crates/scroll/rpc/src/eth/block.rs | 81 +---- crates/scroll/rpc/src/eth/call.rs | 53 +-- crates/scroll/rpc/src/eth/mod.rs | 311 +++++++++--------- crates/scroll/rpc/src/eth/pending_block.rs | 72 +--- crates/scroll/rpc/src/eth/receipt.rs | 108 +++--- crates/scroll/rpc/src/eth/transaction.rs | 73 ++-- 8 files changed, 319 insertions(+), 441 deletions(-) diff --git a/crates/primitives-traits/src/transaction/signed.rs b/crates/primitives-traits/src/transaction/signed.rs index 329eaf2e7f1..d45edc3031b 100644 --- a/crates/primitives-traits/src/transaction/signed.rs +++ b/crates/primitives-traits/src/transaction/signed.rs @@ -190,20 +190,6 @@ mod scroll { Self::Eip7702(tx) => tx.hash(), } } - - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - match self { - Self::Legacy(tx) => tx.tx().encode_for_signing(buf), - Self::Eip2930(tx) => tx.tx().encode_for_signing(buf), - Self::Eip1559(tx) => tx.tx().encode_for_signing(buf), - Self::Eip7702(tx) => tx.tx().encode_for_signing(buf), - } - let signature_hash = keccak256(buf); - 
recover_signer_unchecked(self.signature(), signature_hash) - } } impl SignedTransaction for ScrollTxEnvelope { @@ -216,21 +202,5 @@ mod scroll { Self::L1Message(tx) => tx.hash_ref(), } } - - fn recover_signer_unchecked_with_buf( - &self, - buf: &mut Vec, - ) -> Result { - match self { - Self::Legacy(tx) => tx.tx().encode_for_signing(buf), - Self::Eip2930(tx) => tx.tx().encode_for_signing(buf), - Self::Eip1559(tx) => tx.tx().encode_for_signing(buf), - Self::Eip7702(tx) => tx.tx().encode_for_signing(buf), - Self::L1Message(tx) => return Ok(tx.sender), - } - let signature_hash = keccak256(buf); - let signature = self.signature().expect("handled L1 message in previous match"); - recover_signer_unchecked(&signature, signature_hash) - } } } diff --git a/crates/rpc/rpc-convert/src/rpc.rs b/crates/rpc/rpc-convert/src/rpc.rs index bd5555a3013..180f5150b6e 100644 --- a/crates/rpc/rpc-convert/src/rpc.rs +++ b/crates/rpc/rpc-convert/src/rpc.rs @@ -130,3 +130,35 @@ impl SignableTxRequest Ok(signed) } } + +#[cfg(feature = "scroll")] +impl SignableTxRequest + for scroll_alloy_rpc_types::ScrollTransactionRequest +{ + async fn try_build_and_sign( + self, + signer: impl TxSigner + Send, + ) -> Result { + let mut tx = + self.build_typed_tx().map_err(|_| SignTxRequestError::InvalidTransactionRequest)?; + let signature = signer.sign_transaction(&mut tx).await?; + let signed = match tx { + scroll_alloy_consensus::ScrollTypedTransaction::Legacy(tx) => { + scroll_alloy_consensus::ScrollTxEnvelope::Legacy(tx.into_signed(signature)) + } + scroll_alloy_consensus::ScrollTypedTransaction::Eip2930(tx) => { + scroll_alloy_consensus::ScrollTxEnvelope::Eip2930(tx.into_signed(signature)) + } + scroll_alloy_consensus::ScrollTypedTransaction::Eip1559(tx) => { + scroll_alloy_consensus::ScrollTxEnvelope::Eip1559(tx.into_signed(signature)) + } + scroll_alloy_consensus::ScrollTypedTransaction::Eip7702(tx) => { + scroll_alloy_consensus::ScrollTxEnvelope::Eip7702(tx.into_signed(signature)) + } + scroll_alloy_consensus::ScrollTypedTransaction::L1Message(_) => { + return Err(SignTxRequestError::InvalidTransactionRequest); + } + }; + Ok(signed) + } +} diff --git a/crates/scroll/rpc/src/eth/block.rs b/crates/scroll/rpc/src/eth/block.rs index 49bcd5b29e2..7dc8ec9951a 100644 --- a/crates/scroll/rpc/src/eth/block.rs +++ b/crates/scroll/rpc/src/eth/block.rs @@ -1,81 +1,26 @@ //! Loads and formats Scroll block RPC response. 
-use crate::{eth::ScrollNodeCore, ScrollEthApi, ScrollEthApiError, ScrollReceiptBuilder}; +use crate::{ScrollEthApi, ScrollEthApiError}; -use alloy_consensus::BlockHeader; -use alloy_rpc_types_eth::BlockId; -use reth_chainspec::ChainSpecProvider; -use reth_node_api::BlockBody; -use reth_primitives::TransactionMeta; -use reth_primitives_traits::SignedTransaction; -use reth_provider::{BlockReader, HeaderProvider, ProviderTx}; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{EthBlocks, LoadBlock, LoadPendingBlock, LoadReceipt, SpawnBlocking}, - types::RpcTypes, - RpcReceipt, + helpers::{EthBlocks, LoadBlock}, + RpcNodeCore, }; -use reth_scroll_chainspec::ScrollChainSpec; -use reth_scroll_primitives::{ScrollReceipt, ScrollTransactionSigned}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use scroll_alloy_rpc_types::ScrollTransactionReceipt; +use reth_rpc_eth_types::error::FromEvmError; -impl EthBlocks for ScrollEthApi +impl EthBlocks for ScrollEthApi where - Self: LoadBlock< - Error = ScrollEthApiError, - NetworkTypes: RpcTypes, - Provider: BlockReader, - >, - N: ScrollNodeCore + HeaderProvider>, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: RpcConvert, { - async fn block_receipts( - &self, - block_id: BlockId, - ) -> Result>>, Self::Error> - where - Self: LoadReceipt, - { - if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? { - let block_number = block.number(); - let base_fee = block.base_fee_per_gas(); - let block_hash = block.hash(); - let excess_blob_gas = block.excess_blob_gas(); - let timestamp = block.timestamp(); - - return block - .body() - .transactions() - .iter() - .zip(receipts.iter()) - .enumerate() - .map(|(idx, (tx, receipt))| -> Result<_, _> { - let meta = TransactionMeta { - tx_hash: *tx.tx_hash(), - index: idx as u64, - block_hash, - block_number, - base_fee, - excess_blob_gas, - timestamp, - }; - ScrollReceiptBuilder::new(tx, meta, receipt, &receipts) - .map(|builder| builder.build()) - }) - .collect::, Self::Error>>() - .map(Some) - } - - Ok(None) - } } -impl LoadBlock for ScrollEthApi +impl LoadBlock for ScrollEthApi where - Self: LoadPendingBlock< - Pool: TransactionPool< - Transaction: PoolTransaction>, - >, - > + SpawnBlocking, - N: ScrollNodeCore, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: RpcConvert, { } diff --git a/crates/scroll/rpc/src/eth/call.rs b/crates/scroll/rpc/src/eth/call.rs index 2181ed0f01f..99249412db1 100644 --- a/crates/scroll/rpc/src/eth/call.rs +++ b/crates/scroll/rpc/src/eth/call.rs @@ -1,53 +1,36 @@ -use super::ScrollNodeCore; use crate::{ScrollEthApi, ScrollEthApiError}; -use alloy_rpc_types_eth::transaction::TransactionRequest; -use reth_evm::{block::BlockExecutorFactory, ConfigureEvm, EvmFactory, TxEnvFor}; -use reth_primitives_traits::NodePrimitives; -use reth_provider::{errors::ProviderError, ProviderHeader, ProviderTx}; +use reth_evm::TxEnvFor; use reth_rpc_eth_api::{ - helpers::{estimate::EstimateCall, Call, EthCall, LoadBlock, LoadState, SpawnBlocking}, - FullEthApiTypes, RpcConvert, RpcTypes, + helpers::{estimate::EstimateCall, Call, EthCall}, + RpcConvert, RpcNodeCore, }; use reth_rpc_eth_types::error::FromEvmError; -use revm::context::TxEnv; -use scroll_alloy_evm::ScrollTransactionIntoTxEnv; -impl EthCall for ScrollEthApi +impl EthCall for ScrollEthApi where - Self: EstimateCall + LoadBlock + FullEthApiTypes, - N: ScrollNodeCore, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: + RpcConvert>, { } -impl EstimateCall for 
ScrollEthApi +impl EstimateCall for ScrollEthApi where - Self: Call, - Self::Error: From, - N: ScrollNodeCore, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: + RpcConvert>, { } -impl Call for ScrollEthApi +impl Call for ScrollEthApi where - Self: LoadState< - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - BlockExecutorFactory: BlockExecutorFactory< - EvmFactory: EvmFactory>, - >, - >, - RpcConvert: RpcConvert, Network = Self::NetworkTypes>, - NetworkTypes: RpcTypes>, - Error: FromEvmError - + From<::Error> - + From, - > + SpawnBlocking, - Self::Error: From, - N: ScrollNodeCore, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: + RpcConvert>, { #[inline] fn call_gas_limit(&self) -> u64 { diff --git a/crates/scroll/rpc/src/eth/mod.rs b/crates/scroll/rpc/src/eth/mod.rs index e78754e2167..368493ede31 100644 --- a/crates/scroll/rpc/src/eth/mod.rs +++ b/crates/scroll/rpc/src/eth/mod.rs @@ -1,55 +1,41 @@ //! Scroll-Reth `eth_` endpoint implementation. +use crate::{ + eth::{receipt::ScrollReceiptConverter, transaction::ScrollTxInfoMapper}, + ScrollEthApiError, SequencerClient, +}; use alloy_primitives::U256; use eyre::WrapErr; -use reth_chainspec::{EthChainSpec, EthereumHardforks}; +pub use receipt::ScrollReceiptBuilder; use reth_evm::ConfigureEvm; -use reth_network_api::NetworkInfo; -use reth_node_api::FullNodeComponents; -use reth_provider::{ - BlockNumReader, BlockReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, - ProviderBlock, ProviderHeader, ProviderReceipt, ProviderTx, StageCheckpointReader, - StateProviderFactory, -}; +use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy}; +use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; +use reth_provider::{BlockReader, ProviderHeader, ProviderTx}; use reth_rpc::eth::{core::EthApiInner, DevSigner}; +use reth_rpc_convert::{RpcConvert, RpcConverter, RpcTypes, SignableTxRequest}; use reth_rpc_eth_api::{ helpers::{ - AddDevSigners, EthApiSpec, EthFees, EthSigner, EthState, LoadBlock, LoadFee, LoadState, - SpawnBlocking, Trace, + pending_block::BuildPendingEnv, spec::SignersForApi, AddDevSigners, EthApiSpec, EthFees, + EthState, LoadFee, LoadState, SpawnBlocking, Trace, }, - EthApiTypes, FullEthApiServer, RpcConverter, RpcNodeCore, RpcNodeCoreExt, + EthApiTypes, FullEthApiServer, RpcNodeCore, RpcNodeCoreExt, }; -use reth_rpc_eth_types::{EthStateCache, FeeHistoryCache, GasPriceOracle}; +use reth_rpc_eth_types::{error::FromEvmError, EthStateCache, FeeHistoryCache, GasPriceOracle}; use reth_tasks::{ pool::{BlockingTaskGuard, BlockingTaskPool}, TaskSpawner, }; -use reth_transaction_pool::TransactionPool; +use scroll_alloy_network::Scroll; use std::{fmt, marker::PhantomData, sync::Arc}; -use crate::{eth::transaction::ScrollTxInfoMapper, ScrollEthApiError}; -pub use receipt::ScrollReceiptBuilder; -use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; -use reth_primitives_traits::NodePrimitives; -use reth_rpc_eth_types::error::FromEvmError; -use reth_scroll_primitives::ScrollPrimitives; -use scroll_alloy_network::{Network, Scroll}; - mod block; mod call; mod pending_block; pub mod receipt; pub mod transaction; -use crate::SequencerClient; - /// Adapter for [`EthApiInner`], which holds all the data required to serve core `eth_` API. 
-pub type EthApiNodeBackend = EthApiInner< - ::Provider, - ::Pool, - ::Network, - ::Evm, ->; +pub type EthApiNodeBackend = EthApiInner; /// A helper trait with requirements for [`RpcNodeCore`] to be used in [`ScrollEthApi`]. pub trait ScrollNodeCore: RpcNodeCore {} @@ -66,38 +52,36 @@ impl ScrollNodeCore for T where T: RpcNodeCore {} /// This type implements the [`FullEthApi`](reth_rpc_eth_api::helpers::FullEthApi) by implemented /// all the `Eth` helper traits and prerequisite traits. #[derive(Clone)] -pub struct ScrollEthApi { +pub struct ScrollEthApi { /// Gateway to node's core components. - inner: Arc>, - /// Marker for the network types. - _nt: PhantomData, - tx_resp_builder: RpcConverter>, + inner: Arc>, } -impl ScrollEthApi { +impl ScrollEthApi { /// Creates a new [`ScrollEthApi`]. - pub fn new(eth_api: EthApiNodeBackend, sequencer_client: Option) -> Self { - let inner = Arc::new(ScrollEthApiInner { eth_api, sequencer_client }); - Self { - inner: inner.clone(), - _nt: PhantomData, - tx_resp_builder: RpcConverter::with_mapper(ScrollTxInfoMapper::new(inner)), - } + pub fn new( + eth_api: EthApiNodeBackend, + sequencer_client: Option, + min_suggested_priority_fee: U256, + payload_size_limit: u64, + ) -> Self { + let inner = Arc::new(ScrollEthApiInner { + eth_api, + min_suggested_priority_fee, + payload_size_limit, + sequencer_client, + }); + Self { inner } } } -impl ScrollEthApi +impl ScrollEthApi where - N: ScrollNodeCore< - Provider: BlockReaderIdExt - + ChainSpecProvider - + CanonStateSubscriptions - + Clone - + 'static, - >, + N: RpcNodeCore, + Rpc: RpcConvert, { /// Returns a reference to the [`EthApiNodeBackend`]. - pub fn eth_api(&self) -> &EthApiNodeBackend { + pub fn eth_api(&self) -> &EthApiNodeBackend { self.inner.eth_api() } @@ -112,34 +96,30 @@ where } } -impl EthApiTypes for ScrollEthApi +impl EthApiTypes for ScrollEthApi where - Self: Send + Sync + fmt::Debug, - N: ScrollNodeCore, - NetworkT: Network + Clone + fmt::Debug, - ::Evm: fmt::Debug, - ::Primitives: fmt::Debug, + N: RpcNodeCore, + Rpc: RpcConvert, { type Error = ScrollEthApiError; - type NetworkTypes = Scroll; - type RpcConvert = RpcConverter>; + type NetworkTypes = Rpc::Network; + type RpcConvert = Rpc; fn tx_resp_builder(&self) -> &Self::RpcConvert { - &self.tx_resp_builder + self.inner.eth_api.tx_resp_builder() } } -impl RpcNodeCore for ScrollEthApi +impl RpcNodeCore for ScrollEthApi where - N: ScrollNodeCore, - NetworkT: Network, + N: RpcNodeCore, + Rpc: RpcConvert, { type Primitives = N::Primitives; type Provider = N::Provider; type Pool = N::Pool; type Evm = ::Evm; type Network = ::Network; - type PayloadBuilder = (); #[inline] fn pool(&self) -> &Self::Pool { @@ -156,39 +136,30 @@ where self.inner.eth_api.network() } - #[inline] - fn payload_builder(&self) -> &Self::PayloadBuilder { - &() - } - #[inline] fn provider(&self) -> &Self::Provider { self.inner.eth_api.provider() } } -impl RpcNodeCoreExt for ScrollEthApi +impl RpcNodeCoreExt for ScrollEthApi where - N: ScrollNodeCore, - NetworkT: Network, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] - fn cache(&self) -> &EthStateCache, ProviderReceipt> { + fn cache(&self) -> &EthStateCache { self.inner.eth_api.cache() } } -impl EthApiSpec for ScrollEthApi +impl EthApiSpec for ScrollEthApi where - N: ScrollNodeCore< - Provider: ChainSpecProvider - + BlockNumReader - + StageCheckpointReader, - Network: NetworkInfo, - >, - NetworkT: Network, + N: RpcNodeCore, + Rpc: RpcConvert, { type Transaction = ProviderTx; + type Rpc = Rpc::Network; #[inline] fn 
starting_block(&self) -> U256 { @@ -196,18 +167,15 @@ where } #[inline] - fn signers(&self) -> &parking_lot::RwLock>>>> { + fn signers(&self) -> &SignersForApi { self.inner.eth_api.signers() } } -impl SpawnBlocking for ScrollEthApi +impl SpawnBlocking for ScrollEthApi where - Self: Send + Sync + Clone + 'static, - N: ScrollNodeCore, - NetworkT: Network, - ::Evm: fmt::Debug, - ::Primitives: fmt::Debug, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] fn io_task_spawner(&self) -> impl TaskSpawner { @@ -225,14 +193,11 @@ where } } -impl LoadFee for ScrollEthApi +impl LoadFee for ScrollEthApi where - Self: LoadBlock, - N: ScrollNodeCore< - Provider: BlockReaderIdExt - + ChainSpecProvider - + StateProviderFactory, - >, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] fn gas_oracle(&self) -> &GasPriceOracle { @@ -245,22 +210,17 @@ where } } -impl LoadState for ScrollEthApi +impl LoadState for ScrollEthApi where - N: ScrollNodeCore< - Provider: StateProviderFactory + ChainSpecProvider, - Pool: TransactionPool, - >, - NetworkT: Network, - ::Evm: fmt::Debug, - ::Primitives: fmt::Debug, + N: RpcNodeCore, + Rpc: RpcConvert, { } -impl EthState for ScrollEthApi +impl EthState for ScrollEthApi where - Self: LoadState + SpawnBlocking, - N: ScrollNodeCore, + N: RpcNodeCore, + Rpc: RpcConvert, { #[inline] fn max_proof_window(&self) -> u64 { @@ -268,43 +228,35 @@ where } } -impl EthFees for ScrollEthApi +impl EthFees for ScrollEthApi where - Self: LoadFee< - Provider: ChainSpecProvider< - ChainSpec: EthChainSpec
>, - >, - >, - N: ScrollNodeCore, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: RpcConvert, { } -impl Trace for ScrollEthApi +impl Trace for ScrollEthApi where - Self: RpcNodeCore - + LoadState< - Evm: ConfigureEvm< - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - >, - >, - Error: FromEvmError, - >, - N: ScrollNodeCore, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: RpcConvert, { } -impl AddDevSigners for ScrollEthApi +impl AddDevSigners for ScrollEthApi where - N: ScrollNodeCore, + N: RpcNodeCore, + Rpc: RpcConvert< + Network: RpcTypes>>, + >, { fn with_dev_accounts(&self) { *self.inner.eth_api.signers().write() = DevSigner::random_signers(20) } } -impl fmt::Debug for ScrollEthApi { +impl fmt::Debug for ScrollEthApi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ScrollEthApi").finish_non_exhaustive() } @@ -312,17 +264,21 @@ impl fmt::Debug for ScrollEthApi { /// Container type `ScrollEthApi` #[allow(missing_debug_implementations)] -pub struct ScrollEthApiInner { +pub struct ScrollEthApiInner { /// Gateway to node's core components. - pub eth_api: EthApiNodeBackend, + pub eth_api: EthApiNodeBackend, /// Sequencer client, configured to forward submitted transactions to sequencer of given Scroll /// network. sequencer_client: Option, + /// Minimum priority fee + min_suggested_priority_fee: U256, + /// Maximum payload size + payload_size_limit: u64, } -impl ScrollEthApiInner { +impl ScrollEthApiInner { /// Returns a reference to the [`EthApiNodeBackend`]. - const fn eth_api(&self) -> &EthApiNodeBackend { + const fn eth_api(&self) -> &EthApiNodeBackend { &self.eth_api } @@ -332,18 +288,63 @@ impl ScrollEthApiInner { } } +/// Converter for Scroll RPC types. +pub type ScrollRpcConvert = RpcConverter< + NetworkT, + ::Evm, + ScrollReceiptConverter, + (), + ScrollTxInfoMapper<::Provider>, +>; + /// A type that knows how to build a [`ScrollEthApi`]. -#[derive(Debug, Default)] -pub struct ScrollEthApiBuilder { +#[derive(Debug)] +pub struct ScrollEthApiBuilder { /// Sequencer client, configured to forward submitted transactions to sequencer of given Scroll /// network. sequencer_url: Option, + /// Minimum suggested priority fee (tip) + min_suggested_priority_fee: u64, + /// Maximum payload size + payload_size_limit: u64, + /// Marker for network types. + _nt: PhantomData, +} + +impl Default for ScrollEthApiBuilder { + fn default() -> Self { + Self { + sequencer_url: None, + // TODO (scroll): update default values. + min_suggested_priority_fee: 0, + payload_size_limit: 0, + _nt: PhantomData, + } + } } -impl ScrollEthApiBuilder { +impl ScrollEthApiBuilder { /// Creates a [`ScrollEthApiBuilder`] instance. pub const fn new() -> Self { - Self { sequencer_url: None } + Self { + // TODO (scroll): update default values. + min_suggested_priority_fee: 0, + payload_size_limit: 0, + sequencer_url: None, + _nt: PhantomData, + } + } + + /// With minimum suggested priority fee (tip) + pub const fn with_min_suggested_priority_fee(mut self, min: u64) -> Self { + self.min_suggested_priority_fee = min; + self + } + + /// With payload size limit + pub const fn with_payload_size_limit(mut self, limit: u64) -> Self { + self.payload_size_limit = limit; + self } /// With a [`SequencerClient`]. 
@@ -353,29 +354,22 @@ impl ScrollEthApiBuilder { } } -impl EthApiBuilder for ScrollEthApiBuilder +impl EthApiBuilder for ScrollEthApiBuilder where - N: FullNodeComponents, - ScrollEthApi: FullEthApiServer, + N: FullNodeComponents>>>, + NetworkT: RpcTypes, + ScrollRpcConvert: RpcConvert, + ScrollEthApi>: + FullEthApiServer + AddDevSigners, { - type EthApi = ScrollEthApi; + type EthApi = ScrollEthApi>; async fn build_eth_api(self, ctx: EthApiCtx<'_, N>) -> eyre::Result { - let Self { sequencer_url } = self; - let eth_api = reth_rpc::EthApiBuilder::new( - ctx.components.provider().clone(), - ctx.components.pool().clone(), - ctx.components.network().clone(), - ctx.components.evm_config().clone(), - ) - .eth_cache(ctx.cache) - .task_spawner(ctx.components.task_executor().clone()) - .gas_cap(ctx.config.rpc_gas_cap.into()) - .max_simulate_blocks(ctx.config.rpc_max_simulate_blocks) - .eth_proof_window(ctx.config.eth_proof_window) - .fee_history_cache_config(ctx.config.fee_history_cache) - .proof_permits(ctx.config.proof_permits) - .build_inner(); + let Self { min_suggested_priority_fee, payload_size_limit, sequencer_url, .. } = self; + let rpc_converter = RpcConverter::new(ScrollReceiptConverter::default()) + .with_mapper(ScrollTxInfoMapper::new(ctx.components.provider().clone())); + + let eth_api = ctx.eth_api_builder().with_rpc_converter(rpc_converter).build_inner(); let sequencer_client = if let Some(url) = sequencer_url { Some( @@ -387,6 +381,11 @@ where None }; - Ok(ScrollEthApi::new(eth_api, sequencer_client)) + Ok(ScrollEthApi::new( + eth_api, + sequencer_client, + U256::from(min_suggested_priority_fee), + payload_size_limit, + )) } } diff --git a/crates/scroll/rpc/src/eth/pending_block.rs b/crates/scroll/rpc/src/eth/pending_block.rs index 7b0323288ea..21075c77f87 100644 --- a/crates/scroll/rpc/src/eth/pending_block.rs +++ b/crates/scroll/rpc/src/eth/pending_block.rs @@ -1,75 +1,25 @@ //! Loads Scroll pending block for an RPC response. 
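As a rough usage sketch of the reworked `ScrollEthApiBuilder` above: the crate paths are assumed, `with_sequencer` is taken to accept an `Option<String>` (its body is elided in the hunk), and the values mirror the CLI defaults introduced elsewhere in this series:

    use reth_scroll_rpc::eth::ScrollEthApiBuilder;
    use scroll_alloy_network::Scroll;

    fn scroll_eth_api_builder() -> ScrollEthApiBuilder<Scroll> {
        ScrollEthApiBuilder::<Scroll>::new()
            // Forward locally submitted transactions to this sequencer endpoint.
            .with_sequencer(Some("http://localhost:8545".to_string()))
            // Mirrors the `scroll.min-suggested-priority-fee` CLI default.
            .with_min_suggested_priority_fee(1_000_000)
            // Mirrors SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT (about 122kB).
            .with_payload_size_limit(122 * 1024)
    }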
-use crate::ScrollEthApi; - -use alloy_consensus::{BlockHeader, Header}; -use reth_chainspec::EthChainSpec; -use reth_evm::ConfigureEvm; -use reth_primitives_traits::{NodePrimitives, SealedHeader}; -use reth_provider::{ - BlockReaderIdExt, ChainSpecProvider, ProviderBlock, ProviderHeader, ProviderReceipt, - ProviderTx, StateProviderFactory, -}; +use crate::{ScrollEthApi, ScrollEthApiError}; use reth_rpc_eth_api::{ - helpers::{LoadPendingBlock, SpawnBlocking}, - types::RpcTypes, - EthApiTypes, RpcConvert, RpcNodeCore, + helpers::{pending_block::PendingEnvBuilder, LoadPendingBlock}, + RpcConvert, RpcNodeCore, }; use reth_rpc_eth_types::{error::FromEvmError, PendingBlock}; -use reth_scroll_evm::ScrollNextBlockEnvAttributes; -use reth_scroll_primitives::{ScrollBlock, ScrollReceipt, ScrollTransactionSigned}; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; -use scroll_alloy_hardforks::ScrollHardforks; -impl LoadPendingBlock for ScrollEthApi +impl LoadPendingBlock for ScrollEthApi where - Self: SpawnBlocking - + EthApiTypes< - NetworkTypes: RpcTypes< - Header = alloy_rpc_types_eth::Header>, - >, - Error: FromEvmError, - RpcConvert: RpcConvert, - >, - N: RpcNodeCore< - Provider: BlockReaderIdExt< - Transaction = ScrollTransactionSigned, - Block = ScrollBlock, - Receipt = ScrollReceipt, - Header = Header, - > + ChainSpecProvider - + StateProviderFactory, - Pool: TransactionPool>>, - Evm: ConfigureEvm< - Primitives = ::Primitives, - NextBlockEnvCtx = ScrollNextBlockEnvAttributes, - >, - Primitives: NodePrimitives< - BlockHeader = ProviderHeader, - SignedTx = ProviderTx, - Receipt = ProviderReceipt, - Block = ProviderBlock, - >, - >, + N: RpcNodeCore, + ScrollEthApiError: FromEvmError, + Rpc: RpcConvert, { #[inline] - fn pending_block( - &self, - ) -> &tokio::sync::Mutex< - Option, ProviderReceipt>>, - > { + fn pending_block(&self) -> &tokio::sync::Mutex>> { self.inner.eth_api.pending_block() } - fn next_env_attributes( - &self, - parent: &SealedHeader>, - ) -> Result<::NextBlockEnvCtx, Self::Error> { - Ok(ScrollNextBlockEnvAttributes { - timestamp: parent.timestamp().saturating_add(3), - suggested_fee_recipient: parent.beneficiary(), - gas_limit: parent.gas_limit(), - base_fee: parent.base_fee_per_gas().unwrap_or_default(), - }) + #[inline] + fn pending_env_builder(&self) -> &dyn PendingEnvBuilder { + self.inner.eth_api.pending_env_builder() } } diff --git a/crates/scroll/rpc/src/eth/receipt.rs b/crates/scroll/rpc/src/eth/receipt.rs index 254fb1f546b..16d5ac93d60 100644 --- a/crates/scroll/rpc/src/eth/receipt.rs +++ b/crates/scroll/rpc/src/eth/receipt.rs @@ -2,42 +2,48 @@ use crate::{ScrollEthApi, ScrollEthApiError}; use alloy_rpc_types_eth::{Log, TransactionReceipt}; -use reth_node_api::{FullNodeComponents, NodeTypes}; -use reth_primitives::TransactionMeta; -use reth_provider::{ReceiptProvider, TransactionsProvider}; -use reth_rpc_eth_api::{helpers::LoadReceipt, FromEthApiError, RpcReceipt}; -use reth_rpc_eth_types::{receipt::build_receipt, EthApiError}; - -use reth_scroll_chainspec::ScrollChainSpec; +use reth_primitives_traits::NodePrimitives; +use reth_rpc_convert::{ + transaction::{ConvertReceiptInput, ReceiptConverter}, + RpcConvert, +}; +use reth_rpc_eth_api::{helpers::LoadReceipt, RpcNodeCore}; +use reth_rpc_eth_types::receipt::build_receipt; use reth_scroll_primitives::{ScrollReceipt, ScrollTransactionSigned}; use scroll_alloy_consensus::ScrollReceiptEnvelope; use scroll_alloy_rpc_types::{ScrollTransactionReceipt, ScrollTransactionReceiptFields}; +use std::fmt::Debug; + +impl 
LoadReceipt for ScrollEthApi +where + N: RpcNodeCore, + Rpc: RpcConvert, +{ +} + +/// Converter for Scroll receipts. +#[derive(Debug, Default, Clone)] +#[non_exhaustive] +pub struct ScrollReceiptConverter; -impl LoadReceipt for ScrollEthApi +impl ReceiptConverter for ScrollReceiptConverter where - Self: Send + Sync, - N: FullNodeComponents>, - Self::Provider: TransactionsProvider - + ReceiptProvider, + N: NodePrimitives, { - async fn build_transaction_receipt( + type RpcReceipt = ScrollTransactionReceipt; + type Error = ScrollEthApiError; + + fn convert_receipts( &self, - tx: ScrollTransactionSigned, - meta: TransactionMeta, - receipt: ScrollReceipt, - ) -> Result, Self::Error> { - let all_receipts = self - .inner - .eth_api - .cache() - .get_receipts(meta.block_hash) - .await - .map_err(Self::Error::from_eth_err)? - .ok_or(Self::Error::from_eth_err(EthApiError::HeaderNotFound( - meta.block_hash.into(), - )))?; + inputs: Vec>, + ) -> Result, Self::Error> { + let mut receipts = Vec::with_capacity(inputs.len()); + + for input in inputs { + receipts.push(ScrollReceiptBuilder::new(input)?.build()); + } - Ok(ScrollReceiptBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) + Ok(receipts) } } @@ -52,35 +58,31 @@ pub struct ScrollReceiptBuilder { impl ScrollReceiptBuilder { /// Returns a new builder. - pub fn new( - transaction: &ScrollTransactionSigned, - meta: TransactionMeta, - receipt: &ScrollReceipt, - all_receipts: &[ScrollReceipt], - ) -> Result { + pub fn new(input: ConvertReceiptInput<'_, N>) -> Result + where + N: NodePrimitives, + { let core_receipt = - build_receipt(transaction, meta, receipt, all_receipts, None, |receipt_with_bloom| { - match receipt { - ScrollReceipt::Legacy(_) => { - ScrollReceiptEnvelope::::Legacy(receipt_with_bloom) - } - ScrollReceipt::Eip2930(_) => { - ScrollReceiptEnvelope::::Eip2930(receipt_with_bloom) - } - ScrollReceipt::Eip1559(_) => { - ScrollReceiptEnvelope::::Eip1559(receipt_with_bloom) - } - ScrollReceipt::Eip7702(_) => { - ScrollReceiptEnvelope::::Eip7702(receipt_with_bloom) - } - ScrollReceipt::L1Message(_) => { - ScrollReceiptEnvelope::::L1Message(receipt_with_bloom) - } + build_receipt(&input, None, |receipt_with_bloom| match input.receipt.as_ref() { + ScrollReceipt::Legacy(_) => { + ScrollReceiptEnvelope::::Legacy(receipt_with_bloom) + } + ScrollReceipt::Eip2930(_) => { + ScrollReceiptEnvelope::::Eip2930(receipt_with_bloom) + } + ScrollReceipt::Eip1559(_) => { + ScrollReceiptEnvelope::::Eip1559(receipt_with_bloom) + } + ScrollReceipt::Eip7702(_) => { + ScrollReceiptEnvelope::::Eip7702(receipt_with_bloom) + } + ScrollReceipt::L1Message(_) => { + ScrollReceiptEnvelope::::L1Message(receipt_with_bloom) } - })?; + }); let scroll_receipt_fields = - ScrollTransactionReceiptFields { l1_fee: Some(receipt.l1_fee().saturating_to()) }; + ScrollTransactionReceiptFields { l1_fee: Some(input.receipt.l1_fee().saturating_to()) }; Ok(Self { core_receipt, scroll_receipt_fields }) } diff --git a/crates/scroll/rpc/src/eth/transaction.rs b/crates/scroll/rpc/src/eth/transaction.rs index 7a749f891da..1364b1ee642 100644 --- a/crates/scroll/rpc/src/eth/transaction.rs +++ b/crates/scroll/rpc/src/eth/transaction.rs @@ -1,36 +1,29 @@ //! Loads and formats Scroll transaction RPC response. 
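In isolation, the `ScrollReceiptConverter` above can be exercised roughly as follows; the bounds on `N` are kept opaque behind a `where` clause because the hunk elides the exact generic parameters, and the helper and its paths are illustrative only:

    use reth_rpc_convert::transaction::{ConvertReceiptInput, ReceiptConverter};
    use reth_scroll_rpc::{eth::receipt::ScrollReceiptConverter, ScrollEthApiError};
    use scroll_alloy_rpc_types::ScrollTransactionReceipt;

    fn convert_block_receipts<N>(
        inputs: Vec<ConvertReceiptInput<'_, N>>,
    ) -> Result<Vec<ScrollTransactionReceipt>, ScrollEthApiError>
    where
        ScrollReceiptConverter: ReceiptConverter<
            N,
            RpcReceipt = ScrollTransactionReceipt,
            Error = ScrollEthApiError,
        >,
    {
        // Each input carries the transaction, its metadata, and the receipt;
        // the converter attaches the Scroll-specific `l1_fee` field on top of
        // the core receipt built by `ScrollReceiptBuilder`.
        ScrollReceiptConverter::default().convert_receipts(inputs)
    }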
-use crate::{ - eth::{ScrollEthApiInner, ScrollNodeCore}, - ScrollEthApi, ScrollEthApiError, SequencerClient, -}; +use crate::{ScrollEthApi, ScrollEthApiError, SequencerClient}; use alloy_consensus::transaction::TransactionInfo; use alloy_primitives::{Bytes, B256}; use reth_evm::execute::ProviderError; -use reth_node_api::FullNodeComponents; -use reth_provider::{ - BlockReader, BlockReaderIdExt, ProviderTx, ReceiptProvider, TransactionsProvider, -}; +use reth_provider::ReceiptProvider; +use reth_rpc_convert::RpcConvert; use reth_rpc_eth_api::{ - helpers::{EthSigner, EthTransactions, LoadTransaction, SpawnBlocking}, - try_into_scroll_tx_info, EthApiTypes, FromEthApiError, FullEthApiTypes, RpcNodeCore, - RpcNodeCoreExt, TxInfoMapper, + helpers::{spec::SignersForRpc, EthTransactions, LoadTransaction}, + try_into_scroll_tx_info, FromEthApiError, RpcNodeCore, TxInfoMapper, }; use reth_rpc_eth_types::utils::recover_raw_transaction; use reth_scroll_primitives::ScrollReceipt; -use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool}; -use scroll_alloy_consensus::{ScrollTransactionInfo, ScrollTxEnvelope}; -use std::{ - fmt::{Debug, Formatter}, - sync::Arc, +use reth_transaction_pool::{ + AddedTransactionOutcome, PoolTransaction, TransactionOrigin, TransactionPool, }; +use scroll_alloy_consensus::{ScrollTransactionInfo, ScrollTxEnvelope}; +use std::fmt::{Debug, Formatter}; -impl EthTransactions for ScrollEthApi +impl EthTransactions for ScrollEthApi where - Self: LoadTransaction + EthApiTypes, - N: ScrollNodeCore>>, + N: RpcNodeCore, + Rpc: RpcConvert, { - fn signers(&self) -> &parking_lot::RwLock>>>> { + fn signers(&self) -> &SignersForRpc { self.inner.eth_api.signers() } @@ -47,7 +40,7 @@ where tracing::debug!(target: "scroll::rpc::eth", hash = %pool_transaction.hash(), "forwarding raw transaction to sequencer"); // Retain tx in local tx pool before forwarding to sequencer rpc, for local RPC usage. - let hash = self + let AddedTransactionOutcome { hash, .. } = self .pool() .add_transaction(TransactionOrigin::Local, pool_transaction.clone()) .await @@ -69,7 +62,7 @@ where } // submit the transaction to the pool with a `Local` origin - let hash = self + let AddedTransactionOutcome { hash, .. } = self .pool() .add_transaction(TransactionOrigin::Local, pool_transaction) .await @@ -79,17 +72,17 @@ where } } -impl LoadTransaction for ScrollEthApi +impl LoadTransaction for ScrollEthApi where - Self: SpawnBlocking + FullEthApiTypes + RpcNodeCoreExt, - N: ScrollNodeCore, - Self::Pool: TransactionPool, + N: RpcNodeCore, + Rpc: RpcConvert, { } -impl ScrollEthApi +impl ScrollEthApi where - N: ScrollNodeCore, + N: RpcNodeCore, + Rpc: RpcConvert, { /// Returns the [`SequencerClient`] if one is set. pub fn raw_tx_forwarder(&self) -> Option { @@ -100,26 +93,30 @@ where /// Scroll implementation of [`TxInfoMapper`]. /// /// Receipt is fetched to extract the `l1_fee` for all transactions but L1 messages. -#[derive(Clone)] -pub struct ScrollTxInfoMapper(Arc>); +pub struct ScrollTxInfoMapper(Provider); + +impl Clone for ScrollTxInfoMapper { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} -impl Debug for ScrollTxInfoMapper { +impl Debug for ScrollTxInfoMapper { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("ScrollTxInfoMapper").finish() } } -impl ScrollTxInfoMapper { +impl ScrollTxInfoMapper { /// Creates [`ScrollTxInfoMapper`] that uses [`ReceiptProvider`] borrowed from given `eth_api`. 
- pub const fn new(eth_api: Arc>) -> Self { - Self(eth_api) + pub const fn new(provider: Provider) -> Self { + Self(provider) } } -impl TxInfoMapper<&ScrollTxEnvelope> for ScrollTxInfoMapper +impl TxInfoMapper<&ScrollTxEnvelope> for ScrollTxInfoMapper where - N: FullNodeComponents, - N::Provider: ReceiptProvider, + Provider: ReceiptProvider, { type Out = ScrollTransactionInfo; type Err = ProviderError; @@ -129,6 +126,6 @@ where tx: &ScrollTxEnvelope, tx_info: TransactionInfo, ) -> Result { - try_into_scroll_tx_info(self.0.eth_api.provider(), tx, tx_info) + try_into_scroll_tx_info(&self.0, tx, tx_info) } } From 01fd5afdb8c9f897140d9a7fc2f597e14afc13d8 Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Tue, 29 Jul 2025 15:55:59 +0200 Subject: [PATCH 297/305] chore: fix lints Signed-off-by: Gregory Edison --- crates/scroll/evm/src/config.rs | 19 +++++++++---------- crates/scroll/evm/src/execute.rs | 4 ++-- crates/scroll/node/src/addons.rs | 2 +- crates/scroll/node/src/args.rs | 2 +- 4 files changed, 13 insertions(+), 14 deletions(-) diff --git a/crates/scroll/evm/src/config.rs b/crates/scroll/evm/src/config.rs index e9baba86dff..4f8adc3f105 100644 --- a/crates/scroll/evm/src/config.rs +++ b/crates/scroll/evm/src/config.rs @@ -155,8 +155,8 @@ mod tests { // prepare all fork heads let curie_head = &Head { number: 7096836, ..Default::default() }; - let bernouilli_head = &Head { number: 5220340, ..Default::default() }; - let pre_bernouilli_head = &Head { number: 0, ..Default::default() }; + let bernoulli_head = &Head { number: 5220340, ..Default::default() }; + let pre_bernoulli_head = &Head { number: 0, ..Default::default() }; // check correct spec id assert_eq!( @@ -164,14 +164,13 @@ mod tests { ScrollSpecId::CURIE ); assert_eq!( - config - .spec_id_at_timestamp_and_number(bernouilli_head.timestamp, bernouilli_head.number), + config.spec_id_at_timestamp_and_number(bernoulli_head.timestamp, bernoulli_head.number), ScrollSpecId::BERNOULLI ); assert_eq!( config.spec_id_at_timestamp_and_number( - pre_bernouilli_head.timestamp, - pre_bernouilli_head.number + pre_bernoulli_head.timestamp, + pre_bernoulli_head.number ), ScrollSpecId::SHANGHAI ); @@ -195,20 +194,20 @@ mod tests { assert_eq!(env.cfg_env.spec, ScrollSpecId::CURIE); // bernoulli - let bernouilli_header = Header { number: 5220340, ..Default::default() }; + let bernoulli_header = Header { number: 5220340, ..Default::default() }; // fill cfg env - let env = config.evm_env(&bernouilli_header); + let env = config.evm_env(&bernoulli_header); // check correct cfg env assert_eq!(env.cfg_env.chain_id, Scroll as u64); assert_eq!(env.cfg_env.spec, ScrollSpecId::BERNOULLI); // pre-bernoulli - let pre_bernouilli_header = Header { number: 0, ..Default::default() }; + let pre_bernoulli_header = Header { number: 0, ..Default::default() }; // fill cfg env - let env = config.evm_env(&pre_bernouilli_header); + let env = config.evm_env(&pre_bernoulli_header); // check correct cfg env assert_eq!(env.cfg_env.chain_id, Scroll as u64); diff --git a/crates/scroll/evm/src/execute.rs b/crates/scroll/evm/src/execute.rs index 9a944ca3199..75ad6c239b3 100644 --- a/crates/scroll/evm/src/execute.rs +++ b/crates/scroll/evm/src/execute.rs @@ -148,9 +148,9 @@ mod tests { ) } - fn transaction(typ: ScrollTxType, gas_limit: u64) -> ScrollTxEnvelope { + fn transaction(ty: ScrollTxType, gas_limit: u64) -> ScrollTxEnvelope { let pk = B256::random(); - match typ { + match ty { ScrollTxType::Legacy => { let tx = TxLegacy { to: TxKind::Call(Address::ZERO), diff --git 
a/crates/scroll/node/src/addons.rs b/crates/scroll/node/src/addons.rs index cd3dcd0429f..8d48748d123 100644 --- a/crates/scroll/node/src/addons.rs +++ b/crates/scroll/node/src/addons.rs @@ -157,7 +157,7 @@ impl Default for ScrollAddOnsBuilder { } impl ScrollAddOnsBuilder { - /// With a [`SequencerClient`]. + /// With a [`reth_scroll_rpc::SequencerClient`]. pub fn with_sequencer(mut self, sequencer_client: Option) -> Self { self.sequencer_url = sequencer_client; self diff --git a/crates/scroll/node/src/args.rs b/crates/scroll/node/src/args.rs index b02e2726e91..9f3e77dbd8d 100644 --- a/crates/scroll/node/src/args.rs +++ b/crates/scroll/node/src/args.rs @@ -11,7 +11,7 @@ pub struct ScrollRollupArgs { #[arg(long = "scroll.min-suggested-priority-fee", default_value_t = 1_000_000)] pub min_suggested_priority_fee: u64, - /// Payload size limit, default to [`SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT`]. + /// Payload size limit, default to `122kB`. #[arg(long = "scroll.payload-size-limit", default_value_t = SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT)] pub payload_size_limit: u64, } From 44cd240fcacd6ae13b3ceb3d298bc94b55b4186f Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Tue, 29 Jul 2025 15:56:32 +0200 Subject: [PATCH 298/305] fix: open vm Signed-off-by: Gregory Edison --- Cargo.lock | 2 +- crates/engine/local/src/payload.rs | 2 +- crates/rpc/rpc-eth-api/Cargo.toml | 4 + .../rpc-eth-api/src/helpers/pending_block.rs | 14 ++ crates/scroll/evm/Cargo.toml | 1 - crates/scroll/evm/src/lib.rs | 13 -- crates/scroll/openvm-compat/Cargo.lock | 193 +++++++++--------- crates/scroll/openvm-compat/Cargo.toml | 2 +- crates/scroll/rpc/Cargo.toml | 2 +- 9 files changed, 124 insertions(+), 109 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 17cdb408cd3..57751396269 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10227,6 +10227,7 @@ dependencies = [ "reth-rpc-convert", "reth-rpc-eth-types", "reth-rpc-server-types", + "reth-scroll-evm", "reth-storage-api", "reth-tasks", "reth-transaction-pool", @@ -10422,7 +10423,6 @@ dependencies = [ "reth-execution-types", "reth-primitives", "reth-primitives-traits", - "reth-rpc-eth-api", "reth-scroll-chainspec", "reth-scroll-forks", "reth-scroll-primitives", diff --git a/crates/engine/local/src/payload.rs b/crates/engine/local/src/payload.rs index 2189d82fabd..eb9a3370aeb 100644 --- a/crates/engine/local/src/payload.rs +++ b/crates/engine/local/src/payload.rs @@ -80,4 +80,4 @@ where gas_limit: None, } } -} \ No newline at end of file +} diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 44637d1931c..a2293b46309 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -31,6 +31,9 @@ reth-network-api.workspace = true reth-node-api.workspace = true reth-trie-common = { workspace = true, features = ["eip1186"] } +# scroll +reth-scroll-evm = { workspace = true, optional = true } + # ethereum alloy-evm = { workspace = true, features = ["overrides", "call-util"] } alloy-rlp.workspace = true @@ -68,3 +71,4 @@ op = [ "reth-rpc-convert/op", "alloy-evm/op", ] +scroll = ["reth-scroll-evm"] diff --git a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs index 0af1a69ee4f..4296157e2bf 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/pending_block.rs @@ -372,3 +372,17 @@ impl BuildPendingEnv for NextBlockEnvAttributes { } } } + +#[cfg(feature = "scroll")] +impl BuildPendingEnv + for 
reth_scroll_evm::ScrollNextBlockEnvAttributes +{ + fn build_pending_env(parent: &reth_primitives_traits::SealedHeader) -> Self { + Self { + timestamp: parent.timestamp().saturating_add(1), + suggested_fee_recipient: parent.beneficiary(), + gas_limit: parent.gas_limit(), + base_fee: parent.base_fee_per_gas().unwrap_or_default(), + } + } +} diff --git a/crates/scroll/evm/Cargo.toml b/crates/scroll/evm/Cargo.toml index bda6cf9e40a..1874d574578 100644 --- a/crates/scroll/evm/Cargo.toml +++ b/crates/scroll/evm/Cargo.toml @@ -18,7 +18,6 @@ reth-evm = { workspace = true, features = ["scroll-alloy-traits"] } reth-execution-types.workspace = true reth-primitives = { workspace = true, features = ["serde-bincode-compat"] } reth-primitives-traits.workspace = true -reth-rpc-eth-api.workspace = true reth-storage-api.workspace = true # revm diff --git a/crates/scroll/evm/src/lib.rs b/crates/scroll/evm/src/lib.rs index daf887af4ce..2268a1d7a03 100644 --- a/crates/scroll/evm/src/lib.rs +++ b/crates/scroll/evm/src/lib.rs @@ -158,16 +158,3 @@ pub struct ScrollNextBlockEnvAttributes { /// The base fee of the next block. pub base_fee: u64, } - -impl reth_rpc_eth_api::helpers::pending_block::BuildPendingEnv - for ScrollNextBlockEnvAttributes -{ - fn build_pending_env(parent: &reth_primitives_traits::SealedHeader) -> Self { - Self { - timestamp: parent.timestamp().saturating_add(1), - suggested_fee_recipient: parent.beneficiary(), - gas_limit: parent.gas_limit(), - base_fee: parent.base_fee_per_gas().unwrap_or_default(), - } - } -} diff --git a/crates/scroll/openvm-compat/Cargo.lock b/crates/scroll/openvm-compat/Cargo.lock index c5267e11ad0..9f8bd1ba3af 100644 --- a/crates/scroll/openvm-compat/Cargo.lock +++ b/crates/scroll/openvm-compat/Cargo.lock @@ -35,9 +35,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74a694d8be621ee12b45ae23e7f18393b9a1e04f1ba47a0136767cb8c955f7f8" +checksum = "1b6093bc69509849435a2d68237a2e9fea79d27390c8e62f1e4012c460aabad8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -60,9 +60,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1647d47f59288584cc3b40eff3e7dde6af8c88a2fca8fe02c22de7b9ab218ffa" +checksum = "8d1cfed4fefd13b5620cb81cdb6ba397866ff0de514c1b24806e6e79cdff5570" dependencies = [ "alloy-consensus", "alloy-eips", @@ -112,9 +112,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715ae25d525c567481ba2fc97000415624836d516958b9c3f189f1e267d1d90a" +checksum = "5937e2d544e9b71000942d875cbc57965b32859a666ea543cc57aae5a06d602d" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -132,9 +132,9 @@ dependencies = [ [[package]] name = "alloy-evm" -version = "0.12.3" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff5aae4c6dc600734b206b175f3200085ee82dcdaa388760358830a984ca9869" +checksum = "822fc12d28a75059f87ef03939679e775c0655e83c98589500f7b9ec41d63e95" dependencies = [ "alloy-consensus", "alloy-eips", @@ -149,9 +149,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "696a83af273bfc512e02693bd4b5056c8c57898328bd0ce594013fb864de4dcf" 
+checksum = "c51b4c13e02a8104170a4de02ccf006d7c233e6c10ab290ee16e7041e6ac221d" dependencies = [ "alloy-eips", "alloy-primitives", @@ -176,9 +176,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35648c318b4649d2d141d1ed4f6e32c69f4959bdc2f6e44d53c0a333ed615a37" +checksum = "793df1e3457573877fbde8872e4906638fde565ee2d3bd16d04aad17d43dbf0e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -189,9 +189,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6177ed26655d4e84e00b65cb494d4e0b8830e7cae7ef5d63087d445a2600fb55" +checksum = "3cfebde8c581a5d37b678d0a48a32decb51efd7a63a08ce2517ddec26db705c8" dependencies = [ "alloy-rlp", "bytes", @@ -238,9 +238,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed717902ec7e7e5b737cf416f29c21f43a4e86db90ff6fddde199f4ed6ea1ac" +checksum = "f2f9cbf5f781b9ee39cfdddea078fdef6015424f4c8282ef0e5416d15ca352c4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -252,9 +252,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8300d59b0126876a1914102c588f9a4792eb4c754d483a954dc29904ddf79d6" +checksum = "46586ec3c278639fc0e129f0eb73dbfa3d57f683c44b2ff5e066fab7ba63fa1f" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -267,14 +267,15 @@ dependencies = [ "itertools 0.14.0", "serde", "serde_json", + "serde_with", "thiserror", ] [[package]] name = "alloy-serde" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8070bc2af2d48969e3aa709ea3ebf1f8316176b91c2132efe33d113f74383a9e" +checksum = "1e1722bc30feef87cc0fa824e43c9013f9639cc6c037be7be28a31361c788be2" dependencies = [ "alloy-primitives", "serde", @@ -283,9 +284,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a14f21d053aea4c6630687c2f4ad614bed4c81e14737a9b904798b24f30ea849" +checksum = "aedac07a10d4c2027817a43cc1f038313fc53c7ac866f7363239971fd01f9f18" dependencies = [ "alloy-sol-macro-expander", "alloy-sol-macro-input", @@ -297,9 +298,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-expander" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34d99282e7c9ef14eb62727981a985a01869e586d1dec729d3bb33679094c100" +checksum = "24f9a598f010f048d8b8226492b6401104f5a5c1273c2869b72af29b48bb4ba9" dependencies = [ "alloy-sol-macro-input", "const-hex", @@ -315,9 +316,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eda029f955b78e493360ee1d7bd11e1ab9f2a220a5715449babc79d6d0a01105" +checksum = "f494adf9d60e49aa6ce26dfd42c7417aa6d4343cf2ae621f20e4d92a5ad07d85" dependencies = [ "const-hex", "dunce", @@ -331,9 +332,9 @@ dependencies = [ [[package]] name = "alloy-sol-types" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"58377025a47d8b8426b3e4846a251f2c1991033b27f517aade368146f6ab1dfe" +checksum = "a285b46e3e0c177887028278f04cc8262b76fd3b8e0e20e93cea0a58c35f5ac5" dependencies = [ "alloy-primitives", "alloy-sol-macro", @@ -357,9 +358,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.19" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "472e12600c46b766110edd8382b4804d70188870f064531ee8fd61a35ed18686" +checksum = "9f916ff6d52f219c44a9684aea764ce2c7e1d53bd4a724c9b127863aeacc30bb" dependencies = [ "alloy-primitives", "darling", @@ -643,6 +644,12 @@ dependencies = [ "rand 0.8.5", ] +[[package]] +name = "arrayref" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + [[package]] name = "arrayvec" version = "0.7.6" @@ -821,9 +828,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.29" +version = "1.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c1599538de2394445747c8cf7935946e3cc27e9625f889d979bfb2aaf569362" +checksum = "c736e259eea577f443d5c86c304f9f4ae0295c43f3ba05c21f1d66b5f06001af" dependencies = [ "jobserver", "libc", @@ -1891,9 +1898,9 @@ dependencies = [ [[package]] name = "op-alloy-consensus" -version = "0.18.9" +version = "0.18.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8719d9b783b29cfa1cf8d591b894805786b9ab4940adc700a57fd0d5b721cf5" +checksum = "d3c719b26da6d9cac18c3a35634d6ab27a74a304ed9b403b43749c22e57a389f" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2342,7 +2349,7 @@ checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reth-chainspec" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -2361,7 +2368,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2378,7 +2385,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.5.0" +version = "1.6.0" dependencies = [ "convert_case", "proc-macro2", @@ -2388,7 +2395,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -2397,7 +2404,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -2408,7 +2415,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2422,7 +2429,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2443,7 +2450,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2460,7 +2467,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-evm", "alloy-primitives", @@ -2472,7 +2479,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2487,7 +2494,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -2498,7 +2505,7 @@ 
dependencies = [ [[package]] name = "reth-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "once_cell", @@ -2510,7 +2517,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2537,7 +2544,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "derive_more", @@ -2546,7 +2553,7 @@ dependencies = [ [[package]] name = "reth-scroll-chainspec" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -2554,6 +2561,7 @@ dependencies = [ "alloy-genesis", "alloy-primitives", "alloy-serde", + "auto_impl", "derive_more", "once_cell", "reth-chainspec", @@ -2569,7 +2577,7 @@ dependencies = [ [[package]] name = "reth-scroll-evm" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2584,6 +2592,7 @@ dependencies = [ "reth-scroll-chainspec", "reth-scroll-forks", "reth-scroll-primitives", + "reth-storage-api", "revm", "revm-primitives", "revm-scroll", @@ -2596,7 +2605,7 @@ dependencies = [ [[package]] name = "reth-scroll-forks" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-chains", "alloy-primitives", @@ -2608,7 +2617,7 @@ dependencies = [ [[package]] name = "reth-scroll-primitives" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2624,7 +2633,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "reth-trie-common", @@ -2632,7 +2641,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "derive_more", @@ -2642,7 +2651,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2663,7 +2672,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-eips", "alloy-primitives", @@ -2678,7 +2687,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -2699,7 +2708,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -2714,7 +2723,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -2729,15 +2738,15 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.5.0" +version = "1.6.0" dependencies = [ "zstd", ] [[package]] name = "revm" -version = "26.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "27.1.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "revm-bytecode", "revm-context", @@ -2754,8 +2763,8 @@ dependencies = [ [[package]] name = "revm-bytecode" -version = "5.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "6.1.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "bitvec", "once_cell", @@ -2766,8 +2775,8 @@ 
dependencies = [ [[package]] name = "revm-context" -version = "7.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "8.0.4" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "cfg-if", "derive-where", @@ -2780,8 +2789,8 @@ dependencies = [ [[package]] name = "revm-context-interface" -version = "7.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "9.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -2794,8 +2803,8 @@ dependencies = [ [[package]] name = "revm-database" -version = "6.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "7.0.2" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "revm-bytecode", "revm-database-interface", @@ -2805,18 +2814,19 @@ dependencies = [ [[package]] name = "revm-database-interface" -version = "6.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "7.0.2" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "auto_impl", + "either", "revm-primitives", "revm-state", ] [[package]] name = "revm-handler" -version = "7.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "8.1.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "auto_impl", "derive-where", @@ -2832,8 +2842,8 @@ dependencies = [ [[package]] name = "revm-inspector" -version = "7.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "8.1.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "auto_impl", "either", @@ -2847,8 +2857,8 @@ dependencies = [ [[package]] name = "revm-interpreter" -version = "22.0.1" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "24.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -2857,14 +2867,15 @@ dependencies = [ [[package]] name = "revm-precompile" -version = "23.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "25.0.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "ark-bls12-381", "ark-bn254", "ark-ec", "ark-ff 0.5.0", "ark-serialize 0.5.0", + "arrayref", "aurora-engine-modexp", "cfg-if", "k256", @@ -2877,8 +2888,8 @@ dependencies = [ [[package]] name = "revm-primitives" -version = "20.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "20.1.0" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" 
dependencies = [ "alloy-primitives", "num_enum", @@ -2888,7 +2899,7 @@ dependencies = [ [[package]] name = "revm-scroll" version = "0.1.0" -source = "git+https://github.com/scroll-tech/scroll-revm?branch=feat%2Fv78#c0609bc9e8cb23aba8f560a82e040a49726cf760" +source = "git+https://github.com/scroll-tech/scroll-revm?branch=feat%2Fv82#151eab7b5772a95b9d3279c44ff638e0119361cf" dependencies = [ "auto_impl", "enumn", @@ -2900,8 +2911,8 @@ dependencies = [ [[package]] name = "revm-state" -version = "6.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Freth-v78#64e018f80e65d79505591aacec4f35ec46bca5ff" +version = "7.0.2" +source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" dependencies = [ "bitflags", "revm-bytecode", @@ -3064,7 +3075,7 @@ dependencies = [ [[package]] name = "scroll-alloy-consensus" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3079,7 +3090,7 @@ dependencies = [ [[package]] name = "scroll-alloy-evm" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3094,7 +3105,7 @@ dependencies = [ [[package]] name = "scroll-alloy-hardforks" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-hardforks", "auto_impl", @@ -3102,7 +3113,7 @@ dependencies = [ [[package]] name = "scroll-alloy-rpc-types" -version = "1.5.0" +version = "1.6.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -3405,9 +3416,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ac494e7266fcdd2ad80bf4375d55d27a117ea5c866c26d0e97fe5b3caeeb75" +checksum = "a7a985ff4ffd7373e10e0fb048110fb11a162e5a4c47f92ddb8787a6f766b769" dependencies = [ "paste", "proc-macro2", diff --git a/crates/scroll/openvm-compat/Cargo.toml b/crates/scroll/openvm-compat/Cargo.toml index 5f136c1fca4..994e3239c40 100644 --- a/crates/scroll/openvm-compat/Cargo.toml +++ b/crates/scroll/openvm-compat/Cargo.toml @@ -28,4 +28,4 @@ scroll-alloy-consensus = { path = "../alloy/consensus", default-features = false scroll-alloy-rpc-types = { path = "../alloy/rpc-types", default-features = false } [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/reth-v78" } +revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83" } diff --git a/crates/scroll/rpc/Cargo.toml b/crates/scroll/rpc/Cargo.toml index a0b84a8367c..d8cd27c5da3 100644 --- a/crates/scroll/rpc/Cargo.toml +++ b/crates/scroll/rpc/Cargo.toml @@ -16,7 +16,7 @@ workspace = true reth-evm.workspace = true reth-primitives-traits.workspace = true reth-provider.workspace = true -reth-rpc-eth-api.workspace = true +reth-rpc-eth-api = { workspace = true, features = ["scroll"] } reth-rpc-eth-types.workspace = true reth-tasks = { workspace = true, features = ["rayon"] } reth-transaction-pool.workspace = true From 67e7759e4254b030aa08e1f18b4502efd9e3d525 Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Tue, 29 Jul 2025 16:47:16 +0200 Subject: [PATCH 299/305] chore: point revm to scroll branch Signed-off-by: Gregory Edison --- Cargo.lock | 44 +++++++++++++------------- Cargo.toml | 26 +++++++-------- crates/scroll/openvm-compat/Cargo.toml | 2 +- 3 files changed, 36 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 57751396269..89effc43020 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3229,7 +3229,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.60.2", ] [[package]] @@ -4525,7 +4525,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.57.0", + "windows-core 0.58.0", ] [[package]] @@ -4981,7 +4981,7 @@ checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6205,7 +6205,7 @@ dependencies = [ [[package]] name = "op-revm" version = "8.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "once_cell", @@ -6938,7 +6938,7 @@ dependencies = [ "once_cell", "socket2 0.5.10", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -11150,7 +11150,7 @@ dependencies = [ [[package]] name = "revm" version = "27.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "revm-bytecode", "revm-context", @@ -11168,7 +11168,7 @@ dependencies = [ [[package]] name = "revm-bytecode" version = "6.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "bitvec", "once_cell", @@ -11180,7 +11180,7 @@ dependencies = [ [[package]] name = "revm-context" version = "8.0.4" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "cfg-if", "derive-where", @@ -11195,7 +11195,7 @@ dependencies = [ [[package]] name = "revm-context-interface" version = "9.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -11210,7 +11210,7 @@ dependencies = [ [[package]] name = "revm-database" version = "7.0.2" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "alloy-eips", "revm-bytecode", @@ -11223,7 +11223,7 @@ dependencies = [ [[package]] name = "revm-database-interface" version = "7.0.2" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "either", @@ -11235,7 +11235,7 @@ dependencies = [ [[package]] name = "revm-handler" version = "8.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", 
"derive-where", @@ -11253,7 +11253,7 @@ dependencies = [ [[package]] name = "revm-inspector" version = "8.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "either", @@ -11290,7 +11290,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "24.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -11301,7 +11301,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "25.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11327,7 +11327,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "20.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "alloy-primitives", "num_enum", @@ -11337,7 +11337,7 @@ dependencies = [ [[package]] name = "revm-scroll" version = "0.1.0" -source = "git+https://github.com/scroll-tech/scroll-revm?branch=feat%2Fv82#151eab7b5772a95b9d3279c44ff638e0119361cf" +source = "git+https://github.com/scroll-tech/scroll-revm?branch=feat%2Fv82#a38a5739a090681298371ff8b85f43dffda836cb" dependencies = [ "auto_impl", "enumn", @@ -11351,7 +11351,7 @@ dependencies = [ [[package]] name = "revm-state" version = "7.0.2" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "bitflags 2.9.1", "revm-bytecode", @@ -11595,7 +11595,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -11608,7 +11608,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.9.4", - "windows-sys 0.52.0", + "windows-sys 0.60.2", ] [[package]] @@ -11666,7 +11666,7 @@ dependencies = [ "security-framework 3.2.0", "security-framework-sys", "webpki-root-certs 0.26.11", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -12649,7 +12649,7 @@ dependencies = [ "getrandom 0.3.3", "once_cell", "rustix 1.0.8", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index c2d50fa9118..0a7e2fb3b35 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -473,17 +473,17 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false, features = ["enable_eip7702"] } -revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } -revm-database = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } -revm-state = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } -revm-primitives 
= { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } -revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } -revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } -revm-context = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } -revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } -revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } -op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83", default-features = false } +revm = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false, features = ["enable_eip7702", "enable_eip7623"] } +revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } +revm-database = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } +revm-state = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } +revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } +revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } +revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } +revm-context = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } +revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } +revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } +op-revm = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", branch = "feat/v82", default-features = false } revm-inspectors = "0.27.1" @@ -754,8 +754,8 @@ walkdir = "2.3.3" vergen-git2 = "1.0.5" [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83" } -op-revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83" } +revm = { git = "https://github.com/scroll-tech/revm", branch = "scroll" } +op-revm = { git = "https://github.com/scroll-tech/revm", branch = "scroll" } # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } diff --git a/crates/scroll/openvm-compat/Cargo.toml b/crates/scroll/openvm-compat/Cargo.toml index 994e3239c40..0f45cf390a3 100644 --- a/crates/scroll/openvm-compat/Cargo.toml +++ b/crates/scroll/openvm-compat/Cargo.toml @@ -28,4 +28,4 @@ scroll-alloy-consensus = { path = "../alloy/consensus", default-features = false scroll-alloy-rpc-types = { path = "../alloy/rpc-types", default-features = false } [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm", branch = "feat/v83" } +revm = { git = "https://github.com/scroll-tech/revm", branch = "scroll" } From d4056544ba75649765e62acd3604fecbfa5d03a6 Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Thu, 31 Jul 2025 10:22:56 +0200 Subject: [PATCH 300/305] 
chore: reconcile merge

Signed-off-by: Gregory Edison
---
 Cargo.lock                       |  4 ++++
 crates/scroll/rpc/Cargo.toml     |  4 ++++
 crates/scroll/rpc/src/eth/fee.rs | 14 ++++++++++----
 crates/scroll/rpc/src/eth/mod.rs |  4 ++--
 4 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 3ee2b1e04c5..e0a04d356e2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10569,6 +10569,7 @@ dependencies = [
  "eyre",
  "jsonrpsee-types",
  "reqwest",
+ "reth-chainspec",
  "reth-evm",
  "reth-node-api",
  "reth-node-builder",
@@ -10578,11 +10579,14 @@ dependencies = [
  "reth-rpc-convert",
  "reth-rpc-eth-api",
  "reth-rpc-eth-types",
+ "reth-scroll-chainspec",
+ "reth-scroll-evm",
  "reth-scroll-primitives",
  "reth-tasks",
  "reth-transaction-pool",
  "revm",
  "scroll-alloy-consensus",
+ "scroll-alloy-hardforks",
  "scroll-alloy-network",
  "scroll-alloy-rpc-types",
  "thiserror 2.0.12",
diff --git a/crates/scroll/rpc/Cargo.toml b/crates/scroll/rpc/Cargo.toml
index 9d25b903796..5bf30ccaa36 100644
--- a/crates/scroll/rpc/Cargo.toml
+++ b/crates/scroll/rpc/Cargo.toml
@@ -13,6 +13,7 @@ workspace = true
 
 [dependencies]
 # reth
+reth-chainspec.workspace = true
 reth-evm.workspace = true
 reth-primitives-traits.workspace = true
 reth-provider.workspace = true
@@ -26,8 +27,11 @@ reth-node-api.workspace = true
 reth-node-builder.workspace = true
 
 # scroll
+reth-scroll-chainspec.workspace = true
+reth-scroll-evm.workspace = true
 reth-scroll-primitives = { workspace = true, features = ["serde", "serde-bincode-compat", "reth-codec"] }
 scroll-alloy-consensus.workspace = true
+scroll-alloy-hardforks.workspace = true
 scroll-alloy-network.workspace = true
 scroll-alloy-rpc-types.workspace = true
 
diff --git a/crates/scroll/rpc/src/eth/fee.rs b/crates/scroll/rpc/src/eth/fee.rs
index 200559eaa3a..dd8803b114f 100644
--- a/crates/scroll/rpc/src/eth/fee.rs
+++ b/crates/scroll/rpc/src/eth/fee.rs
@@ -1,4 +1,5 @@
-use crate::{eth::ScrollNodeCore, ScrollEthApi};
+use crate::{ScrollEthApi, ScrollEthApiError};
+
 use alloy_consensus::BlockHeader;
 use alloy_eips::eip7840::BlobParams;
 use alloy_primitives::{Sealable, U256};
@@ -9,18 +10,21 @@ use reth_provider::{
     BaseFeeProvider, BlockIdReader, ChainSpecProvider, HeaderProvider, ProviderHeader,
     StateProviderFactory,
 };
+use reth_rpc_convert::RpcConvert;
 use reth_rpc_eth_api::{
     helpers::{EthFees, LoadFee},
     FromEthApiError, RpcNodeCore, RpcNodeCoreExt,
 };
-use reth_rpc_eth_types::{fee_history::calculate_reward_percentiles_for_block, EthApiError};
+use reth_rpc_eth_types::{
+    error::FromEvmError, fee_history::calculate_reward_percentiles_for_block, EthApiError,
+};
 use reth_scroll_chainspec::{ChainConfig, ScrollChainConfig};
 use reth_scroll_evm::ScrollBaseFeeProvider;
 use scroll_alloy_hardforks::ScrollHardforks;
 use std::future::Future;
 use tracing::debug;
 
-impl<N> EthFees for ScrollEthApi<N>
+impl<N, Rpc> EthFees for ScrollEthApi<N, Rpc>
 where
     Self: LoadFee<
         Provider: StateProviderFactory
             + ChainSpecProvider<
                 ChainSpec: ScrollHardforks + EthChainSpec + ChainConfig,
             >,
     >,
-    N: ScrollNodeCore,
+    N: RpcNodeCore,
+    ScrollEthApiError: FromEvmError<N::Evm>,
+    Rpc: RpcConvert,
 {
     #[allow(clippy::manual_async_fn)]
     fn fee_history(
diff --git a/crates/scroll/rpc/src/eth/mod.rs b/crates/scroll/rpc/src/eth/mod.rs
index a6c3a6a4d2a..f719fe2e7fd 100644
--- a/crates/scroll/rpc/src/eth/mod.rs
+++ b/crates/scroll/rpc/src/eth/mod.rs
@@ -15,8 +15,8 @@ use reth_rpc::eth::{core::EthApiInner, DevSigner};
 use reth_rpc_convert::{RpcConvert, RpcConverter, RpcTypes, SignableTxRequest};
 use reth_rpc_eth_api::{
     helpers::{
-        pending_block::BuildPendingEnv, spec::SignersForApi, AddDevSigners, EthApiSpec,
EthFees, - EthState, LoadFee, LoadState, SpawnBlocking, Trace, + pending_block::BuildPendingEnv, spec::SignersForApi, AddDevSigners, EthApiSpec, EthState, + LoadFee, LoadState, SpawnBlocking, Trace, }, EthApiTypes, FullEthApiServer, RpcNodeCore, RpcNodeCoreExt, }; From 90627f891c51ce1a9146a419a27ea1357a307df0 Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Thu, 31 Jul 2025 10:42:49 +0200 Subject: [PATCH 301/305] chore: redirect imports to default branches Signed-off-by: Gregory Edison --- Cargo.lock | 44 +++++++++++++------------- Cargo.toml | 28 ++++++++-------- crates/scroll/openvm-compat/Cargo.lock | 26 +++++++-------- crates/scroll/openvm-compat/Cargo.toml | 2 +- 4 files changed, 50 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e0a04d356e2..2a68e3651d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3229,7 +3229,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.60.2", + "windows-sys 0.52.0", ] [[package]] @@ -4525,7 +4525,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.58.0", + "windows-core 0.57.0", ] [[package]] @@ -4981,7 +4981,7 @@ checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -6205,7 +6205,7 @@ dependencies = [ [[package]] name = "op-revm" version = "8.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "once_cell", @@ -6938,7 +6938,7 @@ dependencies = [ "once_cell", "socket2 0.5.10", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -11155,7 +11155,7 @@ dependencies = [ [[package]] name = "revm" version = "27.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "revm-bytecode", "revm-context", @@ -11173,7 +11173,7 @@ dependencies = [ [[package]] name = "revm-bytecode" version = "6.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "bitvec", "once_cell", @@ -11185,7 +11185,7 @@ dependencies = [ [[package]] name = "revm-context" version = "8.0.4" -source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "cfg-if", "derive-where", @@ -11200,7 +11200,7 @@ dependencies = [ [[package]] name = "revm-context-interface" version = "9.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -11215,7 +11215,7 @@ dependencies = [ [[package]] name = "revm-database" version = "7.0.2" -source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" 
dependencies = [ "alloy-eips", "revm-bytecode", @@ -11228,7 +11228,7 @@ dependencies = [ [[package]] name = "revm-database-interface" version = "7.0.2" -source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "either", @@ -11240,7 +11240,7 @@ dependencies = [ [[package]] name = "revm-handler" version = "8.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "derive-where", @@ -11258,7 +11258,7 @@ dependencies = [ [[package]] name = "revm-inspector" version = "8.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "either", @@ -11295,7 +11295,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "24.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -11306,7 +11306,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "25.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -11332,7 +11332,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "20.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "alloy-primitives", "num_enum", @@ -11342,7 +11342,7 @@ dependencies = [ [[package]] name = "revm-scroll" version = "0.1.0" -source = "git+https://github.com/scroll-tech/scroll-revm?branch=feat%2Fv82#a38a5739a090681298371ff8b85f43dffda836cb" +source = "git+https://github.com/scroll-tech/scroll-revm#08e6281f419517527d77a66fae19075e8bfeac3e" dependencies = [ "auto_impl", "enumn", @@ -11356,7 +11356,7 @@ dependencies = [ [[package]] name = "revm-state" version = "7.0.2" -source = "git+https://github.com/scroll-tech/revm?branch=scroll#9cd9896c06a5bf6d4212906260d8789579873ba4" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "bitflags 2.9.1", "revm-bytecode", @@ -11600,7 +11600,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -11613,7 +11613,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.9.4", - "windows-sys 0.60.2", + "windows-sys 0.52.0", ] [[package]] @@ -11671,7 +11671,7 @@ dependencies = [ "security-framework 3.2.0", "security-framework-sys", "webpki-root-certs 0.26.11", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -12654,7 +12654,7 @@ dependencies = [ "getrandom 0.3.3", "once_cell", "rustix 1.0.8", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 0a7e2fb3b35..7db7095d16f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -473,18 
+473,18 @@ reth-ress-protocol = { path = "crates/ress/protocol" } reth-ress-provider = { path = "crates/ress/provider" } # revm -revm = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false, features = ["enable_eip7702", "enable_eip7623"] } -revm-bytecode = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } -revm-database = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } -revm-state = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } -revm-primitives = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } -revm-interpreter = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } -revm-inspector = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } -revm-context = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } -revm-context-interface = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } -revm-database-interface = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } -op-revm = { git = "https://github.com/scroll-tech/revm", branch = "scroll", default-features = false } -revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", branch = "feat/v82", default-features = false } +revm = { git = "https://github.com/scroll-tech/revm", default-features = false, features = ["enable_eip7702", "enable_eip7623"] } +revm-bytecode = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-database = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-state = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-primitives = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-interpreter = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-inspector = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-context = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-context-interface = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-database-interface = { git = "https://github.com/scroll-tech/revm", default-features = false } +op-revm = { git = "https://github.com/scroll-tech/revm", default-features = false } +revm-scroll = { git = "https://github.com/scroll-tech/scroll-revm", default-features = false } revm-inspectors = "0.27.1" # eth @@ -754,8 +754,8 @@ walkdir = "2.3.3" vergen-git2 = "1.0.5" [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm", branch = "scroll" } -op-revm = { git = "https://github.com/scroll-tech/revm", branch = "scroll" } +revm = { git = "https://github.com/scroll-tech/revm" } +op-revm = { git = "https://github.com/scroll-tech/revm" } # alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-contract = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } # alloy-eips = { git = "https://github.com/alloy-rs/alloy", rev = "cfb13aa" } diff --git a/crates/scroll/openvm-compat/Cargo.lock b/crates/scroll/openvm-compat/Cargo.lock index 9f8bd1ba3af..92cac3d6642 100644 --- a/crates/scroll/openvm-compat/Cargo.lock +++ b/crates/scroll/openvm-compat/Cargo.lock @@ -2746,7 +2746,7 @@ dependencies = [ 
[[package]] name = "revm" version = "27.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "revm-bytecode", "revm-context", @@ -2764,7 +2764,7 @@ dependencies = [ [[package]] name = "revm-bytecode" version = "6.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "bitvec", "once_cell", @@ -2776,7 +2776,7 @@ dependencies = [ [[package]] name = "revm-context" version = "8.0.4" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "cfg-if", "derive-where", @@ -2790,7 +2790,7 @@ dependencies = [ [[package]] name = "revm-context-interface" version = "9.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "alloy-eip2930", "alloy-eip7702", @@ -2804,7 +2804,7 @@ dependencies = [ [[package]] name = "revm-database" version = "7.0.2" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "revm-bytecode", "revm-database-interface", @@ -2815,7 +2815,7 @@ dependencies = [ [[package]] name = "revm-database-interface" version = "7.0.2" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "either", @@ -2826,7 +2826,7 @@ dependencies = [ [[package]] name = "revm-handler" version = "8.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "derive-where", @@ -2843,7 +2843,7 @@ dependencies = [ [[package]] name = "revm-inspector" version = "8.1.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "auto_impl", "either", @@ -2858,7 +2858,7 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "24.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "revm-bytecode", "revm-context-interface", @@ -2868,7 +2868,7 @@ dependencies = [ [[package]] name = "revm-precompile" version = "25.0.0" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -2889,7 +2889,7 @@ dependencies = [ [[package]] name = "revm-primitives" version = "20.1.0" -source = 
"git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "alloy-primitives", "num_enum", @@ -2899,7 +2899,7 @@ dependencies = [ [[package]] name = "revm-scroll" version = "0.1.0" -source = "git+https://github.com/scroll-tech/scroll-revm?branch=feat%2Fv82#151eab7b5772a95b9d3279c44ff638e0119361cf" +source = "git+https://github.com/scroll-tech/scroll-revm#08e6281f419517527d77a66fae19075e8bfeac3e" dependencies = [ "auto_impl", "enumn", @@ -2912,7 +2912,7 @@ dependencies = [ [[package]] name = "revm-state" version = "7.0.2" -source = "git+https://github.com/scroll-tech/revm?branch=feat%2Fv83#60bf14c2b0190435824b37c04ad1e40f4b41479e" +source = "git+https://github.com/scroll-tech/revm#9cd9896c06a5bf6d4212906260d8789579873ba4" dependencies = [ "bitflags", "revm-bytecode", diff --git a/crates/scroll/openvm-compat/Cargo.toml b/crates/scroll/openvm-compat/Cargo.toml index 0f45cf390a3..e244598819b 100644 --- a/crates/scroll/openvm-compat/Cargo.toml +++ b/crates/scroll/openvm-compat/Cargo.toml @@ -28,4 +28,4 @@ scroll-alloy-consensus = { path = "../alloy/consensus", default-features = false scroll-alloy-rpc-types = { path = "../alloy/rpc-types", default-features = false } [patch.crates-io] -revm = { git = "https://github.com/scroll-tech/revm", branch = "scroll" } +revm = { git = "https://github.com/scroll-tech/revm" } From 06a12388d20019144c1d38b30c011c1c1eaea6e5 Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Thu, 31 Jul 2025 11:55:23 +0200 Subject: [PATCH 302/305] test: run e2e tests on ubuntu-latest Signed-off-by: Gregory Edison --- .github/workflows/e2e.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index ac43d6cc84f..80738c6ed0f 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -19,8 +19,7 @@ concurrency: jobs: test: name: e2e-testsuite - runs-on: - group: Reth + runs-on: ubuntu-latest env: RUST_BACKTRACE: 1 timeout-minutes: 90 From f12a48b4c2060ecf8372c52762f0f22de4ce9d25 Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Thu, 31 Jul 2025 17:45:11 +0200 Subject: [PATCH 303/305] test: increase available stack for e2e test Signed-off-by: Gregory Edison --- .github/workflows/e2e.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 80738c6ed0f..53199542c9c 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -22,6 +22,7 @@ jobs: runs-on: ubuntu-latest env: RUST_BACKTRACE: 1 + RUST_MIN_STACK: 4194304 timeout-minutes: 90 steps: - uses: actions/checkout@v4 From 659aab38d0bea0d505a7df890abf99ce0eb38d1d Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Thu, 31 Jul 2025 18:05:27 +0200 Subject: [PATCH 304/305] chore: bump revm Signed-off-by: Gregory Edison --- Cargo.lock | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2a68e3651d6..042209bfcf6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3229,7 +3229,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.60.2", ] [[package]] @@ -4525,7 +4525,7 @@ dependencies = [ "js-sys", "log", "wasm-bindgen", - "windows-core 0.57.0", + "windows-core 0.58.0", ] [[package]] @@ -4981,7 +4981,7 @@ checksum = 
"e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6938,7 +6938,7 @@ dependencies = [ "once_cell", "socket2 0.5.10", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -11342,7 +11342,7 @@ dependencies = [ [[package]] name = "revm-scroll" version = "0.1.0" -source = "git+https://github.com/scroll-tech/scroll-revm#08e6281f419517527d77a66fae19075e8bfeac3e" +source = "git+https://github.com/scroll-tech/scroll-revm#720ee7802e5ad695ac1f8699bbab9c9f2424417f" dependencies = [ "auto_impl", "enumn", @@ -11600,7 +11600,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -11613,7 +11613,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.9.4", - "windows-sys 0.52.0", + "windows-sys 0.60.2", ] [[package]] @@ -11671,7 +11671,7 @@ dependencies = [ "security-framework 3.2.0", "security-framework-sys", "webpki-root-certs 0.26.11", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -12654,7 +12654,7 @@ dependencies = [ "getrandom 0.3.3", "once_cell", "rustix 1.0.8", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] From 23843291c783ec75305df76c0998a4368a698074 Mon Sep 17 00:00:00 2001 From: Gregory Edison Date: Fri, 1 Aug 2025 08:56:51 +0200 Subject: [PATCH 305/305] fix: hardcoded constants Signed-off-by: Gregory Edison --- crates/scroll/node/src/args.rs | 9 ++++++--- crates/scroll/rpc/src/eth/mod.rs | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/crates/scroll/node/src/args.rs b/crates/scroll/node/src/args.rs index 9f3e77dbd8d..7fe563804e2 100644 --- a/crates/scroll/node/src/args.rs +++ b/crates/scroll/node/src/args.rs @@ -1,5 +1,7 @@ use crate::builder::payload::SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT; +use reth_scroll_rpc::eth::DEFAULT_MIN_SUGGESTED_PRIORITY_FEE; + /// Rollup arguments for the Scroll node. #[derive(Debug, Clone, clap::Args)] pub struct ScrollRollupArgs { @@ -7,8 +9,9 @@ pub struct ScrollRollupArgs { #[arg(long = "scroll.sequencer")] pub sequencer: Option, - /// Minimum suggested priority fee (tip) in wei, default `1_000_000` - #[arg(long = "scroll.min-suggested-priority-fee", default_value_t = 1_000_000)] + /// Minimum suggested priority fee (tip) in wei, default to + /// [`DEFAULT_MIN_SUGGESTED_PRIORITY_FEE`]. + #[arg(long = "scroll.min-suggested-priority-fee", default_value_t = DEFAULT_MIN_SUGGESTED_PRIORITY_FEE)] pub min_suggested_priority_fee: u64, /// Payload size limit, default to `122kB`. @@ -20,7 +23,7 @@ impl Default for ScrollRollupArgs { fn default() -> Self { Self { sequencer: None, - min_suggested_priority_fee: 1_000_000, + min_suggested_priority_fee: DEFAULT_MIN_SUGGESTED_PRIORITY_FEE, payload_size_limit: SCROLL_DEFAULT_PAYLOAD_SIZE_LIMIT, } } diff --git a/crates/scroll/rpc/src/eth/mod.rs b/crates/scroll/rpc/src/eth/mod.rs index f719fe2e7fd..6deb09f7dc3 100644 --- a/crates/scroll/rpc/src/eth/mod.rs +++ b/crates/scroll/rpc/src/eth/mod.rs @@ -301,10 +301,10 @@ pub type ScrollRpcConvert = RpcConverter< >; /// The default suggested priority fee for the gas price oracle. -const DEFAULT_MIN_SUGGESTED_PRIORITY_FEE: u64 = 100; +pub const DEFAULT_MIN_SUGGESTED_PRIORITY_FEE: u64 = 100; /// The default payload size limit in bytes for the sequencer. -const DEFAULT_PAYLOAD_SIZE_LIMIT: u64 = 122_880; +pub const DEFAULT_PAYLOAD_SIZE_LIMIT: u64 = 122_880; /// A type that knows how to build a [`ScrollEthApi`]. #[derive(Debug)]