From 3174d8e3de792b9096a148ef3a81d0c9e5721fb4 Mon Sep 17 00:00:00 2001 From: chungquantin <56880684+chungquantin@users.noreply.github.com> Date: Tue, 7 Jan 2025 13:39:06 +0700 Subject: [PATCH 01/13] refactor: add Eth variant to OpBlockExecutionError --- crates/ethereum/evm/src/execute.rs | 2 + crates/evm/src/execute.rs | 8 ++- crates/optimism/evm/src/error.rs | 13 ++++- crates/optimism/evm/src/execute.rs | 49 +++++++++++-------- .../custom-beacon-withdrawals/src/main.rs | 1 + 5 files changed, 49 insertions(+), 24 deletions(-) diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 0470a283ed6e..8ae6a09f5a03 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -69,6 +69,8 @@ where Transaction = reth_primitives::TransactionSigned, >, { + type Error = BlockExecutionError; + type Primitives = EthPrimitives; type Strategy + Display>> = diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 9a9f65375918..0561c1cd91a7 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -250,6 +250,9 @@ pub trait BlockExecutionStrategy { /// A strategy factory that can create block execution strategies. pub trait BlockExecutionStrategyFactory: Send + Sync + Clone + Unpin + 'static { + /// The error type returned by this strategy's methods. + type Error: From + core::error::Error; + /// Primitive types used by the strategy. type Primitives: NodePrimitives; @@ -257,7 +260,7 @@ pub trait BlockExecutionStrategyFactory: Send + Sync + Clone + Unpin + 'static { type Strategy + Display>>: BlockExecutionStrategy< DB = DB, Primitives = Self::Primitives, - Error = BlockExecutionError, + Error = Self::Error, >; /// Creates a strategy using the give database. @@ -290,7 +293,7 @@ impl BasicBlockExecutorProvider { impl BlockExecutorProvider for BasicBlockExecutorProvider where - F: BlockExecutionStrategyFactory, + F: BlockExecutionStrategyFactory, { type Primitives = F::Primitives; @@ -624,6 +627,7 @@ mod tests { } impl BlockExecutionStrategyFactory for TestExecutorStrategyFactory { + type Error = BlockExecutionError; type Primitives = EthPrimitives; type Strategy + Display>> = TestExecutorStrategy; diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index 461f8c11e4fb..f1c8f6304adc 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -1,10 +1,10 @@ //! Error types for the Optimism EVM module. use alloc::string::String; -use reth_evm::execute::BlockExecutionError; +use reth_evm::execute::{BlockExecutionError, ProviderError}; /// Optimism Block Executor Errors -#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)] +#[derive(Debug, thiserror::Error)] pub enum OpBlockExecutionError { /// Error when trying to parse L1 block info #[error("could not get L1 block info from L2 block: {message}")] @@ -21,6 +21,9 @@ pub enum OpBlockExecutionError { /// Thrown when a database account could not be loaded. #[error("failed to load account {_0}")] AccountLoadFailed(alloy_primitives::Address), + /// Thrown when a L1 block execution failed. 
+ #[error("execution error on L1: {_0}")] + Eth(BlockExecutionError), } impl From for BlockExecutionError { @@ -28,3 +31,9 @@ impl From for BlockExecutionError { Self::other(err) } } + +impl From for OpBlockExecutionError { + fn from(error: ProviderError) -> Self { + Self::Eth(BlockExecutionError::from(error)) + } +} diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 402f0ab16f7a..b6f21b85014d 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -61,6 +61,7 @@ where + 'static + ConfigureEvm
, { + type Error = OpBlockExecutionError; type Primitives = OpPrimitives; type Strategy + Display>> = OpExecutionStrategy; @@ -126,7 +127,7 @@ where { type DB = DB; type Primitives = OpPrimitives; - type Error = BlockExecutionError; + type Error = OpBlockExecutionError; fn init(&mut self, tx_env_overrides: Box) { self.tx_env_overrides = Some(tx_env_overrides); @@ -144,12 +145,14 @@ where let env = self.evm_env_for_block(&block.header); let mut evm = self.evm_config.evm_with_env(&mut self.state, env); - self.system_caller.apply_beacon_root_contract_call( - block.timestamp, - block.number, - block.parent_beacon_block_root, - &mut evm, - )?; + self.system_caller + .apply_beacon_root_contract_call( + block.timestamp, + block.number, + block.parent_beacon_block_root, + &mut evm, + ) + .map_err(OpBlockExecutionError::Eth)?; // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism // blocks will always have at least a single transaction in them (the L1 info transaction), @@ -180,11 +183,12 @@ where if transaction.gas_limit() > block_available_gas && (is_regolith || !transaction.is_deposit()) { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) + return Err(OpBlockExecutionError::Eth(BlockExecutionError::Validation( + BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: transaction.gas_limit(), + block_available_gas, + }, + ))) } // Cache the depositor account prior to the state transition for the deposit nonce. @@ -211,10 +215,12 @@ where let result_and_state = evm.transact().map_err(move |err| { let new_err = err.map_db_err(|e| e.into()); // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { - hash: transaction.recalculate_hash(), - error: Box::new(new_err), - } + OpBlockExecutionError::Eth(BlockExecutionError::Validation( + BlockValidationError::EVM { + hash: transaction.recalculate_hash(), + error: Box::new(new_err), + }, + )) })?; trace!( @@ -269,11 +275,14 @@ where let balance_increments = post_block_balance_increments(&self.chain_spec.clone(), &block.block); // increment balances - self.state - .increment_balances(balance_increments.clone()) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + self.state.increment_balances(balance_increments.clone()).map_err(|_| { + OpBlockExecutionError::Eth(BlockExecutionError::Validation( + BlockValidationError::IncrementBalanceFailed, + )) + })?; // call state hook with changes due to balance increments. 
- let balance_state = balance_increment_state(&balance_increments, &mut self.state)?; + let balance_state = balance_increment_state(&balance_increments, &mut self.state) + .map_err(OpBlockExecutionError::Eth)?; self.system_caller.on_state(&balance_state); Ok(Requests::default()) diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index f484b082be7a..1a5f676436f9 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -91,6 +91,7 @@ pub struct CustomExecutorStrategyFactory { } impl BlockExecutionStrategyFactory for CustomExecutorStrategyFactory { + type Error = BlockExecutionError; type Primitives = EthPrimitives; type Strategy + Display>> = CustomExecutorStrategy; From 998c226362ef1588b19de1bd22e02ab11dd26391 Mon Sep 17 00:00:00 2001 From: chungquantin <56880684+chungquantin@users.noreply.github.com> Date: Thu, 16 Jan 2025 17:15:13 +0700 Subject: [PATCH 02/13] refactor: BlockExecutionStrategyFactory --- crates/blockchain-tree/src/chain.rs | 2 +- crates/evm/src/either.rs | 4 +++- crates/evm/src/execute.rs | 20 +++++++++++++------- crates/evm/src/noop.rs | 2 ++ crates/evm/src/test_utils.rs | 2 ++ crates/exex/exex/src/backfill/job.rs | 2 +- crates/optimism/evm/src/error.rs | 16 ++-------------- 7 files changed, 24 insertions(+), 24 deletions(-) diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index e607d00d2d9b..bfa6627413aa 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -209,7 +209,7 @@ impl AppendableChain { let block_hash = block.hash(); let block = block.unseal(); - let state = executor.execute(&block)?; + let state = executor.execute(&block).unwrap(); externals.consensus.validate_block_post_execution( &block, PostExecutionInput::new(&state.receipts, &state.requests), diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index 4faeb1a72030..7a59b7f8a620 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -19,10 +19,12 @@ use revm::State; impl BlockExecutorProvider for Either where A: BlockExecutorProvider, - B: BlockExecutorProvider, + B: BlockExecutorProvider, { type Primitives = A::Primitives; + type Error = A::Error; + type Executor + Display>> = Either, B::Executor>; diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 0561c1cd91a7..ca65f3fcf239 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -138,6 +138,9 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// Receipt type. type Primitives: NodePrimitives; + /// The error type returned by the executor. + type Error; + /// An executor that can execute a single block given a database. /// /// # Verification @@ -153,7 +156,7 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { DB, Input<'a> = &'a BlockWithSenders<::Block>, Output = BlockExecutionOutput<::Receipt>, - Error = BlockExecutionError, + Error = Self::Error, >; /// An executor that can execute a batch of blocks given a database. @@ -161,7 +164,7 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { DB, Input<'a> = &'a BlockWithSenders<::Block>, Output = ExecutionOutcome<::Receipt>, - Error = BlockExecutionError, + Error = Self::Error, >; /// Creates a new executor for single block execution. @@ -198,7 +201,7 @@ pub trait BlockExecutionStrategy { type Primitives: NodePrimitives; /// The error type returned by this strategy's methods. 
- type Error: From + core::error::Error; + type Error; /// Initialize the strategy with the given transaction environment overrides. fn init(&mut self, _tx_env_overrides: Box) {} @@ -251,7 +254,7 @@ pub trait BlockExecutionStrategy { /// A strategy factory that can create block execution strategies. pub trait BlockExecutionStrategyFactory: Send + Sync + Clone + Unpin + 'static { /// The error type returned by this strategy's methods. - type Error: From + core::error::Error; + type Error; /// Primitive types used by the strategy. type Primitives: NodePrimitives; @@ -293,8 +296,10 @@ impl BasicBlockExecutorProvider { impl BlockExecutorProvider for BasicBlockExecutorProvider where - F: BlockExecutionStrategyFactory, + F: BlockExecutionStrategyFactory, { + type Error = F::Error; + type Primitives = F::Primitives; type Executor + Display>> = @@ -425,12 +430,12 @@ where impl BatchExecutor for BasicBatchExecutor where - S: BlockExecutionStrategy, + S: BlockExecutionStrategy, DB: Database + Display>, { type Input<'a> = &'a BlockWithSenders<::Block>; type Output = ExecutionOutcome<::Receipt>; - type Error = BlockExecutionError; + type Error = S::Error; fn execute_and_verify_one(&mut self, block: Self::Input<'_>) -> Result<(), Self::Error> { if self.batch_record.first_block().is_none() { @@ -529,6 +534,7 @@ mod tests { struct TestExecutorProvider; impl BlockExecutorProvider for TestExecutorProvider { + type Error = BlockExecutionError; type Primitives = EthPrimitives; type Executor + Display>> = TestExecutor; type BatchExecutor + Display>> = TestExecutor; diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index 66041840ae77..19fa57979dc3 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -25,6 +25,8 @@ pub struct NoopBlockExecutorProvider
<P>(core::marker::PhantomData<P>
); impl<P: NodePrimitives> BlockExecutorProvider for NoopBlockExecutorProvider<P>
{ type Primitives = P; + type Error = BlockExecutionError; + type Executor + Display>> = Self; type BatchExecutor + Display>> = Self; diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index 762098a4871c..f75334262e28 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -35,6 +35,8 @@ impl MockExecutorProvider { impl BlockExecutorProvider for MockExecutorProvider { type Primitives = EthPrimitives; + type Error = BlockExecutionError; + type Executor + Display>> = Self; type BatchExecutor + Display>> = Self; diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 8ad8692ae6ad..9de21216a011 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -112,7 +112,7 @@ where let (unsealed_header, hash) = header.split(); let block = P::Block::new(unsealed_header, body).with_senders_unchecked(senders); - executor.execute_and_verify_one(&block)?; + executor.execute_and_verify_one(&block).unwrap(); execution_duration += execute_start.elapsed(); // TODO(alexey): report gas metrics using `block.header.gas_used` diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index f1c8f6304adc..4082d0159dd2 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -22,18 +22,6 @@ pub enum OpBlockExecutionError { #[error("failed to load account {_0}")] AccountLoadFailed(alloy_primitives::Address), /// Thrown when a L1 block execution failed. - #[error("execution error on L1: {_0}")] - Eth(BlockExecutionError), -} - -impl From for BlockExecutionError { - fn from(err: OpBlockExecutionError) -> Self { - Self::other(err) - } -} - -impl From for OpBlockExecutionError { - fn from(error: ProviderError) -> Self { - Self::Eth(BlockExecutionError::from(error)) - } + #[error(transparent)] + Eth(#[from] BlockExecutionError), } From 7bfd8a70ba27102ddc24dfff28e9d9665010124a Mon Sep 17 00:00:00 2001 From: chungquantin <56880684+chungquantin@users.noreply.github.com> Date: Thu, 16 Jan 2025 17:43:01 +0700 Subject: [PATCH 03/13] chore: init --- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/blockchain-tree/src/chain.rs | 10 ++++----- crates/blockchain-tree/src/shareable.rs | 7 +++--- crates/evm/src/execute.rs | 6 +++-- crates/evm/src/metrics.rs | 3 ++- crates/exex/exex/src/backfill/job.rs | 22 ++++++++++++++----- crates/rpc/rpc/src/debug.rs | 7 ++++-- crates/rpc/rpc/src/validation.rs | 2 +- 8 files changed, 39 insertions(+), 20 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 3964ea53b7e2..264d35f2d10d 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -95,7 +95,7 @@ impl BlockchainTree { impl BlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. 
/// diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index bfa6627413aa..9fba5bab4a9f 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -77,7 +77,7 @@ impl AppendableChain { ) -> Result where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let execution_outcome = ExecutionOutcome::default(); let empty = BTreeMap::new(); @@ -115,7 +115,7 @@ impl AppendableChain { ) -> Result where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let parent_number = block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; @@ -178,7 +178,7 @@ impl AppendableChain { where EDP: FullExecutionDataProvider, N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { // some checks are done before blocks comes here. externals.consensus.validate_header_against_parent(&block, parent_block)?; @@ -209,7 +209,7 @@ impl AppendableChain { let block_hash = block.hash(); let block = block.unseal(); - let state = executor.execute(&block).unwrap(); + let state = executor.execute(&block)?; externals.consensus.validate_block_post_execution( &block, PostExecutionInput::new(&state.receipts, &state.requests), @@ -284,7 +284,7 @@ impl AppendableChain { ) -> Result<(), InsertBlockErrorKind> where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let parent_block = self.chain.tip(); diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index e668f4e2dac0..d37a849867b3 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -12,6 +12,7 @@ use reth_blockchain_tree_api::{ InsertPayloadOk, }; use reth_evm::execute::BlockExecutorProvider; +use reth_execution_errors::BlockExecutionError; use reth_node_types::NodeTypesWithDB; use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ @@ -39,7 +40,7 @@ impl ShareableBlockchainTree { impl BlockchainTreeEngine for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { let mut tree = self.tree.write(); @@ -110,7 +111,7 @@ where impl BlockchainTreeViewer for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn header_by_hash(&self, hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); @@ -173,7 +174,7 @@ where impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn find_pending_state_provider( &self, diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index ca65f3fcf239..139740c831e0 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -201,7 +201,7 @@ pub trait BlockExecutionStrategy { type Primitives: NodePrimitives; /// The error type returned by this strategy's methods. - type Error; + type Error: From + Into; /// Initialize the strategy with the given transaction environment overrides. fn init(&mut self, _tx_env_overrides: Box) {} @@ -446,7 +446,9 @@ where let ExecuteOutput { receipts, .. 
} = self.strategy.execute_transactions(block)?; let requests = self.strategy.apply_post_execution_changes(block, &receipts)?; - self.strategy.validate_block_post_execution(block, &receipts, &requests)?; + self.strategy + .validate_block_post_execution(block, &receipts, &requests) + .map_err(BlockExecutionError::Consensus)?; // prepare the state according to the prune mode let retention = self.batch_record.bundle_retention(block.header().number()); diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index 242ddfe5b79a..d5d08ba82396 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -145,6 +145,7 @@ mod tests { use super::*; use alloy_eips::eip7685::Requests; use metrics_util::debugging::{DebugValue, DebuggingRecorder, Snapshotter}; + use reth_execution_errors::BlockExecutionError; use revm::db::BundleState; use revm_primitives::{ Account, AccountInfo, AccountStatus, EvmState, EvmStorage, EvmStorageSlot, B256, U256, @@ -162,7 +163,7 @@ mod tests { where Self: 'a; type Output = BlockExecutionOutput<()>; - type Error = std::convert::Infallible; + type Error = BlockExecutionError; fn execute(self, _input: Self::Input<'_>) -> Result { Ok(BlockExecutionOutput { diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 9de21216a011..500cb9f9b5e2 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -39,7 +39,10 @@ pub struct BackfillJob { impl Iterator for BackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult>; @@ -55,7 +58,10 @@ where impl BackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: BlockReader + HeaderProvider + StateProviderFactory, { /// Converts the backfill job into a single block backfill job. @@ -112,7 +118,7 @@ where let (unsealed_header, hash) = header.split(); let block = P::Block::new(unsealed_header, body).with_senders_unchecked(senders); - executor.execute_and_verify_one(&block).unwrap(); + executor.execute_and_verify_one(&block)?; execution_duration += execute_start.elapsed(); // TODO(alexey): report gas metrics using `block.header.gas_used` @@ -162,7 +168,10 @@ pub struct SingleBlockBackfillJob { impl Iterator for SingleBlockBackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult<( @@ -177,7 +186,10 @@ where impl SingleBlockBackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: HeaderProvider + BlockReader + StateProviderFactory, { /// Converts the single block backfill job into a stream. 
diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index b88bac816f8b..5cd0bab466c4 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -15,6 +15,7 @@ use alloy_rpc_types_trace::geth::{ use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::EthereumHardforks; +use reth_errors::BlockExecutionError; use reth_evm::{ env::EvmEnv, execute::{BlockExecutorProvider, Executor}, @@ -83,8 +84,10 @@ impl DebugApi { impl DebugApi where Eth: EthApiTypes + TraceExt + 'static, - BlockExecutor: - BlockExecutorProvider>>, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives>, + Error = BlockExecutionError, + >, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index cb3ab4f296cf..60fc16b0b14f 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -93,7 +93,7 @@ where + ChainSpecProvider + StateProviderFactory + 'static, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( From e21f7b6b272b57ebdda4fc7ff2a1d5634b3121ef Mon Sep 17 00:00:00 2001 From: chungquantin <56880684+chungquantin@users.noreply.github.com> Date: Thu, 16 Jan 2025 17:47:51 +0700 Subject: [PATCH 04/13] Revert "chore: init" This reverts commit 7bfd8a70ba27102ddc24dfff28e9d9665010124a. --- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/blockchain-tree/src/chain.rs | 10 ++++----- crates/blockchain-tree/src/shareable.rs | 7 +++--- crates/evm/src/execute.rs | 6 ++--- crates/evm/src/metrics.rs | 3 +-- crates/exex/exex/src/backfill/job.rs | 22 +++++-------------- crates/rpc/rpc/src/debug.rs | 7 ++---- crates/rpc/rpc/src/validation.rs | 2 +- 8 files changed, 20 insertions(+), 39 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 264d35f2d10d..3964ea53b7e2 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -95,7 +95,7 @@ impl BlockchainTree { impl BlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. /// diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 9fba5bab4a9f..bfa6627413aa 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -77,7 +77,7 @@ impl AppendableChain { ) -> Result where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let execution_outcome = ExecutionOutcome::default(); let empty = BTreeMap::new(); @@ -115,7 +115,7 @@ impl AppendableChain { ) -> Result where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let parent_number = block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; @@ -178,7 +178,7 @@ impl AppendableChain { where EDP: FullExecutionDataProvider, N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { // some checks are done before blocks comes here. 
externals.consensus.validate_header_against_parent(&block, parent_block)?; @@ -209,7 +209,7 @@ impl AppendableChain { let block_hash = block.hash(); let block = block.unseal(); - let state = executor.execute(&block)?; + let state = executor.execute(&block).unwrap(); externals.consensus.validate_block_post_execution( &block, PostExecutionInput::new(&state.receipts, &state.requests), @@ -284,7 +284,7 @@ impl AppendableChain { ) -> Result<(), InsertBlockErrorKind> where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let parent_block = self.chain.tip(); diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index d37a849867b3..e668f4e2dac0 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -12,7 +12,6 @@ use reth_blockchain_tree_api::{ InsertPayloadOk, }; use reth_evm::execute::BlockExecutorProvider; -use reth_execution_errors::BlockExecutionError; use reth_node_types::NodeTypesWithDB; use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ @@ -40,7 +39,7 @@ impl ShareableBlockchainTree { impl BlockchainTreeEngine for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { let mut tree = self.tree.write(); @@ -111,7 +110,7 @@ where impl BlockchainTreeViewer for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn header_by_hash(&self, hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); @@ -174,7 +173,7 @@ where impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn find_pending_state_provider( &self, diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 139740c831e0..ca65f3fcf239 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -201,7 +201,7 @@ pub trait BlockExecutionStrategy { type Primitives: NodePrimitives; /// The error type returned by this strategy's methods. - type Error: From + Into; + type Error; /// Initialize the strategy with the given transaction environment overrides. fn init(&mut self, _tx_env_overrides: Box) {} @@ -446,9 +446,7 @@ where let ExecuteOutput { receipts, .. 
} = self.strategy.execute_transactions(block)?; let requests = self.strategy.apply_post_execution_changes(block, &receipts)?; - self.strategy - .validate_block_post_execution(block, &receipts, &requests) - .map_err(BlockExecutionError::Consensus)?; + self.strategy.validate_block_post_execution(block, &receipts, &requests)?; // prepare the state according to the prune mode let retention = self.batch_record.bundle_retention(block.header().number()); diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index d5d08ba82396..242ddfe5b79a 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -145,7 +145,6 @@ mod tests { use super::*; use alloy_eips::eip7685::Requests; use metrics_util::debugging::{DebugValue, DebuggingRecorder, Snapshotter}; - use reth_execution_errors::BlockExecutionError; use revm::db::BundleState; use revm_primitives::{ Account, AccountInfo, AccountStatus, EvmState, EvmStorage, EvmStorageSlot, B256, U256, @@ -163,7 +162,7 @@ mod tests { where Self: 'a; type Output = BlockExecutionOutput<()>; - type Error = BlockExecutionError; + type Error = std::convert::Infallible; fn execute(self, _input: Self::Input<'_>) -> Result { Ok(BlockExecutionOutput { diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 500cb9f9b5e2..9de21216a011 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -39,10 +39,7 @@ pub struct BackfillJob { impl Iterator for BackfillJob where - E: BlockExecutorProvider< - Primitives: NodePrimitives, - Error = BlockExecutionError, - >, + E: BlockExecutorProvider>, P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult>; @@ -58,10 +55,7 @@ where impl BackfillJob where - E: BlockExecutorProvider< - Primitives: NodePrimitives, - Error = BlockExecutionError, - >, + E: BlockExecutorProvider>, P: BlockReader + HeaderProvider + StateProviderFactory, { /// Converts the backfill job into a single block backfill job. @@ -118,7 +112,7 @@ where let (unsealed_header, hash) = header.split(); let block = P::Block::new(unsealed_header, body).with_senders_unchecked(senders); - executor.execute_and_verify_one(&block)?; + executor.execute_and_verify_one(&block).unwrap(); execution_duration += execute_start.elapsed(); // TODO(alexey): report gas metrics using `block.header.gas_used` @@ -168,10 +162,7 @@ pub struct SingleBlockBackfillJob { impl Iterator for SingleBlockBackfillJob where - E: BlockExecutorProvider< - Primitives: NodePrimitives, - Error = BlockExecutionError, - >, + E: BlockExecutorProvider>, P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult<( @@ -186,10 +177,7 @@ where impl SingleBlockBackfillJob where - E: BlockExecutorProvider< - Primitives: NodePrimitives, - Error = BlockExecutionError, - >, + E: BlockExecutorProvider>, P: HeaderProvider + BlockReader + StateProviderFactory, { /// Converts the single block backfill job into a stream. 
diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 5cd0bab466c4..b88bac816f8b 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -15,7 +15,6 @@ use alloy_rpc_types_trace::geth::{ use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::EthereumHardforks; -use reth_errors::BlockExecutionError; use reth_evm::{ env::EvmEnv, execute::{BlockExecutorProvider, Executor}, @@ -84,10 +83,8 @@ impl DebugApi { impl DebugApi where Eth: EthApiTypes + TraceExt + 'static, - BlockExecutor: BlockExecutorProvider< - Primitives: NodePrimitives>, - Error = BlockExecutionError, - >, + BlockExecutor: + BlockExecutorProvider>>, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 60fc16b0b14f..cb3ab4f296cf 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -93,7 +93,7 @@ where + ChainSpecProvider + StateProviderFactory + 'static, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( From 0c74b82851d430bafa7919917c9d7e47b37a409f Mon Sep 17 00:00:00 2001 From: chungquantin <56880684+chungquantin@users.noreply.github.com> Date: Thu, 16 Jan 2025 23:19:11 +0700 Subject: [PATCH 05/13] refactor(partial): add BlockExecutionError to BlockExecutionProvider --- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/blockchain-tree/src/chain.rs | 10 +++---- crates/blockchain-tree/src/shareable.rs | 7 ++--- crates/evm/src/execute.rs | 8 +++--- crates/evm/src/metrics.rs | 3 ++- crates/exex/exex/src/backfill/job.rs | 22 ++++++++++++---- crates/exex/exex/src/backfill/stream.rs | 12 +++++++-- crates/exex/exex/src/notifications.rs | 26 ++++++++++++------- crates/node/api/src/node.rs | 7 +++-- crates/rpc/rpc/src/debug.rs | 13 +++++++--- crates/rpc/rpc/src/validation.rs | 2 +- 11 files changed, 76 insertions(+), 36 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 3964ea53b7e2..264d35f2d10d 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -95,7 +95,7 @@ impl BlockchainTree { impl BlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. /// diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index bfa6627413aa..9fba5bab4a9f 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -77,7 +77,7 @@ impl AppendableChain { ) -> Result where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let execution_outcome = ExecutionOutcome::default(); let empty = BTreeMap::new(); @@ -115,7 +115,7 @@ impl AppendableChain { ) -> Result where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let parent_number = block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; @@ -178,7 +178,7 @@ impl AppendableChain { where EDP: FullExecutionDataProvider, N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { // some checks are done before blocks comes here. 
externals.consensus.validate_header_against_parent(&block, parent_block)?; @@ -209,7 +209,7 @@ impl AppendableChain { let block_hash = block.hash(); let block = block.unseal(); - let state = executor.execute(&block).unwrap(); + let state = executor.execute(&block)?; externals.consensus.validate_block_post_execution( &block, PostExecutionInput::new(&state.receipts, &state.requests), @@ -284,7 +284,7 @@ impl AppendableChain { ) -> Result<(), InsertBlockErrorKind> where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let parent_block = self.chain.tip(); diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index e668f4e2dac0..d37a849867b3 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -12,6 +12,7 @@ use reth_blockchain_tree_api::{ InsertPayloadOk, }; use reth_evm::execute::BlockExecutorProvider; +use reth_execution_errors::BlockExecutionError; use reth_node_types::NodeTypesWithDB; use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ @@ -39,7 +40,7 @@ impl ShareableBlockchainTree { impl BlockchainTreeEngine for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { let mut tree = self.tree.write(); @@ -110,7 +111,7 @@ where impl BlockchainTreeViewer for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn header_by_hash(&self, hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); @@ -173,7 +174,7 @@ where impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn find_pending_state_provider( &self, diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index ca65f3fcf239..c0b4735f0258 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -81,7 +81,7 @@ pub trait BatchExecutor { /// The output type for the executor. type Output; /// The error type returned by the executor. - type Error; + type Error: Into; /// Executes the next block in the batch, verifies the output and updates the state internally. fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>; @@ -201,7 +201,7 @@ pub trait BlockExecutionStrategy { type Primitives: NodePrimitives; /// The error type returned by this strategy's methods. - type Error; + type Error: From + Into; /// Initialize the strategy with the given transaction environment overrides. fn init(&mut self, _tx_env_overrides: Box) {} @@ -446,7 +446,9 @@ where let ExecuteOutput { receipts, .. 
} = self.strategy.execute_transactions(block)?; let requests = self.strategy.apply_post_execution_changes(block, &receipts)?; - self.strategy.validate_block_post_execution(block, &receipts, &requests)?; + self.strategy + .validate_block_post_execution(block, &receipts, &requests) + .map_err(BlockExecutionError::Consensus)?; // prepare the state according to the prune mode let retention = self.batch_record.bundle_retention(block.header().number()); diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index 242ddfe5b79a..d5d08ba82396 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -145,6 +145,7 @@ mod tests { use super::*; use alloy_eips::eip7685::Requests; use metrics_util::debugging::{DebugValue, DebuggingRecorder, Snapshotter}; + use reth_execution_errors::BlockExecutionError; use revm::db::BundleState; use revm_primitives::{ Account, AccountInfo, AccountStatus, EvmState, EvmStorage, EvmStorageSlot, B256, U256, @@ -162,7 +163,7 @@ mod tests { where Self: 'a; type Output = BlockExecutionOutput<()>; - type Error = std::convert::Infallible; + type Error = BlockExecutionError; fn execute(self, _input: Self::Input<'_>) -> Result { Ok(BlockExecutionOutput { diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 9de21216a011..500cb9f9b5e2 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -39,7 +39,10 @@ pub struct BackfillJob { impl Iterator for BackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult>; @@ -55,7 +58,10 @@ where impl BackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: BlockReader + HeaderProvider + StateProviderFactory, { /// Converts the backfill job into a single block backfill job. @@ -112,7 +118,7 @@ where let (unsealed_header, hash) = header.split(); let block = P::Block::new(unsealed_header, body).with_senders_unchecked(senders); - executor.execute_and_verify_one(&block).unwrap(); + executor.execute_and_verify_one(&block)?; execution_duration += execute_start.elapsed(); // TODO(alexey): report gas metrics using `block.header.gas_used` @@ -162,7 +168,10 @@ pub struct SingleBlockBackfillJob { impl Iterator for SingleBlockBackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult<( @@ -177,7 +186,10 @@ where impl SingleBlockBackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: HeaderProvider + BlockReader + StateProviderFactory, { /// Converts the single block backfill job into a stream. 
diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index 8e572422e4a7..cf20a4a85767 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -114,7 +114,11 @@ where impl Stream for StreamBackfillJob> where - E: BlockExecutorProvider> + Clone + 'static, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + > + Clone + + 'static, P: BlockReader + StateProviderFactory + Clone + Unpin + 'static, { type Item = BackfillJobResult>; @@ -147,7 +151,11 @@ where impl Stream for StreamBackfillJob> where - E: BlockExecutorProvider> + Clone + 'static, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + > + Clone + + 'static, P: BlockReader + StateProviderFactory + Clone + Unpin + 'static, { type Item = BackfillJobResult>; diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 05892e2f90d5..7c56f75affaa 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -2,7 +2,7 @@ use crate::{BackfillJobFactory, ExExNotification, StreamBackfillJob, WalHandle}; use alloy_consensus::BlockHeader; use futures::{Stream, StreamExt}; use reth_chainspec::Head; -use reth_evm::execute::BlockExecutorProvider; +use reth_evm::execute::{BlockExecutionError, BlockExecutorProvider}; use reth_exex_types::ExExHead; use reth_node_api::NodePrimitives; use reth_primitives::EthPrimitives; @@ -105,8 +105,10 @@ where impl ExExNotificationsStream for ExExNotifications where P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider> - + Clone + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + > + Clone + Unpin + 'static, { @@ -157,8 +159,10 @@ where impl Stream for ExExNotifications where P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider> - + Clone + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + > + Clone + Unpin + 'static, { @@ -301,8 +305,10 @@ where impl ExExNotificationsWithHead where P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider> - + Clone + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + > + Clone + Unpin + 'static, { @@ -381,8 +387,10 @@ where impl Stream for ExExNotificationsWithHead where P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider> - + Clone + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + > + Clone + Unpin + 'static, { diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 8db75480d11a..712930c70b1a 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -8,7 +8,7 @@ use reth_db_api::{ database_metrics::{DatabaseMetadata, DatabaseMetrics}, Database, }; -use reth_evm::execute::BlockExecutorProvider; +use reth_evm::execute::{BlockExecutionError, BlockExecutorProvider}; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; use reth_node_types::{HeaderTy, NodeTypes, NodeTypesWithDBAdapter, NodeTypesWithEngine, TxTy}; @@ -55,7 +55,10 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { type Evm: ConfigureEvm
<Header = HeaderTy<Self::Types>
, Transaction = TxTy>; /// The type that knows how to execute blocks. - type Executor: BlockExecutorProvider::Primitives>; + type Executor: BlockExecutorProvider< + Primitives = ::Primitives, + Error = BlockExecutionError, + >; /// The consensus type of the node. type Consensus: FullConsensus<::Primitives> + Clone + Unpin + 'static; diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index b88bac816f8b..c8d764d7a2cf 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -15,6 +15,7 @@ use alloy_rpc_types_trace::geth::{ use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::EthereumHardforks; +use reth_errors::BlockExecutionError; use reth_evm::{ env::EvmEnv, execute::{BlockExecutorProvider, Executor}, @@ -83,8 +84,10 @@ impl DebugApi { impl DebugApi where Eth: EthApiTypes + TraceExt + 'static, - BlockExecutor: - BlockExecutorProvider>>, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives>, + Error = BlockExecutionError, + >, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { @@ -819,8 +822,10 @@ where impl DebugApiServer for DebugApi where Eth: EthApiTypes + EthTransactions + TraceExt + 'static, - BlockExecutor: - BlockExecutorProvider>>, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives>, + Error = BlockExecutionError, + >, { /// Handler for `debug_getRawHeader` async fn raw_header(&self, block_id: BlockId) -> RpcResult { diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index cb3ab4f296cf..60fc16b0b14f 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -93,7 +93,7 @@ where + ChainSpecProvider + StateProviderFactory + 'static, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( From 214bd6b62ca1b31125a55ed287ef0f6293cd3502 Mon Sep 17 00:00:00 2001 From: chungquantin <56880684+chungquantin@users.noreply.github.com> Date: Tue, 7 Jan 2025 13:39:06 +0700 Subject: [PATCH 06/13] refactor: add Eth variant to OpBlockExecutionError --- crates/ethereum/evm/src/execute.rs | 2 + crates/evm/src/execute.rs | 8 ++- crates/optimism/evm/src/error.rs | 13 ++++- crates/optimism/evm/src/execute.rs | 57 +++++++++++-------- .../custom-beacon-withdrawals/src/main.rs | 1 + 5 files changed, 53 insertions(+), 28 deletions(-) diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 8bc3272272b2..5254a6e94353 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -69,6 +69,8 @@ where Transaction = reth_primitives::TransactionSigned, >, { + type Error = BlockExecutionError; + type Primitives = EthPrimitives; type Strategy + Display>> = diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 2b221f14564a..b04f173ac966 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -249,6 +249,9 @@ pub trait BlockExecutionStrategy { /// A strategy factory that can create block execution strategies. pub trait BlockExecutionStrategyFactory: Send + Sync + Clone + Unpin + 'static { + /// The error type returned by this strategy's methods. + type Error: From + core::error::Error; + /// Primitive types used by the strategy. 
type Primitives: NodePrimitives; @@ -256,7 +259,7 @@ pub trait BlockExecutionStrategyFactory: Send + Sync + Clone + Unpin + 'static { type Strategy + Display>>: BlockExecutionStrategy< DB = DB, Primitives = Self::Primitives, - Error = BlockExecutionError, + Error = Self::Error, >; /// Creates a strategy using the give database. @@ -289,7 +292,7 @@ impl BasicBlockExecutorProvider { impl BlockExecutorProvider for BasicBlockExecutorProvider where - F: BlockExecutionStrategyFactory, + F: BlockExecutionStrategyFactory, { type Primitives = F::Primitives; @@ -623,6 +626,7 @@ mod tests { } impl BlockExecutionStrategyFactory for TestExecutorStrategyFactory { + type Error = BlockExecutionError; type Primitives = EthPrimitives; type Strategy + Display>> = TestExecutorStrategy; diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index 461f8c11e4fb..f1c8f6304adc 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -1,10 +1,10 @@ //! Error types for the Optimism EVM module. use alloc::string::String; -use reth_evm::execute::BlockExecutionError; +use reth_evm::execute::{BlockExecutionError, ProviderError}; /// Optimism Block Executor Errors -#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)] +#[derive(Debug, thiserror::Error)] pub enum OpBlockExecutionError { /// Error when trying to parse L1 block info #[error("could not get L1 block info from L2 block: {message}")] @@ -21,6 +21,9 @@ pub enum OpBlockExecutionError { /// Thrown when a database account could not be loaded. #[error("failed to load account {_0}")] AccountLoadFailed(alloy_primitives::Address), + /// Thrown when a L1 block execution failed. + #[error("execution error on L1: {_0}")] + Eth(BlockExecutionError), } impl From for BlockExecutionError { @@ -28,3 +31,9 @@ impl From for BlockExecutionError { Self::other(err) } } + +impl From for OpBlockExecutionError { + fn from(error: ProviderError) -> Self { + Self::Eth(BlockExecutionError::from(error)) + } +} diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index bfac67fbb573..2c6a3c05f543 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -79,6 +79,7 @@ where + ConfigureEvm
, { type Primitives = N; + type Error = OpBlockExecutionError; type Strategy + Display>> = OpExecutionStrategy; @@ -153,7 +154,7 @@ where { type DB = DB; type Primitives = N; - type Error = BlockExecutionError; + type Error = OpBlockExecutionError; fn init(&mut self, tx_env_overrides: Box) { self.tx_env_overrides = Some(tx_env_overrides); @@ -170,12 +171,14 @@ where let mut evm = self.evm_config.evm_for_block(&mut self.state, block.header()); - self.system_caller.apply_beacon_root_contract_call( - block.header().timestamp, - block.header().number, - block.header().parent_beacon_block_root, - &mut evm, - )?; + self.system_caller + .apply_beacon_root_contract_call( + block.header().timestamp, + block.header().number, + block.header().parent_beacon_block_root, + &mut evm, + ) + .map_err(OpBlockExecutionError::Eth)?; // Ensure that the create2deployer is force-deployed at the canyon transition. Optimism // blocks will always have at least a single transaction in them (the L1 info transaction), @@ -202,14 +205,15 @@ where // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, // must be no greater than the block’s gasLimit. let block_available_gas = block.gas_limit() - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas && - (is_regolith || !transaction.is_deposit()) + if transaction.gas_limit() > block_available_gas + && (is_regolith || !transaction.is_deposit()) { - return Err(BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { - transaction_gas_limit: transaction.gas_limit(), - block_available_gas, - } - .into()) + return Err(OpBlockExecutionError::Eth(BlockExecutionError::Validation( + BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { + transaction_gas_limit: transaction.gas_limit(), + block_available_gas, + }, + ))); } // Cache the depositor account prior to the state transition for the deposit nonce. @@ -236,10 +240,12 @@ where let result_and_state = evm.transact().map_err(move |err| { let new_err = err.map_db_err(|e| e.into()); // Ensure hash is calculated for error log, if not already done - BlockValidationError::EVM { - hash: transaction.recalculate_hash(), - error: Box::new(new_err), - } + OpBlockExecutionError::Eth(BlockExecutionError::Validation( + BlockValidationError::EVM { + hash: transaction.recalculate_hash(), + error: Box::new(new_err), + }, + )) })?; trace!( @@ -279,8 +285,8 @@ where // when set. The state transition process ensures // this is only set for post-Canyon deposit // transactions. - deposit_receipt_version: (transaction.is_deposit() && - self.chain_spec.is_fork_active_at_timestamp( + deposit_receipt_version: (transaction.is_deposit() + && self.chain_spec.is_fork_active_at_timestamp( OpHardfork::Canyon, block.header().timestamp, )) @@ -301,11 +307,14 @@ where ) -> Result { let balance_increments = post_block_balance_increments(&self.chain_spec.clone(), block); // increment balances - self.state - .increment_balances(balance_increments.clone()) - .map_err(|_| BlockValidationError::IncrementBalanceFailed)?; + self.state.increment_balances(balance_increments.clone()).map_err(|_| { + OpBlockExecutionError::Eth(BlockExecutionError::Validation( + BlockValidationError::IncrementBalanceFailed, + )) + })?; // call state hook with changes due to balance increments. 
- let balance_state = balance_increment_state(&balance_increments, &mut self.state)?; + let balance_state = balance_increment_state(&balance_increments, &mut self.state) + .map_err(OpBlockExecutionError::Eth)?; self.system_caller.on_state(&balance_state); Ok(Requests::default()) diff --git a/examples/custom-beacon-withdrawals/src/main.rs b/examples/custom-beacon-withdrawals/src/main.rs index 7bb8a77d2598..bbd76bdbc660 100644 --- a/examples/custom-beacon-withdrawals/src/main.rs +++ b/examples/custom-beacon-withdrawals/src/main.rs @@ -89,6 +89,7 @@ pub struct CustomExecutorStrategyFactory { } impl BlockExecutionStrategyFactory for CustomExecutorStrategyFactory { + type Error = BlockExecutionError; type Primitives = EthPrimitives; type Strategy + Display>> = CustomExecutorStrategy; From f1278f18152e108932628989a03cba12e0cf3aed Mon Sep 17 00:00:00 2001 From: chungquantin <56880684+chungquantin@users.noreply.github.com> Date: Thu, 16 Jan 2025 17:15:13 +0700 Subject: [PATCH 07/13] refactor: BlockExecutionStrategyFactory --- crates/blockchain-tree/src/chain.rs | 311 +++++++++++++++++++++++++++ crates/evm/src/either.rs | 4 +- crates/evm/src/execute.rs | 20 +- crates/evm/src/noop.rs | 2 + crates/evm/src/test_utils.rs | 2 + crates/exex/exex/src/backfill/job.rs | 2 +- crates/optimism/evm/src/error.rs | 16 +- 7 files changed, 334 insertions(+), 23 deletions(-) create mode 100644 crates/blockchain-tree/src/chain.rs diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs new file mode 100644 index 000000000000..bfa6627413aa --- /dev/null +++ b/crates/blockchain-tree/src/chain.rs @@ -0,0 +1,311 @@ +//! A chain in a [`BlockchainTree`][super::BlockchainTree]. +//! +//! A [`Chain`] contains the state of accounts for the chain after execution of its constituent +//! blocks, as well as a list of the blocks the chain is composed of. + +use super::externals::TreeExternals; +use crate::BundleStateDataRef; +use alloy_eips::ForkBlock; +use alloy_primitives::{BlockHash, BlockNumber}; +use reth_blockchain_tree_api::{ + error::{BlockchainTreeError, InsertBlockErrorKind}, + BlockAttachment, BlockValidationKind, +}; +use reth_consensus::{ConsensusError, PostExecutionInput}; +use reth_evm::execute::{BlockExecutorProvider, Executor}; +use reth_execution_errors::BlockExecutionError; +use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader}; +use reth_provider::{ + providers::{BundleStateProvider, ConsistentDbView, TreeNodeTypes}, + DBProvider, FullExecutionDataProvider, HashedPostStateProvider, ProviderError, + StateRootProvider, TryIntoHistoricalStateProvider, +}; +use reth_revm::database::StateProviderDatabase; +use reth_trie::{updates::TrieUpdates, TrieInput}; +use reth_trie_parallel::root::ParallelStateRoot; +use std::{ + collections::BTreeMap, + ops::{Deref, DerefMut}, + time::Instant, +}; + +/// A chain in the blockchain tree that has functionality to execute blocks and append them to +/// itself. +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct AppendableChain { + chain: Chain, +} + +impl Deref for AppendableChain { + type Target = Chain; + + fn deref(&self) -> &Self::Target { + &self.chain + } +} + +impl DerefMut for AppendableChain { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.chain + } +} + +impl AppendableChain { + /// Create a new appendable chain from a given chain. + pub const fn new(chain: Chain) -> Self { + Self { chain } + } + + /// Get the chain. 
+ pub fn into_inner(self) -> Chain { + self.chain + } + + /// Create a new chain that forks off of the canonical chain. + /// + /// if [`BlockValidationKind::Exhaustive`] is specified, the method will verify the state root + /// of the block. + pub fn new_canonical_fork( + block: SealedBlockWithSenders, + parent_header: &SealedHeader, + canonical_block_hashes: &BTreeMap, + canonical_fork: ForkBlock, + externals: &TreeExternals, + block_attachment: BlockAttachment, + block_validation_kind: BlockValidationKind, + ) -> Result + where + N: TreeNodeTypes, + E: BlockExecutorProvider, + { + let execution_outcome = ExecutionOutcome::default(); + let empty = BTreeMap::new(); + + let state_provider = BundleStateDataRef { + execution_outcome: &execution_outcome, + sidechain_block_hashes: &empty, + canonical_block_hashes, + canonical_fork, + }; + + let (bundle_state, trie_updates) = Self::validate_and_execute( + block.clone(), + parent_header, + state_provider, + externals, + block_attachment, + block_validation_kind, + )?; + + Ok(Self::new(Chain::new(vec![block], bundle_state, trie_updates))) + } + + /// Create a new chain that forks off of an existing sidechain. + /// + /// This differs from [`AppendableChain::new_canonical_fork`] in that this starts a new fork. + pub(crate) fn new_chain_fork( + &self, + block: SealedBlockWithSenders, + side_chain_block_hashes: BTreeMap, + canonical_block_hashes: &BTreeMap, + canonical_fork: ForkBlock, + externals: &TreeExternals, + block_validation_kind: BlockValidationKind, + ) -> Result + where + N: TreeNodeTypes, + E: BlockExecutorProvider, + { + let parent_number = + block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; + let parent = self.blocks().get(&parent_number).ok_or( + BlockchainTreeError::BlockNumberNotFoundInChain { block_number: parent_number }, + )?; + + let mut execution_outcome = self.execution_outcome().clone(); + + // Revert state to the state after execution of the parent block + execution_outcome.revert_to(parent.number); + + // Revert changesets to get the state of the parent that we need to apply the change. + let bundle_state_data = BundleStateDataRef { + execution_outcome: &execution_outcome, + sidechain_block_hashes: &side_chain_block_hashes, + canonical_block_hashes, + canonical_fork, + }; + let (block_state, _) = Self::validate_and_execute( + block.clone(), + parent, + bundle_state_data, + externals, + BlockAttachment::HistoricalFork, + block_validation_kind, + )?; + // extending will also optimize few things, mostly related to selfdestruct and wiping of + // storage. + execution_outcome.extend(block_state); + + // remove all receipts and reverts (except the last one), as they belong to the chain we + // forked from and not the new chain we are creating. + let size = execution_outcome.receipts().len(); + execution_outcome.receipts_mut().drain(0..size - 1); + execution_outcome.state_mut().take_n_reverts(size - 1); + execution_outcome.set_first_block(block.number); + + // If all is okay, return new chain back. Present chain is not modified. + Ok(Self::new(Chain::from_block(block, execution_outcome, None))) + } + + /// Validate and execute the given block that _extends the canonical chain_, validating its + /// state root after execution if possible and requested. + /// + /// Note: State root validation is limited to blocks that extend the canonical chain and is + /// optional, see [`BlockValidationKind`]. So this function takes two parameters to determine + /// if the state can and should be validated. 
+ /// - [`BlockAttachment`] represents if the block extends the canonical chain, and thus we can + /// cache the trie state updates. + /// - [`BlockValidationKind`] determines if the state root __should__ be validated. + fn validate_and_execute( + block: SealedBlockWithSenders, + parent_block: &SealedHeader, + bundle_state_data_provider: EDP, + externals: &TreeExternals, + block_attachment: BlockAttachment, + block_validation_kind: BlockValidationKind, + ) -> Result<(ExecutionOutcome, Option), BlockExecutionError> + where + EDP: FullExecutionDataProvider, + N: TreeNodeTypes, + E: BlockExecutorProvider, + { + // some checks are done before blocks comes here. + externals.consensus.validate_header_against_parent(&block, parent_block)?; + + // get the state provider. + let canonical_fork = bundle_state_data_provider.canonical_fork(); + + // SAFETY: For block execution and parallel state root computation below we open multiple + // independent database transactions. Upon opening the database transaction the consistent + // view will check a current tip in the database and throw an error if it doesn't match + // the one recorded during initialization. + // It is safe to use consistent view without any special error handling as long as + // we guarantee that plain state cannot change during processing of new payload. + // The usage has to be re-evaluated if that was ever to change. + let consistent_view = + ConsistentDbView::new_with_latest_tip(externals.provider_factory.clone())?; + let state_provider = consistent_view + .provider_ro()? + // State root calculation can take a while, and we're sure no write transaction + // will be open in parallel. See https://github.com/paradigmxyz/reth/issues/7509. + .disable_long_read_transaction_safety() + .try_into_history_at_block(canonical_fork.number)?; + + let provider = BundleStateProvider::new(state_provider, bundle_state_data_provider); + + let db = StateProviderDatabase::new(&provider); + let executor = externals.executor_factory.executor(db); + let block_hash = block.hash(); + let block = block.unseal(); + + let state = executor.execute(&block).unwrap(); + externals.consensus.validate_block_post_execution( + &block, + PostExecutionInput::new(&state.receipts, &state.requests), + )?; + + let initial_execution_outcome = ExecutionOutcome::from((state, block.number)); + + // check state root if the block extends the canonical chain __and__ if state root + // validation was requested. + if block_validation_kind.is_exhaustive() { + // calculate and check state root + let start = Instant::now(); + let (state_root, trie_updates) = if block_attachment.is_canonical() { + let mut execution_outcome = + provider.block_execution_data_provider.execution_outcome().clone(); + execution_outcome.extend(initial_execution_outcome.clone()); + ParallelStateRoot::new( + consistent_view, + TrieInput::from_state(provider.hashed_post_state(execution_outcome.state())), + ) + .incremental_root_with_updates() + .map(|(root, updates)| (root, Some(updates))) + .map_err(ProviderError::from)? 
+ } else { + let hashed_state = provider.hashed_post_state(initial_execution_outcome.state()); + let state_root = provider.state_root(hashed_state)?; + (state_root, None) + }; + if block.state_root != state_root { + return Err(ConsensusError::BodyStateRootDiff( + GotExpected { got: state_root, expected: block.state_root }.into(), + ) + .into()) + } + + tracing::debug!( + target: "blockchain_tree::chain", + number = block.number, + hash = %block_hash, + elapsed = ?start.elapsed(), + "Validated state root" + ); + + Ok((initial_execution_outcome, trie_updates)) + } else { + Ok((initial_execution_outcome, None)) + } + } + + /// Validate and execute the given block, and append it to this chain. + /// + /// This expects that the block's ancestors can be traced back to the `canonical_fork` (the + /// first parent block of the `block`'s chain that is in the canonical chain). + /// + /// In other words, expects a gap less (side-) chain: [`canonical_fork..block`] in order to be + /// able to __execute__ the block. + /// + /// CAUTION: This will only perform state root check if it's possible: if the `canonical_fork` + /// is the canonical head, or: state root check can't be performed if the given canonical is + /// __not__ the canonical head. + #[track_caller] + #[allow(clippy::too_many_arguments)] + pub(crate) fn append_block( + &mut self, + block: SealedBlockWithSenders, + side_chain_block_hashes: BTreeMap, + canonical_block_hashes: &BTreeMap, + externals: &TreeExternals, + canonical_fork: ForkBlock, + block_attachment: BlockAttachment, + block_validation_kind: BlockValidationKind, + ) -> Result<(), InsertBlockErrorKind> + where + N: TreeNodeTypes, + E: BlockExecutorProvider, + { + let parent_block = self.chain.tip(); + + let bundle_state_data = BundleStateDataRef { + execution_outcome: self.execution_outcome(), + sidechain_block_hashes: &side_chain_block_hashes, + canonical_block_hashes, + canonical_fork, + }; + + let (block_state, _) = Self::validate_and_execute( + block.clone(), + parent_block, + bundle_state_data, + externals, + block_attachment, + block_validation_kind, + )?; + // extend the state. + self.chain.append_block(block, block_state); + + Ok(()) + } +} diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index 4faeb1a72030..7a59b7f8a620 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -19,10 +19,12 @@ use revm::State; impl BlockExecutorProvider for Either where A: BlockExecutorProvider, - B: BlockExecutorProvider, + B: BlockExecutorProvider, { type Primitives = A::Primitives; + type Error = A::Error; + type Executor + Display>> = Either, B::Executor>; diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index b04f173ac966..dff6240a5ca7 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -137,6 +137,9 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { /// Receipt type. type Primitives: NodePrimitives; + /// The error type returned by the executor. + type Error; + /// An executor that can execute a single block given a database. /// /// # Verification @@ -152,7 +155,7 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { DB, Input<'a> = &'a RecoveredBlock<::Block>, Output = BlockExecutionOutput<::Receipt>, - Error = BlockExecutionError, + Error = Self::Error, >; /// An executor that can execute a batch of blocks given a database. 
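The `execute.rs` hunks above replace the hard-coded `Error = BlockExecutionError` bounds with an associated `type Error` that flows from the strategy factory through the provider. A minimal sketch of that forwarding pattern, with abbreviated stand-in traits rather than the full reth signatures:

```rust
/// Stand-in for the factory trait: it now owns the choice of error type.
trait StrategyFactory {
    type Error;
}

/// Stand-in for the provider trait, which exposes the same associated error.
trait ExecutorProvider {
    type Error;
}

/// Mirrors `BasicBlockExecutorProvider<F>` simply forwarding `F::Error`
/// instead of pinning `BlockExecutionError`.
struct BasicProvider<F>(F);

impl<F: StrategyFactory> ExecutorProvider for BasicProvider<F> {
    type Error = F::Error;
}

/// Mirrors the test and example factories in this series that keep the old
/// behavior by pinning a concrete error type.
struct FixedFactory;

#[derive(Debug)]
struct FixedError;

impl StrategyFactory for FixedFactory {
    type Error = FixedError;
}
```

Downstream code that was generic over the provider can then name `Provider::Error` in its bounds instead of a single concrete error type, which is what the `Either` and batch-executor hunks in this commit do.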
@@ -160,7 +163,7 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { DB, Input<'a> = &'a RecoveredBlock<::Block>, Output = ExecutionOutcome<::Receipt>, - Error = BlockExecutionError, + Error = Self::Error, >; /// Creates a new executor for single block execution. @@ -197,7 +200,7 @@ pub trait BlockExecutionStrategy { type Primitives: NodePrimitives; /// The error type returned by this strategy's methods. - type Error: From + core::error::Error; + type Error; /// Initialize the strategy with the given transaction environment overrides. fn init(&mut self, _tx_env_overrides: Box) {} @@ -250,7 +253,7 @@ pub trait BlockExecutionStrategy { /// A strategy factory that can create block execution strategies. pub trait BlockExecutionStrategyFactory: Send + Sync + Clone + Unpin + 'static { /// The error type returned by this strategy's methods. - type Error: From + core::error::Error; + type Error; /// Primitive types used by the strategy. type Primitives: NodePrimitives; @@ -292,8 +295,10 @@ impl BasicBlockExecutorProvider { impl BlockExecutorProvider for BasicBlockExecutorProvider where - F: BlockExecutionStrategyFactory, + F: BlockExecutionStrategyFactory, { + type Error = F::Error; + type Primitives = F::Primitives; type Executor + Display>> = @@ -424,12 +429,12 @@ where impl BatchExecutor for BasicBatchExecutor where - S: BlockExecutionStrategy, + S: BlockExecutionStrategy, DB: Database + Display>, { type Input<'a> = &'a RecoveredBlock<::Block>; type Output = ExecutionOutcome<::Receipt>; - type Error = BlockExecutionError; + type Error = S::Error; fn execute_and_verify_one(&mut self, block: Self::Input<'_>) -> Result<(), Self::Error> { if self.batch_record.first_block().is_none() { @@ -528,6 +533,7 @@ mod tests { struct TestExecutorProvider; impl BlockExecutorProvider for TestExecutorProvider { + type Error = BlockExecutionError; type Primitives = EthPrimitives; type Executor + Display>> = TestExecutor; type BatchExecutor + Display>> = TestExecutor; diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index 27e2e9623929..aaca6f20aaf6 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -25,6 +25,8 @@ pub struct NoopBlockExecutorProvider
<P>(core::marker::PhantomData<P>);

impl<P: NodePrimitives> BlockExecutorProvider for NoopBlockExecutorProvider<P>
{ type Primitives = P; + type Error = BlockExecutionError; + type Executor + Display>> = Self; type BatchExecutor + Display>> = Self; diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index 2eaf7fdc5aa1..001ee32eb314 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -35,6 +35,8 @@ impl MockExecutorProvider { impl BlockExecutorProvider for MockExecutorProvider { type Primitives = EthPrimitives; + type Error = BlockExecutionError; + type Executor + Display>> = Self; type BatchExecutor + Display>> = Self; diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 4f1ac8e97aa4..6d8f0397588e 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -111,7 +111,7 @@ where let (header, body) = block.split_sealed_header_body(); let block = P::Block::new_sealed(header, body).with_senders(senders); - executor.execute_and_verify_one(&block)?; + executor.execute_and_verify_one(&block).unwrap(); execution_duration += execute_start.elapsed(); // TODO(alexey): report gas metrics using `block.header.gas_used` diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index f1c8f6304adc..4082d0159dd2 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -22,18 +22,6 @@ pub enum OpBlockExecutionError { #[error("failed to load account {_0}")] AccountLoadFailed(alloy_primitives::Address), /// Thrown when a L1 block execution failed. - #[error("execution error on L1: {_0}")] - Eth(BlockExecutionError), -} - -impl From for BlockExecutionError { - fn from(err: OpBlockExecutionError) -> Self { - Self::other(err) - } -} - -impl From for OpBlockExecutionError { - fn from(error: ProviderError) -> Self { - Self::Eth(BlockExecutionError::from(error)) - } + #[error(transparent)] + Eth(#[from] BlockExecutionError), } From e7692dc0504343e0eeaa9254dc6e828c6159d2b3 Mon Sep 17 00:00:00 2001 From: chungquantin <56880684+chungquantin@users.noreply.github.com> Date: Thu, 16 Jan 2025 17:43:01 +0700 Subject: [PATCH 08/13] chore: init --- crates/blockchain-tree/src/blockchain_tree.rs | 2441 +++++++++++++++++ crates/blockchain-tree/src/chain.rs | 10 +- crates/blockchain-tree/src/shareable.rs | 206 ++ crates/evm/src/execute.rs | 6 +- crates/evm/src/metrics.rs | 3 +- crates/exex/exex/src/backfill/job.rs | 22 +- crates/rpc/rpc/src/debug.rs | 7 +- crates/rpc/rpc/src/validation.rs | 2 +- 8 files changed, 2681 insertions(+), 16 deletions(-) create mode 100644 crates/blockchain-tree/src/blockchain_tree.rs create mode 100644 crates/blockchain-tree/src/shareable.rs diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs new file mode 100644 index 000000000000..264d35f2d10d --- /dev/null +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -0,0 +1,2441 @@ +//! 
Implementation of [`BlockchainTree`] + +use crate::{ + externals::TreeNodeTypes, + metrics::{MakeCanonicalAction, MakeCanonicalDurationsRecorder, TreeMetrics}, + state::{SidechainId, TreeState}, + AppendableChain, BlockIndices, BlockchainTreeConfig, ExecutionData, TreeExternals, +}; +use alloy_eips::{BlockNumHash, ForkBlock}; +use alloy_primitives::{BlockHash, BlockNumber, B256, U256}; +use reth_blockchain_tree_api::{ + error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, + BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk, +}; +use reth_consensus::{Consensus, ConsensusError}; +use reth_evm::execute::BlockExecutorProvider; +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +use reth_execution_types::{Chain, ExecutionOutcome}; +use reth_node_types::NodeTypesWithDB; +use reth_primitives::{ + EthereumHardfork, GotExpected, Hardforks, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, StaticFileSegment, +}; +use reth_provider::{ + BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, + CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit, + ChainSplitTarget, DBProvider, DisplayBlocksChain, HashedPostStateProvider, HeaderProvider, + ProviderError, StaticFileProviderFactory, StorageLocation, +}; +use reth_stages_api::{MetricEvent, MetricEventsSender}; +use reth_storage_errors::provider::{ProviderResult, RootMismatch}; +use reth_trie::{hashed_cursor::HashedPostStateCursorFactory, StateRoot}; +use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseStateRoot}; +use std::{ + collections::{btree_map::Entry, BTreeMap, HashSet}, + sync::Arc, +}; +use tracing::{debug, error, info, instrument, trace, warn}; + +#[cfg_attr(doc, aquamarine::aquamarine)] +/// A Tree of chains. +/// +/// The flowchart represents all the states a block can have inside the tree. +/// +/// - Green blocks belong to the canonical chain and are saved inside the database. +/// - Pending blocks and sidechains are found in-memory inside [`BlockchainTree`]. +/// +/// Both pending chains and sidechains have the same mechanisms, the only difference is when they +/// get committed to the database. +/// +/// For pending, it is an append operation, but for sidechains they need to move the current +/// canonical blocks to the tree (by removing them from the database), and commit the sidechain +/// blocks to the database to become the canonical chain (reorg). +/// +/// `include_mmd!("docs/mermaid/tree.mmd`") +/// +/// # Main functions +/// * [`BlockchainTree::insert_block`]: Connect a block to a chain, execute it, and if valid, insert +/// the block into the tree. +/// * [`BlockchainTree::finalize_block`]: Remove chains that branch off of the now finalized block. +/// * [`BlockchainTree::make_canonical`]: Check if we have the hash of a block that is the current +/// canonical head and commit it to db. +#[derive(Debug)] +pub struct BlockchainTree { + /// The state of the tree + /// + /// Tracks all the chains, the block indices, and the block buffer. + state: TreeState, + /// External components (the database, consensus engine etc.) + externals: TreeExternals, + /// Tree configuration + config: BlockchainTreeConfig, + /// Broadcast channel for canon state changes notifications. + canon_state_notification_sender: CanonStateNotificationSender, + /// Metrics for sync stages. + sync_metrics_tx: Option, + /// Metrics for the blockchain tree. 
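The `canon_state_notification_sender` field documented above is the broadcast handle the tree uses to fan out canonical-state events; each subscriber gets its own receiver. A minimal sketch of that ownership pattern with `tokio::sync::broadcast`, using a stand-in notification type:

```rust
use tokio::sync::broadcast;

/// Stand-in for `CanonStateNotification`; broadcast payloads must be `Clone`.
#[derive(Clone, Debug)]
struct Notification(u64);

struct Tree {
    canon_state_notification_sender: broadcast::Sender<Notification>,
}

impl Tree {
    fn new(capacity: usize) -> Self {
        // The capacity bounds how far an unread receiver may lag behind before
        // it starts reporting missed messages.
        let (sender, _receiver) = broadcast::channel(capacity);
        Self { canon_state_notification_sender: sender }
    }

    /// Hands out a fresh receiver, like `subscribe_canon_state` below.
    fn subscribe(&self) -> broadcast::Receiver<Notification> {
        self.canon_state_notification_sender.subscribe()
    }
}
```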
+ metrics: TreeMetrics, +} + +impl BlockchainTree { + /// Subscribe to new blocks events. + /// + /// Note: Only canonical blocks are emitted by the tree. + pub fn subscribe_canon_state(&self) -> CanonStateNotifications { + self.canon_state_notification_sender.subscribe() + } + + /// Returns a clone of the sender for the canonical state notifications. + pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender { + self.canon_state_notification_sender.clone() + } +} + +impl BlockchainTree +where + N: TreeNodeTypes, + E: BlockExecutorProvider, +{ + /// Builds the blockchain tree for the node. + /// + /// This method configures the blockchain tree, which is a critical component of the node, + /// responsible for managing the blockchain state, including blocks, transactions, and receipts. + /// It integrates with the consensus mechanism and the EVM for executing transactions. + /// + /// # Parameters + /// - `externals`: External components required by the blockchain tree: + /// - `provider_factory`: A factory for creating various blockchain-related providers, such + /// as for accessing the database or static files. + /// - `consensus`: The consensus configuration, which defines how the node reaches agreement + /// on the blockchain state with other nodes. + /// - `evm_config`: The EVM (Ethereum Virtual Machine) configuration, which affects how + /// smart contracts and transactions are executed. Proper validation of this configuration + /// is crucial for the correct execution of transactions. + /// - `tree_config`: Configuration for the blockchain tree, including any parameters that affect + /// its structure or performance. + pub fn new( + externals: TreeExternals, + config: BlockchainTreeConfig, + ) -> ProviderResult { + let max_reorg_depth = config.max_reorg_depth() as usize; + // The size of the broadcast is twice the maximum reorg depth, because at maximum reorg + // depth at least N blocks must be sent at once. + let (canon_state_notification_sender, _receiver) = + tokio::sync::broadcast::channel(max_reorg_depth * 2); + + let last_canonical_hashes = + externals.fetch_latest_canonical_hashes(config.num_of_canonical_hashes() as usize)?; + + // If we haven't written the finalized block, assume it's zero + let last_finalized_block_number = + externals.fetch_latest_finalized_block_number()?.unwrap_or_default(); + + Ok(Self { + externals, + state: TreeState::new( + last_finalized_block_number, + last_canonical_hashes, + config.max_unconnected_blocks(), + ), + config, + canon_state_notification_sender, + sync_metrics_tx: None, + metrics: Default::default(), + }) + } + + /// Replaces the canon state notification sender. + /// + /// Caution: this will close any existing subscriptions to the previous sender. + #[doc(hidden)] + pub fn with_canon_state_notification_sender( + mut self, + canon_state_notification_sender: CanonStateNotificationSender, + ) -> Self { + self.canon_state_notification_sender = canon_state_notification_sender; + self + } + + /// Set the sync metric events sender. + /// + /// A transmitter for sending synchronization metrics. This is used for monitoring the node's + /// synchronization process with the blockchain network. + pub fn with_sync_metrics_tx(mut self, metrics_tx: MetricEventsSender) -> Self { + self.sync_metrics_tx = Some(metrics_tx); + self + } + + /// Check if the block is known to blockchain tree or database and return its status. + /// + /// Function will check: + /// * if block is inside database returns [`BlockStatus::Valid`]. 
+ /// * if block is inside buffer returns [`BlockStatus::Disconnected`]. + /// * if block is part of the canonical returns [`BlockStatus::Valid`]. + /// + /// Returns an error if + /// - an error occurred while reading from the database. + /// - the block is already finalized + pub(crate) fn is_block_known( + &self, + block: BlockNumHash, + ) -> Result, InsertBlockErrorKind> { + // check if block is canonical + if self.is_block_hash_canonical(&block.hash)? { + return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))); + } + + let last_finalized_block = self.block_indices().last_finalized_block(); + // check db if block is finalized. + if block.number <= last_finalized_block { + // check if block is inside database + if self.externals.provider_factory.provider()?.block_number(block.hash)?.is_some() { + return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))); + } + + return Err(BlockchainTreeError::PendingBlockIsFinalized { + last_finalized: last_finalized_block, + } + .into()) + } + + // is block inside chain + if let Some(attachment) = self.is_block_inside_sidechain(&block) { + return Ok(Some(BlockStatus::Valid(attachment))); + } + + // check if block is disconnected + if let Some(block) = self.state.buffered_blocks.block(&block.hash) { + return Ok(Some(BlockStatus::Disconnected { + head: self.state.block_indices.canonical_tip(), + missing_ancestor: block.parent_num_hash(), + })) + } + + Ok(None) + } + + /// Expose internal indices of the `BlockchainTree`. + #[inline] + pub const fn block_indices(&self) -> &BlockIndices { + self.state.block_indices() + } + + /// Returns the block with matching hash from any side-chain. + /// + /// Caution: This will not return blocks from the canonical chain. + #[inline] + pub fn sidechain_block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> { + self.state.block_by_hash(block_hash) + } + + /// Returns the block with matching hash from any side-chain. + /// + /// Caution: This will not return blocks from the canonical chain. + #[inline] + pub fn block_with_senders_by_hash( + &self, + block_hash: BlockHash, + ) -> Option<&SealedBlockWithSenders> { + self.state.block_with_senders_by_hash(block_hash) + } + + /// Returns the block's receipts with matching hash from any side-chain. + /// + /// Caution: This will not return blocks from the canonical chain. + pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { + self.state.receipts_by_block_hash(block_hash) + } + + /// Returns the block that's considered the `Pending` block, if it exists. + pub fn pending_block(&self) -> Option<&SealedBlock> { + let b = self.block_indices().pending_block_num_hash()?; + self.sidechain_block_by_hash(b.hash) + } + + /// Return items needed to execute on the pending state. + /// This includes: + /// * `BlockHash` of canonical block that chain connects to. Needed for creating database + /// provider for the rest of the state. + /// * `BundleState` changes that happened at the asked `block_hash` + /// * `BTreeMap` list of past pending and canonical hashes, That are + /// needed for evm `BLOCKHASH` opcode. + /// Return none if: + /// * block unknown. + /// * `chain_id` not present in state. + /// * there are no parent hashes stored. 
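`post_state_data`, whose contract is described above, has to hand the EVM a contiguous map of ancestor hashes for the `BLOCKHASH` opcode: the sidechain's own hashes extended with canonical hashes strictly below the first sidechain block. A small sketch of that merge, with plain integers standing in for block numbers and hashes:

```rust
use std::collections::BTreeMap;

// Merge canonical hashes below the first sidechain block into the sidechain's
// own hash map, mirroring the parent-hash assembly in `post_state_data`.
// Integers stand in for `BlockNumber` and `BlockHash`.
fn parent_block_hashes(
    sidechain_hashes: BTreeMap<u64, u64>,
    canonical_hashes: &BTreeMap<u64, u64>,
) -> BTreeMap<u64, u64> {
    let mut hashes = sidechain_hashes;
    let first_pending = hashes.keys().next().copied();
    if let Some(first_pending) = first_pending {
        // Only canonical blocks *below* the fork point are taken over.
        hashes.extend(canonical_hashes.range(..first_pending).map(|(&n, &h)| (n, h)));
    }
    hashes
}

fn main() {
    let sidechain = BTreeMap::from([(5u64, 0xa5u64), (6, 0xa6)]);
    let canonical = BTreeMap::from([(3u64, 0xc3u64), (4, 0xc4), (5, 0xc5)]);
    let merged = parent_block_hashes(sidechain, &canonical);
    assert_eq!(merged[&5], 0xa5); // the sidechain's own hash wins at the overlap
    assert_eq!(merged.len(), 4); // blocks 3, 4, 5 and 6
}
```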
+ pub fn post_state_data(&self, block_hash: BlockHash) -> Option { + trace!(target: "blockchain_tree", ?block_hash, "Searching for post state data"); + + let canonical_chain = self.state.block_indices.canonical_chain(); + + // if it is part of the chain + if let Some(chain_id) = self.block_indices().get_side_chain_id(&block_hash) { + trace!(target: "blockchain_tree", ?block_hash, "Constructing post state data based on non-canonical chain"); + // get block state + let Some(chain) = self.state.chains.get(&chain_id) else { + debug!(target: "blockchain_tree", ?chain_id, "Chain with ID not present"); + return None; + }; + let block_number = chain.block_number(block_hash)?; + let execution_outcome = chain.execution_outcome_at_block(block_number)?; + + // get parent hashes + let mut parent_block_hashes = self.all_chain_hashes(chain_id); + let Some((first_pending_block_number, _)) = parent_block_hashes.first_key_value() + else { + debug!(target: "blockchain_tree", ?chain_id, "No block hashes stored"); + return None; + }; + let canonical_chain = canonical_chain + .iter() + .filter(|&(key, _)| &key < first_pending_block_number) + .collect::>(); + parent_block_hashes.extend(canonical_chain); + + // get canonical fork. + let canonical_fork = self.canonical_fork(chain_id)?; + return Some(ExecutionData { execution_outcome, parent_block_hashes, canonical_fork }); + } + + // check if there is canonical block + if let Some(canonical_number) = canonical_chain.canonical_number(&block_hash) { + trace!(target: "blockchain_tree", %block_hash, "Constructing post state data based on canonical chain"); + return Some(ExecutionData { + canonical_fork: ForkBlock { number: canonical_number, hash: block_hash }, + execution_outcome: ExecutionOutcome::default(), + parent_block_hashes: canonical_chain.inner().clone(), + }); + } + + None + } + + /// Try inserting a validated [Self::validate_block] block inside the tree. + /// + /// If the block's parent block is unknown, this returns [`BlockStatus::Disconnected`] and the + /// block will be buffered until the parent block is inserted and then attached to sidechain + #[instrument(level = "trace", skip_all, fields(block = ?block.num_hash()), target = "blockchain_tree", ret)] + fn try_insert_validated_block( + &mut self, + block: SealedBlockWithSenders, + block_validation_kind: BlockValidationKind, + ) -> Result { + debug_assert!(self.validate_block(&block).is_ok(), "Block must be validated"); + + let parent = block.parent_num_hash(); + + // check if block parent can be found in any side chain. + if let Some(chain_id) = self.block_indices().get_side_chain_id(&parent.hash) { + // found parent in side tree, try to insert there + return self.try_insert_block_into_side_chain(block, chain_id, block_validation_kind); + } + + // if not found, check if the parent can be found inside canonical chain. + if self.is_block_hash_canonical(&parent.hash)? { + return self.try_append_canonical_chain(block.clone(), block_validation_kind); + } + + // this is another check to ensure that if the block points to a canonical block its block + // is valid + if let Some(canonical_parent_number) = + self.block_indices().canonical_number(&block.parent_hash) + { + // we found the parent block in canonical chain + if canonical_parent_number != parent.number { + return Err(ConsensusError::ParentBlockNumberMismatch { + parent_block_number: canonical_parent_number, + block_number: block.number, + } + .into()) + } + } + + // if there is a parent inside the buffer, validate against it. 
+ if let Some(buffered_parent) = self.state.buffered_blocks.block(&parent.hash) { + self.externals.consensus.validate_header_against_parent(&block, buffered_parent)?; + } + + // insert block inside unconnected block buffer. Delaying its execution. + self.state.buffered_blocks.insert_block(block.clone()); + + let block_hash = block.hash(); + // find the lowest ancestor of the block in the buffer to return as the missing parent + // this shouldn't return None because that only happens if the block was evicted, which + // shouldn't happen right after insertion + let lowest_ancestor = self + .state + .buffered_blocks + .lowest_ancestor(&block_hash) + .ok_or(BlockchainTreeError::BlockBufferingFailed { block_hash })?; + + Ok(BlockStatus::Disconnected { + head: self.state.block_indices.canonical_tip(), + missing_ancestor: lowest_ancestor.parent_num_hash(), + }) + } + + /// This tries to append the given block to the canonical chain. + /// + /// WARNING: this expects that the block extends the canonical chain: The block's parent is + /// part of the canonical chain (e.g. the block's parent is the latest canonical hash). See also + /// [Self::is_block_hash_canonical]. + #[instrument(level = "trace", skip_all, target = "blockchain_tree")] + fn try_append_canonical_chain( + &mut self, + block: SealedBlockWithSenders, + block_validation_kind: BlockValidationKind, + ) -> Result { + let parent = block.parent_num_hash(); + let block_num_hash = block.num_hash(); + debug!(target: "blockchain_tree", head = ?block_num_hash.hash, ?parent, "Appending block to canonical chain"); + + let provider = self.externals.provider_factory.provider()?; + + // Validate that the block is post merge + let parent_td = provider + .header_td(&block.parent_hash)? + .ok_or_else(|| BlockchainTreeError::CanonicalChain { block_hash: block.parent_hash })?; + + if !self + .externals + .provider_factory + .chain_spec() + .fork(EthereumHardfork::Paris) + .active_at_ttd(parent_td, U256::ZERO) + { + return Err(BlockExecutionError::Validation(BlockValidationError::BlockPreMerge { + hash: block.hash(), + }) + .into()) + } + + let parent_header = provider + .header(&block.parent_hash)? + .ok_or_else(|| BlockchainTreeError::CanonicalChain { block_hash: block.parent_hash })?; + + let parent_sealed_header = SealedHeader::new(parent_header, block.parent_hash); + + let canonical_chain = self.state.block_indices.canonical_chain(); + + let block_attachment = if block.parent_hash == canonical_chain.tip().hash { + BlockAttachment::Canonical + } else { + BlockAttachment::HistoricalFork + }; + + let chain = AppendableChain::new_canonical_fork( + block, + &parent_sealed_header, + canonical_chain.inner(), + parent, + &self.externals, + block_attachment, + block_validation_kind, + )?; + + self.insert_chain(chain); + self.try_connect_buffered_blocks(block_num_hash); + + Ok(BlockStatus::Valid(block_attachment)) + } + + /// Try inserting a block into the given side chain. 
+ /// + /// WARNING: This expects a valid side chain id, see [BlockIndices::get_side_chain_id] + #[instrument(level = "trace", skip_all, target = "blockchain_tree")] + fn try_insert_block_into_side_chain( + &mut self, + block: SealedBlockWithSenders, + chain_id: SidechainId, + block_validation_kind: BlockValidationKind, + ) -> Result { + let block_num_hash = block.num_hash(); + debug!(target: "blockchain_tree", ?block_num_hash, ?chain_id, "Inserting block into side chain"); + // Create a new sidechain by forking the given chain, or append the block if the parent + // block is the top of the given chain. + let block_hashes = self.all_chain_hashes(chain_id); + + // get canonical fork. + let canonical_fork = self.canonical_fork(chain_id).ok_or_else(|| { + BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() } + })?; + + // get chain that block needs to join to. + let parent_chain = self.state.chains.get_mut(&chain_id).ok_or_else(|| { + BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() } + })?; + + let chain_tip = parent_chain.tip().hash(); + let canonical_chain = self.state.block_indices.canonical_chain(); + + // append the block if it is continuing the side chain. + let block_attachment = if chain_tip == block.parent_hash { + // check if the chain extends the currently tracked canonical head + let block_attachment = if canonical_fork.hash == canonical_chain.tip().hash { + BlockAttachment::Canonical + } else { + BlockAttachment::HistoricalFork + }; + + let block_hash = block.hash(); + let block_number = block.number; + debug!(target: "blockchain_tree", ?block_hash, ?block_number, "Appending block to side chain"); + parent_chain.append_block( + block, + block_hashes, + canonical_chain.inner(), + &self.externals, + canonical_fork, + block_attachment, + block_validation_kind, + )?; + + self.state.block_indices.insert_non_fork_block(block_number, block_hash, chain_id); + block_attachment + } else { + debug!(target: "blockchain_tree", ?canonical_fork, "Starting new fork from side chain"); + // the block starts a new fork + let chain = parent_chain.new_chain_fork( + block, + block_hashes, + canonical_chain.inner(), + canonical_fork, + &self.externals, + block_validation_kind, + )?; + self.insert_chain(chain); + BlockAttachment::HistoricalFork + }; + + // After we inserted the block, we try to connect any buffered blocks + self.try_connect_buffered_blocks(block_num_hash); + + Ok(BlockStatus::Valid(block_attachment)) + } + + /// Get all block hashes from a sidechain that are not part of the canonical chain. + /// This is a one time operation per block. + /// + /// # Note + /// + /// This is not cached in order to save memory. + fn all_chain_hashes(&self, chain_id: SidechainId) -> BTreeMap { + let mut chain_id = chain_id; + let mut hashes = BTreeMap::new(); + loop { + let Some(chain) = self.state.chains.get(&chain_id) else { return hashes }; + + // The parent chains might contain blocks with overlapping numbers or numbers greater + // than original chain tip. Insert the block hash only if it's not present + // for the given block number and the block number does not exceed the + // original chain tip. 
+ let latest_block_number = hashes + .last_key_value() + .map(|(number, _)| *number) + .unwrap_or_else(|| chain.tip().number); + for block in chain.blocks().values().filter(|b| b.number <= latest_block_number) { + if let Entry::Vacant(e) = hashes.entry(block.number) { + e.insert(block.hash()); + } + } + + let fork_block = chain.fork_block(); + if let Some(next_chain_id) = self.block_indices().get_side_chain_id(&fork_block.hash) { + chain_id = next_chain_id; + } else { + // if there is no fork block that point to other chains, break the loop. + // it means that this fork joins to canonical block. + break + } + } + hashes + } + + /// Get the block at which the given chain forks off the current canonical chain. + /// + /// This is used to figure out what kind of state provider the executor should use to execute + /// the block on + /// + /// Returns `None` if the chain is unknown. + fn canonical_fork(&self, chain_id: SidechainId) -> Option { + let mut chain_id = chain_id; + let mut fork; + loop { + // chain fork block + fork = self.state.chains.get(&chain_id)?.fork_block(); + // get fork block chain + if let Some(fork_chain_id) = self.block_indices().get_side_chain_id(&fork.hash) { + chain_id = fork_chain_id; + continue + } + break + } + (self.block_indices().canonical_hash(&fork.number) == Some(fork.hash)).then_some(fork) + } + + /// Insert a chain into the tree. + /// + /// Inserts a chain into the tree and builds the block indices. + fn insert_chain(&mut self, chain: AppendableChain) -> Option { + self.state.insert_chain(chain) + } + + /// Iterate over all child chains that depend on this block and return + /// their ids. + fn find_all_dependent_chains(&self, block: &BlockHash) -> HashSet { + // Find all forks of given block. + let mut dependent_block = + self.block_indices().fork_to_child().get(block).cloned().unwrap_or_default(); + let mut dependent_chains = HashSet::default(); + + while let Some(block) = dependent_block.pop_back() { + // Get chain of dependent block. + let Some(chain_id) = self.block_indices().get_side_chain_id(&block) else { + debug!(target: "blockchain_tree", ?block, "Block not in tree"); + return Default::default(); + }; + + // Find all blocks that fork from this chain. + let Some(chain) = self.state.chains.get(&chain_id) else { + debug!(target: "blockchain_tree", ?chain_id, "Chain not in tree"); + return Default::default(); + }; + for chain_block in chain.blocks().values() { + if let Some(forks) = self.block_indices().fork_to_child().get(&chain_block.hash()) { + // If there are sub forks append them for processing. + dependent_block.extend(forks); + } + } + // Insert dependent chain id. + dependent_chains.insert(chain_id); + } + dependent_chains + } + + /// Inserts unwound chain back into the tree and updates any dependent chains. + /// + /// This method searches for any chain that depended on this block being part of the canonical + /// chain. Each dependent chain's state is then updated with state entries removed from the + /// plain state during the unwind. + /// Returns the result of inserting the chain or None if any of the dependent chains is not + /// in the tree. + fn insert_unwound_chain(&mut self, chain: AppendableChain) -> Option { + // iterate over all blocks in chain and find any fork blocks that are in tree. + for (number, block) in chain.blocks() { + let hash = block.hash(); + + // find all chains that fork from this block. 
+ let chains_to_bump = self.find_all_dependent_chains(&hash); + if !chains_to_bump.is_empty() { + // if there is such chain, revert state to this block. + let mut cloned_execution_outcome = chain.execution_outcome().clone(); + cloned_execution_outcome.revert_to(*number); + + // prepend state to all chains that fork from this block. + for chain_id in chains_to_bump { + let Some(chain) = self.state.chains.get_mut(&chain_id) else { + debug!(target: "blockchain_tree", ?chain_id, "Chain not in tree"); + return None; + }; + + debug!(target: "blockchain_tree", + unwound_block= ?block.num_hash(), + chain_id = ?chain_id, + chain_tip = ?chain.tip().num_hash(), + "Prepend unwound block state to blockchain tree chain"); + + chain.prepend_state(cloned_execution_outcome.state().clone()) + } + } + } + // Insert unwound chain to the tree. + self.insert_chain(chain) + } + + /// Checks the block buffer for the given block. + pub fn get_buffered_block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { + self.state.get_buffered_block(hash) + } + + /// Gets the lowest ancestor for the given block in the block buffer. + pub fn lowest_buffered_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { + self.state.lowest_buffered_ancestor(hash) + } + + /// Insert a new block into the tree. + /// + /// # Note + /// + /// This recovers transaction signers (unlike [`BlockchainTree::insert_block`]). + pub fn insert_block_without_senders( + &mut self, + block: SealedBlock, + ) -> Result { + match block.try_seal_with_senders() { + Ok(block) => self.insert_block(block, BlockValidationKind::Exhaustive), + Err(block) => Err(InsertBlockError::sender_recovery_error(block)), + } + } + + /// Insert block for future execution. + /// + /// Returns an error if the block is invalid. + pub fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { + // validate block consensus rules + if let Err(err) = self.validate_block(&block) { + return Err(InsertBlockError::consensus_error(err, block.block)); + } + + self.state.buffered_blocks.insert_block(block); + Ok(()) + } + + /// Validate if block is correct and satisfies all the consensus rules that concern the header + /// and block body itself. + fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> { + if let Err(e) = + self.externals.consensus.validate_header_with_total_difficulty(block, U256::MAX) + { + error!(?block, "Failed to validate total difficulty for block {}: {e}", block.hash()); + return Err(e); + } + + if let Err(e) = self.externals.consensus.validate_header(block) { + error!(?block, "Failed to validate header {}: {e}", block.hash()); + return Err(e); + } + + if let Err(e) = self.externals.consensus.validate_block_pre_execution(block) { + error!(?block, "Failed to validate block {}: {e}", block.hash()); + return Err(e); + } + + Ok(()) + } + + /// Check if block is found inside a sidechain and its attachment. 
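`insert_block_without_senders` above relies on a conversion that, on failure, hands the original block back inside the `Err`, so the resulting `InsertBlockError` can still carry the full block. A small sketch of that pattern with illustrative stand-in types:

```rust
#[derive(Debug)]
struct SealedBlock {
    number: u64,
}

#[derive(Debug)]
struct RecoveredBlock {
    number: u64,
}

#[derive(Debug)]
enum InsertError {
    /// The error keeps the block, mirroring `InsertBlockError::sender_recovery_error`.
    SenderRecovery(SealedBlock),
}

/// Stand-in for `try_seal_with_senders`: returns the original block on failure.
fn try_recover_senders(block: SealedBlock) -> Result<RecoveredBlock, SealedBlock> {
    if block.number % 2 == 0 {
        Ok(RecoveredBlock { number: block.number })
    } else {
        Err(block)
    }
}

fn insert_block_without_senders(block: SealedBlock) -> Result<RecoveredBlock, InsertError> {
    match try_recover_senders(block) {
        Ok(recovered) => Ok(recovered), // continue with the usual insert path
        Err(original) => Err(InsertError::SenderRecovery(original)),
    }
}
```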
+ /// + /// if it is canonical or extends the canonical chain, return [`BlockAttachment::Canonical`] + /// if it does not extend the canonical chain, return [`BlockAttachment::HistoricalFork`] + /// if the block is not in the tree or its chain id is not valid, return None + #[track_caller] + fn is_block_inside_sidechain(&self, block: &BlockNumHash) -> Option { + // check if block known and is already in the tree + if let Some(chain_id) = self.block_indices().get_side_chain_id(&block.hash) { + // find the canonical fork of this chain + let Some(canonical_fork) = self.canonical_fork(chain_id) else { + debug!(target: "blockchain_tree", chain_id=?chain_id, block=?block.hash, "Chain id not valid"); + return None; + }; + // if the block's chain extends canonical chain + return if canonical_fork == self.block_indices().canonical_tip() { + Some(BlockAttachment::Canonical) + } else { + Some(BlockAttachment::HistoricalFork) + }; + } + None + } + + /// Insert a block (with recovered senders) into the tree. + /// + /// Returns the [`BlockStatus`] on success: + /// + /// - The block is already part of a sidechain in the tree, or + /// - The block is already part of the canonical chain, or + /// - The parent is part of a sidechain in the tree, and we can fork at this block, or + /// - The parent is part of the canonical chain, and we can fork at this block + /// + /// Otherwise, an error is returned, indicating that neither the block nor its parent are part + /// of the chain or any sidechains. + /// + /// This means that if the block becomes canonical, we need to fetch the missing blocks over + /// P2P. + /// + /// If the [`BlockValidationKind::SkipStateRootValidation`] variant is provided the state root + /// is not validated. + /// + /// # Note + /// + /// If the senders have not already been recovered, call + /// [`BlockchainTree::insert_block_without_senders`] instead. + pub fn insert_block( + &mut self, + block: SealedBlockWithSenders, + block_validation_kind: BlockValidationKind, + ) -> Result { + // check if we already have this block + match self.is_block_known(block.num_hash()) { + Ok(Some(status)) => return Ok(InsertPayloadOk::AlreadySeen(status)), + Err(err) => return Err(InsertBlockError::new(block.block, err)), + _ => {} + } + + // validate block consensus rules + if let Err(err) = self.validate_block(&block) { + return Err(InsertBlockError::consensus_error(err, block.block)); + } + + let status = self + .try_insert_validated_block(block.clone(), block_validation_kind) + .map_err(|kind| InsertBlockError::new(block.block, kind))?; + Ok(InsertPayloadOk::Inserted(status)) + } + + /// Discard all blocks that precede block number from the buffer. + pub fn remove_old_blocks(&mut self, block: BlockNumber) { + self.state.buffered_blocks.remove_old_blocks(block); + } + + /// Finalize blocks up until and including `finalized_block`, and remove them from the tree. + pub fn finalize_block(&mut self, finalized_block: BlockNumber) -> ProviderResult<()> { + // remove blocks + let mut remove_chains = self.state.block_indices.finalize_canonical_blocks( + finalized_block, + self.config.num_of_additional_canonical_block_hashes(), + ); + // remove chains of removed blocks + while let Some(chain_id) = remove_chains.pop_first() { + if let Some(chain) = self.state.chains.remove(&chain_id) { + remove_chains.extend(self.state.block_indices.remove_chain(&chain)); + } + } + // clean block buffer. + self.remove_old_blocks(finalized_block); + + // save finalized block in db. 
+ self.externals.save_finalized_block_number(finalized_block)?; + + Ok(()) + } + + /// Reads the last `N` canonical hashes from the database and updates the block indices of the + /// tree by attempting to connect the buffered blocks to canonical hashes. + /// + /// + /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the + /// `BLOCKHASH` opcode in the EVM. + /// + /// # Note + /// + /// This finalizes `last_finalized_block` prior to reading the canonical hashes (using + /// [`BlockchainTree::finalize_block`]). + pub fn connect_buffered_blocks_to_canonical_hashes_and_finalize( + &mut self, + last_finalized_block: BlockNumber, + ) -> ProviderResult<()> { + self.finalize_block(last_finalized_block)?; + + let last_canonical_hashes = self.update_block_hashes()?; + + self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; + + Ok(()) + } + + /// Update all block hashes. iterate over present and new list of canonical hashes and compare + /// them. Remove all mismatches, disconnect them and removes all chains. + pub fn update_block_hashes(&mut self) -> ProviderResult> { + let last_canonical_hashes = self + .externals + .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; + + let (mut remove_chains, _) = + self.state.block_indices.update_block_hashes(last_canonical_hashes.clone()); + + // remove all chains that got discarded + while let Some(chain_id) = remove_chains.first() { + if let Some(chain) = self.state.chains.remove(chain_id) { + remove_chains.extend(self.state.block_indices.remove_chain(&chain)); + } + } + + Ok(last_canonical_hashes) + } + + /// Update all block hashes. iterate over present and new list of canonical hashes and compare + /// them. Remove all mismatches, disconnect them, removes all chains and clears all buffered + /// blocks before the tip. + pub fn update_block_hashes_and_clear_buffered( + &mut self, + ) -> ProviderResult> { + let chain = self.update_block_hashes()?; + + if let Some((block, _)) = chain.last_key_value() { + self.remove_old_blocks(*block); + } + + Ok(chain) + } + + /// Reads the last `N` canonical hashes from the database and updates the block indices of the + /// tree by attempting to connect the buffered blocks to canonical hashes. + /// + /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the + /// `BLOCKHASH` opcode in the EVM. + pub fn connect_buffered_blocks_to_canonical_hashes(&mut self) -> ProviderResult<()> { + let last_canonical_hashes = self + .externals + .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; + self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; + + Ok(()) + } + + fn connect_buffered_blocks_to_hashes( + &mut self, + hashes: impl IntoIterator>, + ) -> ProviderResult<()> { + // check unconnected block buffer for children of the canonical hashes + for added_block in hashes { + self.try_connect_buffered_blocks(added_block.into()) + } + + // check unconnected block buffer for children of the chains + let mut all_chain_blocks = Vec::new(); + for chain in self.state.chains.values() { + all_chain_blocks.reserve_exact(chain.blocks().len()); + for (&number, block) in chain.blocks() { + all_chain_blocks.push(BlockNumHash { number, hash: block.hash() }) + } + } + for block in all_chain_blocks { + self.try_connect_buffered_blocks(block) + } + + Ok(()) + } + + /// Connect unconnected (buffered) blocks if the new block closes a gap. 
+ /// + /// This will try to insert all children of the new block, extending its chain. + /// + /// If all children are valid, then this essentially appends all child blocks to the + /// new block's chain. + fn try_connect_buffered_blocks(&mut self, new_block: BlockNumHash) { + trace!(target: "blockchain_tree", ?new_block, "try_connect_buffered_blocks"); + + // first remove all the children of the new block from the buffer + let include_blocks = self.state.buffered_blocks.remove_block_with_children(&new_block.hash); + // then try to reinsert them into the tree + for block in include_blocks { + // don't fail on error, just ignore the block. + let _ = self + .try_insert_validated_block(block, BlockValidationKind::SkipStateRootValidation) + .map_err(|err| { + debug!(target: "blockchain_tree", %err, "Failed to insert buffered block"); + err + }); + } + } + + /// Removes chain corresponding to provided chain id from block indices, + /// splits it at split target, and returns the canonical part of it. + /// Returns [None] if chain is missing. + /// + /// The pending part of the chain is reinserted back into the tree with the same `chain_id`. + fn remove_and_split_chain( + &mut self, + chain_id: SidechainId, + split_at: ChainSplitTarget, + ) -> Option { + let chain = self.state.chains.remove(&chain_id)?; + match chain.into_inner().split(split_at) { + ChainSplit::Split { canonical, pending } => { + trace!(target: "blockchain_tree", ?canonical, ?pending, "Split chain"); + // rest of split chain is inserted back with same chain_id. + self.state.block_indices.insert_chain(chain_id, &pending); + self.state.chains.insert(chain_id, AppendableChain::new(pending)); + Some(canonical) + } + ChainSplit::NoSplitCanonical(canonical) => { + trace!(target: "blockchain_tree", "No split on canonical chain"); + Some(canonical) + } + ChainSplit::NoSplitPending(_) => { + unreachable!("Should not happen as block indices guarantee structure of blocks") + } + } + } + + /// Attempts to find the header for the given block hash if it is canonical. + /// + /// Returns `Ok(None)` if the block hash is not canonical (block hash does not exist, or is + /// included in a sidechain). + /// + /// Note: this does not distinguish between a block that is finalized and a block that is not + /// finalized yet, only whether it is part of the canonical chain or not. + pub fn find_canonical_header( + &self, + hash: &BlockHash, + ) -> Result, ProviderError> { + // if the indices show that the block hash is not canonical, it's either in a sidechain or + // canonical, but in the db. If it is in a sidechain, it is not canonical. If it is missing + // in the db, then it is also not canonical. + + let provider = self.externals.provider_factory.provider()?; + + let mut header = None; + if let Some(num) = self.block_indices().canonical_number(hash) { + header = provider.header_by_number(num)?; + } + + if header.is_none() && self.sidechain_block_by_hash(*hash).is_some() { + return Ok(None) + } + + if header.is_none() { + header = provider.header(hash)? + } + + Ok(header.map(|header| SealedHeader::new(header, *hash))) + } + + /// Determines whether or not a block is canonical, checking the db if necessary. + /// + /// Note: this does not distinguish between a block that is finalized and a block that is not + /// finalized yet, only whether it is part of the canonical chain or not. 
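`remove_and_split_chain` above splits a chain at the target block, returns the canonical lower part, and re-inserts the pending remainder under the same chain id. A minimal sketch of the split semantics using a plain `BTreeMap` in place of a `Chain`:

```rust
use std::collections::BTreeMap;

// Blocks up to and including the split target become the canonical part; the
// remainder stays behind as the pending sidechain. Keys stand in for block
// numbers, values for blocks.
fn split_chain(
    mut chain: BTreeMap<u64, &'static str>,
    split_at: u64,
) -> (BTreeMap<u64, &'static str>, BTreeMap<u64, &'static str>) {
    // `split_off` keeps `..=split_at` in `chain` and returns `split_at + 1..`.
    let pending = chain.split_off(&(split_at + 1));
    (chain, pending)
}

fn main() {
    let chain = BTreeMap::from([(10u64, "a"), (11, "b"), (12, "c")]);
    let (canonical, pending) = split_chain(chain, 11);
    assert_eq!(canonical.keys().copied().collect::<Vec<_>>(), vec![10, 11]);
    assert_eq!(pending.keys().copied().collect::<Vec<_>>(), vec![12]);
}
```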
+ pub fn is_block_hash_canonical(&self, hash: &BlockHash) -> Result { + self.find_canonical_header(hash).map(|header| header.is_some()) + } + + /// Make a block and its parent(s) part of the canonical chain and commit them to the database + /// + /// # Note + /// + /// This unwinds the database if necessary, i.e. if parts of the canonical chain have been + /// reorged. + /// + /// # Returns + /// + /// Returns `Ok` if the blocks were canonicalized, or if the blocks were already canonical. + #[track_caller] + #[instrument(level = "trace", skip(self), target = "blockchain_tree")] + pub fn make_canonical( + &mut self, + block_hash: BlockHash, + ) -> Result { + let mut durations_recorder = MakeCanonicalDurationsRecorder::default(); + + let old_block_indices = self.block_indices().clone(); + let old_buffered_blocks = self.state.buffered_blocks.parent_to_child.clone(); + durations_recorder.record_relative(MakeCanonicalAction::CloneOldBlocks); + + // If block is already canonical don't return error. + let canonical_header = self.find_canonical_header(&block_hash)?; + durations_recorder.record_relative(MakeCanonicalAction::FindCanonicalHeader); + if let Some(header) = canonical_header { + info!(target: "blockchain_tree", %block_hash, "Block is already canonical, ignoring."); + // TODO: this could be fetched from the chainspec first + let td = + self.externals.provider_factory.provider()?.header_td(&block_hash)?.ok_or_else( + || { + CanonicalError::from(BlockValidationError::MissingTotalDifficulty { + hash: block_hash, + }) + }, + )?; + + if !self + .externals + .provider_factory + .chain_spec() + .fork(EthereumHardfork::Paris) + .active_at_ttd(td, U256::ZERO) + { + return Err(CanonicalError::from(BlockValidationError::BlockPreMerge { + hash: block_hash, + })) + } + + let head = self.state.block_indices.canonical_tip(); + return Ok(CanonicalOutcome::AlreadyCanonical { header, head }); + } + + let Some(chain_id) = self.block_indices().get_side_chain_id(&block_hash) else { + debug!(target: "blockchain_tree", ?block_hash, "Block hash not found in block indices"); + return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { + block_hash, + })) + }; + + // we are splitting chain at the block hash that we want to make canonical + let Some(canonical) = self.remove_and_split_chain(chain_id, block_hash.into()) else { + debug!(target: "blockchain_tree", ?block_hash, ?chain_id, "Chain not present"); + return Err(CanonicalError::from(BlockchainTreeError::BlockSideChainIdConsistency { + chain_id: chain_id.into(), + })) + }; + trace!(target: "blockchain_tree", chain = ?canonical, "Found chain to make canonical"); + durations_recorder.record_relative(MakeCanonicalAction::SplitChain); + + let mut fork_block = canonical.fork_block(); + let mut chains_to_promote = vec![canonical]; + + // loop while fork blocks are found in Tree. + while let Some(chain_id) = self.block_indices().get_side_chain_id(&fork_block.hash) { + // canonical chain is lower part of the chain. 
+ let Some(canonical) = + self.remove_and_split_chain(chain_id, ChainSplitTarget::Number(fork_block.number)) + else { + debug!(target: "blockchain_tree", ?fork_block, ?chain_id, "Fork not present"); + return Err(CanonicalError::from( + BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() }, + )); + }; + fork_block = canonical.fork_block(); + chains_to_promote.push(canonical); + } + durations_recorder.record_relative(MakeCanonicalAction::SplitChainForks); + + let old_tip = self.block_indices().canonical_tip(); + // Merge all chains into one chain. + let Some(mut new_canon_chain) = chains_to_promote.pop() else { + debug!(target: "blockchain_tree", "No blocks in the chain to make canonical"); + return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { + block_hash: fork_block.hash, + })) + }; + trace!(target: "blockchain_tree", ?new_canon_chain, "Merging chains"); + let mut chain_appended = false; + for chain in chains_to_promote.into_iter().rev() { + trace!(target: "blockchain_tree", ?chain, "Appending chain"); + let block_hash = chain.fork_block().hash; + new_canon_chain.append_chain(chain).map_err(|_| { + CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }) + })?; + chain_appended = true; + } + durations_recorder.record_relative(MakeCanonicalAction::MergeAllChains); + + if chain_appended { + trace!(target: "blockchain_tree", ?new_canon_chain, "Canonical chain appended"); + } + // update canonical index + self.state.block_indices.canonicalize_blocks(new_canon_chain.blocks()); + durations_recorder.record_relative(MakeCanonicalAction::UpdateCanonicalIndex); + + debug!( + target: "blockchain_tree", + "Committing new canonical chain: {}", DisplayBlocksChain(new_canon_chain.blocks()) + ); + + // If chain extends the tip + let chain_notification = if new_canon_chain.fork_block().hash == old_tip.hash { + // Commit new canonical chain to database. + self.commit_canonical_to_database(new_canon_chain.clone(), &mut durations_recorder)?; + CanonStateNotification::Commit { new: Arc::new(new_canon_chain) } + } else { + // It forks to canonical block that is not the tip. + let canon_fork: BlockNumHash = new_canon_chain.fork_block(); + // sanity check + if self.block_indices().canonical_hash(&canon_fork.number) != Some(canon_fork.hash) { + error!( + target: "blockchain_tree", + ?canon_fork, + block_indices=?self.block_indices(), + "All chains should point to canonical chain" + ); + unreachable!("all chains should point to canonical chain."); + } + + let old_canon_chain = + self.revert_canonical_from_database(canon_fork.number).inspect_err(|error| { + error!( + target: "blockchain_tree", + "Reverting canonical chain failed with error: {:?}\n\ + Old BlockIndices are:{:?}\n\ + New BlockIndices are: {:?}\n\ + Old BufferedBlocks are:{:?}", + error, old_block_indices, self.block_indices(), old_buffered_blocks + ); + })?; + durations_recorder + .record_relative(MakeCanonicalAction::RevertCanonicalChainFromDatabase); + + // Commit new canonical chain. + self.commit_canonical_to_database(new_canon_chain.clone(), &mut durations_recorder)?; + + if let Some(old_canon_chain) = old_canon_chain { + self.update_reorg_metrics(old_canon_chain.len() as f64); + + // Insert old canonical chain back into tree. 
+ self.insert_unwound_chain(AppendableChain::new(old_canon_chain.clone())); + durations_recorder.record_relative(MakeCanonicalAction::InsertOldCanonicalChain); + + CanonStateNotification::Reorg { + old: Arc::new(old_canon_chain), + new: Arc::new(new_canon_chain), + } + } else { + // error here to confirm that we are reverting nothing from db. + error!(target: "blockchain_tree", %block_hash, "Nothing was removed from database"); + CanonStateNotification::Commit { new: Arc::new(new_canon_chain) } + } + }; + + debug!( + target: "blockchain_tree", + actions = ?durations_recorder.actions, + "Canonicalization finished" + ); + + // clear trie updates for other children + self.block_indices() + .fork_to_child() + .get(&old_tip.hash) + .cloned() + .unwrap_or_default() + .into_iter() + .for_each(|child| { + if let Some(chain_id) = self.block_indices().get_side_chain_id(&child) { + if let Some(chain) = self.state.chains.get_mut(&chain_id) { + chain.clear_trie_updates(); + } + } + }); + + durations_recorder.record_relative(MakeCanonicalAction::ClearTrieUpdatesForOtherChildren); + + // Send notification about new canonical chain and return outcome of canonicalization. + let outcome = CanonicalOutcome::Committed { head: chain_notification.tip().header.clone() }; + let _ = self.canon_state_notification_sender.send(chain_notification); + Ok(outcome) + } + + /// Write the given chain to the database as canonical. + fn commit_canonical_to_database( + &self, + chain: Chain, + recorder: &mut MakeCanonicalDurationsRecorder, + ) -> Result<(), CanonicalError> { + let (blocks, state, chain_trie_updates) = chain.into_inner(); + let hashed_state = self.externals.provider_factory.hashed_post_state(state.state()); + let prefix_sets = hashed_state.construct_prefix_sets().freeze(); + let hashed_state_sorted = hashed_state.into_sorted(); + + // Compute state root or retrieve cached trie updates before opening write transaction. + let block_hash_numbers = + blocks.iter().map(|(number, b)| (number, b.hash())).collect::>(); + let trie_updates = match chain_trie_updates { + Some(updates) => { + debug!(target: "blockchain_tree", blocks = ?block_hash_numbers, "Using cached trie updates"); + self.metrics.trie_updates_insert_cached.increment(1); + updates + } + None => { + debug!(target: "blockchain_tree", blocks = ?block_hash_numbers, "Recomputing state root for insert"); + let provider = self + .externals + .provider_factory + .provider()? + // State root calculation can take a while, and we're sure no write transaction + // will be open in parallel. See https://github.com/paradigmxyz/reth/issues/6168. 
+ .disable_long_read_transaction_safety(); + let (state_root, trie_updates) = StateRoot::from_tx(provider.tx_ref()) + .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(provider.tx_ref()), + &hashed_state_sorted, + )) + .with_prefix_sets(prefix_sets) + .root_with_updates() + .map_err(BlockValidationError::from)?; + let tip = blocks.tip(); + if state_root != tip.state_root { + return Err(ProviderError::StateRootMismatch(Box::new(RootMismatch { + root: GotExpected { got: state_root, expected: tip.state_root }, + block_number: tip.number, + block_hash: tip.hash(), + })) + .into()) + } + self.metrics.trie_updates_insert_recomputed.increment(1); + trie_updates + } + }; + recorder.record_relative(MakeCanonicalAction::RetrieveStateTrieUpdates); + + let provider_rw = self.externals.provider_factory.provider_rw()?; + provider_rw + .append_blocks_with_state( + blocks.into_blocks().collect(), + state, + hashed_state_sorted, + trie_updates, + ) + .map_err(|e| CanonicalError::CanonicalCommit(e.to_string()))?; + + provider_rw.commit()?; + recorder.record_relative(MakeCanonicalAction::CommitCanonicalChainToDatabase); + + Ok(()) + } + + /// Unwind tables and put it inside state + pub fn unwind(&mut self, unwind_to: BlockNumber) -> Result<(), CanonicalError> { + // nothing to be done if unwind_to is higher then the tip + if self.block_indices().canonical_tip().number <= unwind_to { + return Ok(()); + } + // revert `N` blocks from current canonical chain and put them inside BlockchainTree + let old_canon_chain = self.revert_canonical_from_database(unwind_to)?; + + // check if there is block in chain + if let Some(old_canon_chain) = old_canon_chain { + self.state.block_indices.unwind_canonical_chain(unwind_to); + // insert old canonical chain to BlockchainTree. + self.insert_unwound_chain(AppendableChain::new(old_canon_chain)); + } + + Ok(()) + } + + /// Reverts the canonical chain down to the given block from the database and returns the + /// unwound chain. + /// + /// The block, `revert_until`, is __non-inclusive__, i.e. `revert_until` stays in the database. + fn revert_canonical_from_database( + &self, + revert_until: BlockNumber, + ) -> Result, CanonicalError> { + // This should only happen when an optimistic sync target was re-orged. + // + // Static files generally contain finalized data. The blockchain tree only deals + // with non-finalized data. The only scenario where canonical reverts go past the highest + // static file is when an optimistic sync occurred and non-finalized data was written to + // static files. + if self + .externals + .provider_factory + .static_file_provider() + .get_highest_static_file_block(StaticFileSegment::Headers) + .unwrap_or_default() > + revert_until + { + trace!( + target: "blockchain_tree", + "Reverting optimistic canonical chain to block {}", + revert_until + ); + return Err(CanonicalError::OptimisticTargetRevert(revert_until)); + } + + // read data that is needed for new sidechain + let provider_rw = self.externals.provider_factory.provider_rw()?; + + let tip = provider_rw.last_block_number()?; + let revert_range = (revert_until + 1)..=tip; + info!(target: "blockchain_tree", "REORG: revert canonical from database by unwinding chain blocks {:?}", revert_range); + // read block and execution result from database. and remove traces of block from tables. 
+ let blocks_and_execution = provider_rw + .take_block_and_execution_above(revert_until, StorageLocation::Database) + .map_err(|e| CanonicalError::CanonicalRevert(e.to_string()))?; + + provider_rw.commit()?; + + if blocks_and_execution.is_empty() { + Ok(None) + } else { + Ok(Some(blocks_and_execution)) + } + } + + fn update_reorg_metrics(&self, reorg_depth: f64) { + self.metrics.reorgs.increment(1); + self.metrics.latest_reorg_depth.set(reorg_depth); + } + + /// Update blockchain tree chains (canonical and sidechains) and sync metrics. + /// + /// NOTE: this method should not be called during the pipeline sync, because otherwise the sync + /// checkpoint metric will get overwritten. Buffered blocks metrics are updated in + /// [`BlockBuffer`](crate::block_buffer::BlockBuffer) during the pipeline sync. + pub(crate) fn update_chains_metrics(&mut self) { + let height = self.state.block_indices.canonical_tip().number; + + let longest_sidechain_height = + self.state.chains.values().map(|chain| chain.tip().number).max(); + if let Some(longest_sidechain_height) = longest_sidechain_height { + self.metrics.longest_sidechain_height.set(longest_sidechain_height as f64); + } + + self.metrics.sidechains.set(self.state.chains.len() as f64); + self.metrics.canonical_chain_height.set(height as f64); + if let Some(metrics_tx) = self.sync_metrics_tx.as_mut() { + let _ = metrics_tx.send(MetricEvent::SyncHeight { height }); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy_consensus::{Header, TxEip1559, EMPTY_ROOT_HASH}; + use alloy_eips::{ + eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, INITIAL_BASE_FEE}, + eip4895::Withdrawals, + }; + use alloy_genesis::{Genesis, GenesisAccount}; + use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, B256}; + use assert_matches::assert_matches; + use linked_hash_set::LinkedHashSet; + use reth_chainspec::{ChainSpecBuilder, MAINNET, MIN_TRANSACTION_GAS}; + use reth_consensus::test_utils::TestConsensus; + use reth_db::tables; + use reth_db_api::transaction::DbTxMut; + use reth_evm::test_utils::MockExecutorProvider; + use reth_evm_ethereum::execute::EthExecutorProvider; + use reth_node_types::FullNodePrimitives; + use reth_primitives::{ + proofs::{calculate_receipt_root, calculate_transaction_root}, + Account, BlockBody, RecoveredTx, Transaction, TransactionSigned, + }; + use reth_provider::{ + providers::ProviderNodeTypes, + test_utils::{ + blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec, + MockNodeTypesWithDB, + }, + ProviderFactory, StorageLocation, + }; + use reth_stages_api::StageCheckpoint; + use reth_trie::{root::state_root_unhashed, StateRoot}; + use std::collections::HashMap; + + fn setup_externals( + exec_res: Vec, + ) -> TreeExternals { + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(MAINNET.genesis.clone()) + .shanghai_activated() + .build(), + ); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec); + let consensus = Arc::new(TestConsensus::default()); + let executor_factory = MockExecutorProvider::default(); + executor_factory.extend(exec_res); + + TreeExternals::new(provider_factory, consensus, executor_factory) + } + + fn setup_genesis< + N: ProviderNodeTypes< + Primitives: FullNodePrimitives< + BlockBody = reth_primitives::BlockBody, + BlockHeader = reth_primitives::Header, + >, + >, + >( + factory: &ProviderFactory, + mut genesis: SealedBlock, + ) { + // insert genesis to db. 
+ + genesis.header.set_block_number(10); + genesis.header.set_state_root(EMPTY_ROOT_HASH); + let provider = factory.provider_rw().unwrap(); + + provider + .insert_historical_block( + genesis.try_seal_with_senders().expect("invalid tx signature in genesis"), + ) + .unwrap(); + + // insert first 10 blocks + for i in 0..10 { + provider + .tx_ref() + .put::(i, B256::new([100 + i as u8; 32])) + .unwrap(); + } + provider + .tx_ref() + .put::("Finish".to_string(), StageCheckpoint::new(10)) + .unwrap(); + provider.commit().unwrap(); + } + + /// Test data structure that will check tree internals + #[derive(Default, Debug)] + struct TreeTester { + /// Number of chains + chain_num: Option, + /// Check block to chain index + block_to_chain: Option>, + /// Check fork to child index + fork_to_child: Option>>, + /// Pending blocks + pending_blocks: Option<(BlockNumber, HashSet)>, + /// Buffered blocks + buffered_blocks: Option>, + } + + impl TreeTester { + const fn with_chain_num(mut self, chain_num: usize) -> Self { + self.chain_num = Some(chain_num); + self + } + + fn with_block_to_chain(mut self, block_to_chain: HashMap) -> Self { + self.block_to_chain = Some(block_to_chain); + self + } + + fn with_fork_to_child( + mut self, + fork_to_child: HashMap>, + ) -> Self { + self.fork_to_child = Some(fork_to_child); + self + } + + fn with_buffered_blocks( + mut self, + buffered_blocks: HashMap, + ) -> Self { + self.buffered_blocks = Some(buffered_blocks); + self + } + + fn with_pending_blocks( + mut self, + pending_blocks: (BlockNumber, HashSet), + ) -> Self { + self.pending_blocks = Some(pending_blocks); + self + } + + fn assert(self, tree: &BlockchainTree) { + if let Some(chain_num) = self.chain_num { + assert_eq!(tree.state.chains.len(), chain_num); + } + if let Some(block_to_chain) = self.block_to_chain { + assert_eq!(*tree.state.block_indices.blocks_to_chain(), block_to_chain); + } + if let Some(fork_to_child) = self.fork_to_child { + let mut x: HashMap> = + HashMap::with_capacity(fork_to_child.len()); + for (key, hash_set) in fork_to_child { + x.insert(key, hash_set.into_iter().collect()); + } + assert_eq!(*tree.state.block_indices.fork_to_child(), x); + } + if let Some(pending_blocks) = self.pending_blocks { + let (num, hashes) = tree.state.block_indices.pending_blocks(); + let hashes = hashes.into_iter().collect::>(); + assert_eq!((num, hashes), pending_blocks); + } + if let Some(buffered_blocks) = self.buffered_blocks { + assert_eq!(*tree.state.buffered_blocks.blocks(), buffered_blocks); + } + } + } + + #[test] + fn consecutive_reorgs() { + let signer = Address::random(); + let initial_signer_balance = U256::from(10).pow(U256::from(18)); + let chain_spec = Arc::new( + ChainSpecBuilder::default() + .chain(MAINNET.chain) + .genesis(Genesis { + alloc: BTreeMap::from([( + signer, + GenesisAccount { balance: initial_signer_balance, ..Default::default() }, + )]), + ..MAINNET.genesis.clone() + }) + .shanghai_activated() + .build(), + ); + let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + let consensus = Arc::new(TestConsensus::default()); + let executor_provider = EthExecutorProvider::ethereum(chain_spec.clone()); + + { + let provider_rw = provider_factory.provider_rw().unwrap(); + provider_rw + .insert_block( + SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default()) + .try_seal_with_senders() + .unwrap(), + StorageLocation::Database, + ) + .unwrap(); + let account = Account { balance: initial_signer_balance, ..Default::default() }; + 
provider_rw.tx_ref().put::(signer, account).unwrap(); + provider_rw.tx_ref().put::(keccak256(signer), account).unwrap(); + provider_rw.commit().unwrap(); + } + + let single_tx_cost = U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); + let mock_tx = |nonce: u64| -> RecoveredTx<_> { + TransactionSigned::new_unhashed( + Transaction::Eip1559(TxEip1559 { + chain_id: chain_spec.chain.id(), + nonce, + gas_limit: MIN_TRANSACTION_GAS, + to: Address::ZERO.into(), + max_fee_per_gas: INITIAL_BASE_FEE as u128, + ..Default::default() + }), + Signature::test_signature(), + ) + .with_signer(signer) + }; + + let mock_block = |number: u64, + parent: Option, + body: Vec>, + num_of_signer_txs: u64| + -> SealedBlockWithSenders { + let signed_body = body.clone().into_iter().map(|tx| tx.into_tx()).collect::>(); + let transactions_root = calculate_transaction_root(&signed_body); + let receipts = body + .iter() + .enumerate() + .map(|(idx, tx)| { + Receipt { + tx_type: tx.tx_type(), + success: true, + cumulative_gas_used: (idx as u64 + 1) * MIN_TRANSACTION_GAS, + ..Default::default() + } + .with_bloom() + }) + .collect::>(); + + // receipts root computation is different for OP + let receipts_root = calculate_receipt_root(&receipts); + + let header = Header { + number, + parent_hash: parent.unwrap_or_default(), + gas_used: body.len() as u64 * MIN_TRANSACTION_GAS, + gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, + mix_hash: B256::random(), + base_fee_per_gas: Some(INITIAL_BASE_FEE), + transactions_root, + receipts_root, + state_root: state_root_unhashed(HashMap::from([( + signer, + Account { + balance: initial_signer_balance - + (single_tx_cost * U256::from(num_of_signer_txs)), + nonce: num_of_signer_txs, + ..Default::default() + } + .into_trie_account(EMPTY_ROOT_HASH), + )])), + ..Default::default() + }; + + SealedBlockWithSenders::new( + SealedBlock::new( + SealedHeader::seal(header), + BlockBody { + transactions: signed_body, + ommers: Vec::new(), + withdrawals: Some(Withdrawals::default()), + }, + ), + body.iter().map(|tx| tx.signer()).collect(), + ) + .unwrap() + }; + + let fork_block = mock_block(1, Some(chain_spec.genesis_hash()), Vec::from([mock_tx(0)]), 1); + + let canonical_block_1 = + mock_block(2, Some(fork_block.hash()), Vec::from([mock_tx(1), mock_tx(2)]), 3); + let canonical_block_2 = mock_block(3, Some(canonical_block_1.hash()), Vec::new(), 3); + let canonical_block_3 = + mock_block(4, Some(canonical_block_2.hash()), Vec::from([mock_tx(3)]), 4); + + let sidechain_block_1 = mock_block(2, Some(fork_block.hash()), Vec::from([mock_tx(1)]), 2); + let sidechain_block_2 = + mock_block(3, Some(sidechain_block_1.hash()), Vec::from([mock_tx(2)]), 3); + + let mut tree = BlockchainTree::new( + TreeExternals::new(provider_factory, consensus, executor_provider), + BlockchainTreeConfig::default(), + ) + .expect("failed to create tree"); + + tree.insert_block(fork_block.clone(), BlockValidationKind::Exhaustive).unwrap(); + + assert_eq!( + tree.make_canonical(fork_block.hash()).unwrap(), + CanonicalOutcome::Committed { head: fork_block.header.clone() } + ); + + assert_eq!( + tree.insert_block(canonical_block_1.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + assert_eq!( + tree.make_canonical(canonical_block_1.hash()).unwrap(), + CanonicalOutcome::Committed { head: canonical_block_1.header.clone() } + ); + + assert_eq!( + tree.insert_block(canonical_block_2, BlockValidationKind::Exhaustive).unwrap(), + 
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + assert_eq!( + tree.insert_block(sidechain_block_1.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) + ); + + assert_eq!( + tree.make_canonical(sidechain_block_1.hash()).unwrap(), + CanonicalOutcome::Committed { head: sidechain_block_1.header.clone() } + ); + + assert_eq!( + tree.make_canonical(canonical_block_1.hash()).unwrap(), + CanonicalOutcome::Committed { head: canonical_block_1.header.clone() } + ); + + assert_eq!( + tree.insert_block(sidechain_block_2.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) + ); + + assert_eq!( + tree.make_canonical(sidechain_block_2.hash()).unwrap(), + CanonicalOutcome::Committed { head: sidechain_block_2.header.clone() } + ); + + assert_eq!( + tree.insert_block(canonical_block_3.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) + ); + + assert_eq!( + tree.make_canonical(canonical_block_3.hash()).unwrap(), + CanonicalOutcome::Committed { head: canonical_block_3.header.clone() } + ); + } + + #[test] + fn sidechain_block_hashes() { + let data = BlockchainTestData::default_from_number(11); + let (block1, exec1) = data.blocks[0].clone(); + let (block2, exec2) = data.blocks[1].clone(); + let (block3, exec3) = data.blocks[2].clone(); + let (block4, exec4) = data.blocks[3].clone(); + let genesis = data.genesis; + + // test pops execution results from vector, so order is from last to first. + let externals = + setup_externals(vec![exec3.clone(), exec2.clone(), exec4, exec3, exec2, exec1]); + + // last finalized block would be number 9. 
+ setup_genesis(&externals.provider_factory, genesis); + + // make tree + let config = BlockchainTreeConfig::new(1, 2, 3, 2); + let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); + // genesis block 10 is already canonical + tree.make_canonical(B256::ZERO).unwrap(); + + // make genesis block 10 as finalized + tree.finalize_block(10).unwrap(); + + assert_eq!( + tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + assert_eq!( + tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + assert_eq!( + tree.insert_block(block3.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + assert_eq!( + tree.insert_block(block4, BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + let mut block2a = block2; + let block2a_hash = B256::new([0x34; 32]); + block2a.set_hash(block2a_hash); + + assert_eq!( + tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) + ); + + let mut block3a = block3; + let block3a_hash = B256::new([0x35; 32]); + block3a.set_hash(block3a_hash); + block3a.set_parent_hash(block2a.hash()); + + assert_eq!( + tree.insert_block(block3a.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) /* TODO: this is incorrect, figure out why */ + ); + + let block3a_chain_id = tree.state.block_indices.get_side_chain_id(&block3a.hash()).unwrap(); + assert_eq!( + tree.all_chain_hashes(block3a_chain_id), + BTreeMap::from([ + (block1.number, block1.hash()), + (block2a.number, block2a.hash()), + (block3a.number, block3a.hash()), + ]) + ); + } + + #[test] + fn cached_trie_updates() { + let data = BlockchainTestData::default_from_number(11); + let (block1, exec1) = data.blocks[0].clone(); + let (block2, exec2) = data.blocks[1].clone(); + let (block3, exec3) = data.blocks[2].clone(); + let (block4, exec4) = data.blocks[3].clone(); + let (block5, exec5) = data.blocks[4].clone(); + let genesis = data.genesis; + + // test pops execution results from vector, so order is from last to first. + let externals = setup_externals(vec![exec5.clone(), exec4, exec3, exec2, exec1]); + + // last finalized block would be number 9. 
+ setup_genesis(&externals.provider_factory, genesis); + + // make tree + let config = BlockchainTreeConfig::new(1, 2, 3, 2); + let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); + // genesis block 10 is already canonical + tree.make_canonical(B256::ZERO).unwrap(); + + // make genesis block 10 as finalized + tree.finalize_block(10).unwrap(); + + assert_eq!( + tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + let block1_chain_id = tree.state.block_indices.get_side_chain_id(&block1.hash()).unwrap(); + let block1_chain = tree.state.chains.get(&block1_chain_id).unwrap(); + assert!(block1_chain.trie_updates().is_some()); + + assert_eq!( + tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + let block2_chain_id = tree.state.block_indices.get_side_chain_id(&block2.hash()).unwrap(); + let block2_chain = tree.state.chains.get(&block2_chain_id).unwrap(); + assert!(block2_chain.trie_updates().is_none()); + + assert_eq!( + tree.make_canonical(block2.hash()).unwrap(), + CanonicalOutcome::Committed { head: block2.header.clone() } + ); + + assert_eq!( + tree.insert_block(block3.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + let block3_chain_id = tree.state.block_indices.get_side_chain_id(&block3.hash()).unwrap(); + let block3_chain = tree.state.chains.get(&block3_chain_id).unwrap(); + assert!(block3_chain.trie_updates().is_some()); + + assert_eq!( + tree.make_canonical(block3.hash()).unwrap(), + CanonicalOutcome::Committed { head: block3.header.clone() } + ); + + assert_eq!( + tree.insert_block(block4.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + let block4_chain_id = tree.state.block_indices.get_side_chain_id(&block4.hash()).unwrap(); + let block4_chain = tree.state.chains.get(&block4_chain_id).unwrap(); + assert!(block4_chain.trie_updates().is_some()); + + assert_eq!( + tree.insert_block(block5.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + let block5_chain_id = tree.state.block_indices.get_side_chain_id(&block5.hash()).unwrap(); + let block5_chain = tree.state.chains.get(&block5_chain_id).unwrap(); + assert!(block5_chain.trie_updates().is_none()); + + assert_eq!( + tree.make_canonical(block5.hash()).unwrap(), + CanonicalOutcome::Committed { head: block5.header.clone() } + ); + + let provider = tree.externals.provider_factory.provider().unwrap(); + let prefix_sets = tree + .externals + .provider_factory + .hashed_post_state(exec5.state()) + .construct_prefix_sets() + .freeze(); + let state_root = + StateRoot::from_tx(provider.tx_ref()).with_prefix_sets(prefix_sets).root().unwrap(); + assert_eq!(state_root, block5.state_root); + } + + #[test] + fn test_side_chain_fork() { + let data = BlockchainTestData::default_from_number(11); + let (block1, exec1) = data.blocks[0].clone(); + let (block2, exec2) = data.blocks[1].clone(); + let genesis = data.genesis; + + // test pops execution results from vector, so order is from last to first. + let externals = setup_externals(vec![exec2.clone(), exec2, exec1]); + + // last finalized block would be number 9. 
+ setup_genesis(&externals.provider_factory, genesis); + + // make tree + let config = BlockchainTreeConfig::new(1, 2, 3, 2); + let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); + // genesis block 10 is already canonical + tree.make_canonical(B256::ZERO).unwrap(); + + // make genesis block 10 as finalized + tree.finalize_block(10).unwrap(); + + assert_eq!( + tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + assert_eq!( + tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + // we have one chain that has two blocks. + // Trie state: + // b2 (pending block) + // | + // | + // b1 (pending block) + // / + // / + // g1 (canonical blocks) + // | + TreeTester::default() + .with_chain_num(1) + .with_block_to_chain(HashMap::from([ + (block1.hash(), 0.into()), + (block2.hash(), 0.into()), + ])) + .with_fork_to_child(HashMap::from([( + block1.parent_hash, + HashSet::from([block1.hash()]), + )])) + .assert(&tree); + + let mut block2a = block2.clone(); + let block2a_hash = B256::new([0x34; 32]); + block2a.set_hash(block2a_hash); + + assert_eq!( + tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) + ); + + // fork chain. + // Trie state: + // b2 b2a (pending blocks in tree) + // | / + // | / + // b1 + // / + // / + // g1 (canonical blocks) + // | + + TreeTester::default() + .with_chain_num(2) + .with_block_to_chain(HashMap::from([ + (block1.hash(), 0.into()), + (block2.hash(), 0.into()), + (block2a.hash(), 1.into()), + ])) + .with_fork_to_child(HashMap::from([ + (block1.parent_hash, HashSet::from([block1.hash()])), + (block2a.parent_hash, HashSet::from([block2a.hash()])), + ])) + .assert(&tree); + // chain 0 has two blocks so receipts and reverts len is 2 + let chain0 = tree.state.chains.get(&0.into()).unwrap().execution_outcome(); + assert_eq!(chain0.receipts().len(), 2); + assert_eq!(chain0.state().reverts.len(), 2); + assert_eq!(chain0.first_block(), block1.number); + // chain 1 has one block so receipts and reverts len is 1 + let chain1 = tree.state.chains.get(&1.into()).unwrap().execution_outcome(); + assert_eq!(chain1.receipts().len(), 1); + assert_eq!(chain1.state().reverts.len(), 1); + assert_eq!(chain1.first_block(), block2.number); + } + + #[test] + fn sanity_path() { + let data = BlockchainTestData::default_from_number(11); + let (block1, exec1) = data.blocks[0].clone(); + let (block2, exec2) = data.blocks[1].clone(); + let genesis = data.genesis; + + // test pops execution results from vector, so order is from last to first. + let externals = setup_externals(vec![exec2.clone(), exec1.clone(), exec2, exec1]); + + // last finalized block would be number 9. 
+ setup_genesis(&externals.provider_factory, genesis); + + // make tree + let config = BlockchainTreeConfig::new(1, 2, 3, 2); + let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); + + let mut canon_notif = tree.subscribe_canon_state(); + // genesis block 10 is already canonical + let head = BlockNumHash::new(10, B256::ZERO); + tree.make_canonical(head.hash).unwrap(); + + // make sure is_block_hash_canonical returns true for genesis block + tree.is_block_hash_canonical(&B256::ZERO).unwrap(); + + // make genesis block 10 as finalized + tree.finalize_block(head.number).unwrap(); + + // block 2 parent is not known, block2 is buffered. + assert_eq!( + tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Disconnected { + head, + missing_ancestor: block2.parent_num_hash() + }) + ); + + // Buffered block: [block2] + // Trie state: + // | + // g1 (canonical blocks) + // | + + TreeTester::default() + .with_buffered_blocks(HashMap::from([(block2.hash(), block2.clone())])) + .assert(&tree); + + assert_eq!( + tree.is_block_known(block2.num_hash()).unwrap(), + Some(BlockStatus::Disconnected { head, missing_ancestor: block2.parent_num_hash() }) + ); + + // check if random block is known + let old_block = BlockNumHash::new(1, B256::new([32; 32])); + let err = BlockchainTreeError::PendingBlockIsFinalized { last_finalized: 10 }; + + assert_eq!(tree.is_block_known(old_block).unwrap_err().as_tree_error(), Some(err)); + + // insert block1 and buffered block2 is inserted + assert_eq!( + tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + // Buffered blocks: [] + // Trie state: + // b2 (pending block) + // | + // | + // b1 (pending block) + // / + // / + // g1 (canonical blocks) + // | + TreeTester::default() + .with_chain_num(1) + .with_block_to_chain(HashMap::from([ + (block1.hash(), 0.into()), + (block2.hash(), 0.into()), + ])) + .with_fork_to_child(HashMap::from([( + block1.parent_hash, + HashSet::from([block1.hash()]), + )])) + .with_pending_blocks((block1.number, HashSet::from([block1.hash()]))) + .assert(&tree); + + // already inserted block will `InsertPayloadOk::AlreadySeen(_)` + assert_eq!( + tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::AlreadySeen(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + // block two is already inserted. + assert_eq!( + tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::AlreadySeen(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + // make block1 canonical + tree.make_canonical(block1.hash()).unwrap(); + // check notification + assert_matches!(canon_notif.try_recv(), Ok(CanonStateNotification::Commit{ new}) if *new.blocks() == BTreeMap::from([(block1.number,block1.clone())])); + + // make block2 canonicals + tree.make_canonical(block2.hash()).unwrap(); + // check notification. 
+ assert_matches!(canon_notif.try_recv(), Ok(CanonStateNotification::Commit{ new}) if *new.blocks() == BTreeMap::from([(block2.number,block2.clone())])); + + // Trie state: + // b2 (canonical block) + // | + // | + // b1 (canonical block) + // | + // | + // g1 (canonical blocks) + // | + TreeTester::default() + .with_chain_num(0) + .with_block_to_chain(HashMap::from([])) + .with_fork_to_child(HashMap::from([])) + .assert(&tree); + + /**** INSERT SIDE BLOCKS *** */ + + let mut block1a = block1.clone(); + let block1a_hash = B256::new([0x33; 32]); + block1a.set_hash(block1a_hash); + let mut block2a = block2.clone(); + let block2a_hash = B256::new([0x34; 32]); + block2a.set_hash(block2a_hash); + + // reinsert two blocks that point to canonical chain + assert_eq!( + tree.insert_block(block1a.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) + ); + + TreeTester::default() + .with_chain_num(1) + .with_block_to_chain(HashMap::from([(block1a_hash, 1.into())])) + .with_fork_to_child(HashMap::from([( + block1.parent_hash, + HashSet::from([block1a_hash]), + )])) + .with_pending_blocks((block2.number + 1, HashSet::from([]))) + .assert(&tree); + + assert_eq!( + tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) + ); + // Trie state: + // b2 b2a (side chain) + // | / + // | / + // b1 b1a (side chain) + // | / + // |/ + // g1 (10) + // | + TreeTester::default() + .with_chain_num(2) + .with_block_to_chain(HashMap::from([ + (block1a_hash, 1.into()), + (block2a_hash, 2.into()), + ])) + .with_fork_to_child(HashMap::from([ + (block1.parent_hash, HashSet::from([block1a_hash])), + (block1.hash(), HashSet::from([block2a_hash])), + ])) + .with_pending_blocks((block2.number + 1, HashSet::from([]))) + .assert(&tree); + + // make b2a canonical + assert!(tree.make_canonical(block2a_hash).is_ok()); + // check notification. + assert_matches!(canon_notif.try_recv(), + Ok(CanonStateNotification::Reorg{ old, new}) + if *old.blocks() == BTreeMap::from([(block2.number,block2.clone())]) + && *new.blocks() == BTreeMap::from([(block2a.number,block2a.clone())])); + + // Trie state: + // b2a b2 (side chain) + // | / + // | / + // b1 b1a (side chain) + // | / + // |/ + // g1 (10) + // | + TreeTester::default() + .with_chain_num(2) + .with_block_to_chain(HashMap::from([ + (block1a_hash, 1.into()), + (block2.hash(), 3.into()), + ])) + .with_fork_to_child(HashMap::from([ + (block1.parent_hash, HashSet::from([block1a_hash])), + (block1.hash(), HashSet::from([block2.hash()])), + ])) + .with_pending_blocks((block2.number + 1, HashSet::default())) + .assert(&tree); + + assert_matches!(tree.make_canonical(block1a_hash), Ok(_)); + // Trie state: + // b2a b2 (side chain) + // | / + // | / + // b1a b1 (side chain) + // | / + // |/ + // g1 (10) + // | + TreeTester::default() + .with_chain_num(2) + .with_block_to_chain(HashMap::from([ + (block1.hash(), 4.into()), + (block2a_hash, 4.into()), + (block2.hash(), 3.into()), + ])) + .with_fork_to_child(HashMap::from([ + (block1.parent_hash, HashSet::from([block1.hash()])), + (block1.hash(), HashSet::from([block2.hash()])), + ])) + .with_pending_blocks((block1a.number + 1, HashSet::default())) + .assert(&tree); + + // check notification. 
+ assert_matches!(canon_notif.try_recv(), + Ok(CanonStateNotification::Reorg{ old, new}) + if *old.blocks() == BTreeMap::from([(block1.number,block1.clone()),(block2a.number,block2a.clone())]) + && *new.blocks() == BTreeMap::from([(block1a.number,block1a.clone())])); + + // check that b2 and b1 are not canonical + assert!(!tree.is_block_hash_canonical(&block2.hash()).unwrap()); + assert!(!tree.is_block_hash_canonical(&block1.hash()).unwrap()); + + // ensure that b1a is canonical + assert!(tree.is_block_hash_canonical(&block1a.hash()).unwrap()); + + // make b2 canonical + tree.make_canonical(block2.hash()).unwrap(); + // Trie state: + // b2 b2a (side chain) + // | / + // | / + // b1 b1a (side chain) + // | / + // |/ + // g1 (10) + // | + TreeTester::default() + .with_chain_num(2) + .with_block_to_chain(HashMap::from([ + (block1a_hash, 5.into()), + (block2a_hash, 4.into()), + ])) + .with_fork_to_child(HashMap::from([ + (block1.parent_hash, HashSet::from([block1a_hash])), + (block1.hash(), HashSet::from([block2a_hash])), + ])) + .with_pending_blocks((block2.number + 1, HashSet::default())) + .assert(&tree); + + // check notification. + assert_matches!(canon_notif.try_recv(), + Ok(CanonStateNotification::Reorg{ old, new}) + if *old.blocks() == BTreeMap::from([(block1a.number,block1a.clone())]) + && *new.blocks() == BTreeMap::from([(block1.number,block1.clone()),(block2.number,block2.clone())])); + + // check that b2 is now canonical + assert!(tree.is_block_hash_canonical(&block2.hash()).unwrap()); + + // finalize b1 that would make b1a removed from tree + tree.finalize_block(11).unwrap(); + // Trie state: + // b2 b2a (side chain) + // | / + // | / + // b1 (canon) + // | + // g1 (10) + // | + TreeTester::default() + .with_chain_num(1) + .with_block_to_chain(HashMap::from([(block2a_hash, 4.into())])) + .with_fork_to_child(HashMap::from([(block1.hash(), HashSet::from([block2a_hash]))])) + .with_pending_blocks((block2.number + 1, HashSet::from([]))) + .assert(&tree); + + // unwind canonical + assert!(tree.unwind(block1.number).is_ok()); + // Trie state: + // b2 b2a (pending block) + // / / + // / / + // / / + // b1 (canonical block) + // | + // | + // g1 (canonical blocks) + // | + TreeTester::default() + .with_chain_num(2) + .with_block_to_chain(HashMap::from([ + (block2a_hash, 4.into()), + (block2.hash(), 6.into()), + ])) + .with_fork_to_child(HashMap::from([( + block1.hash(), + HashSet::from([block2a_hash, block2.hash()]), + )])) + .with_pending_blocks((block2.number, HashSet::from([block2.hash(), block2a.hash()]))) + .assert(&tree); + + // commit b2a + tree.make_canonical(block2.hash()).unwrap(); + + // Trie state: + // b2 b2a (side chain) + // | / + // | / + // b1 (finalized) + // | + // g1 (10) + // | + TreeTester::default() + .with_chain_num(1) + .with_block_to_chain(HashMap::from([(block2a_hash, 4.into())])) + .with_fork_to_child(HashMap::from([(block1.hash(), HashSet::from([block2a_hash]))])) + .with_pending_blocks((block2.number + 1, HashSet::default())) + .assert(&tree); + + // check notification. 
+ assert_matches!(canon_notif.try_recv(), + Ok(CanonStateNotification::Commit{ new }) + if *new.blocks() == BTreeMap::from([(block2.number,block2.clone())])); + + // insert unconnected block2b + let mut block2b = block2a.clone(); + block2b.set_hash(B256::new([0x99; 32])); + block2b.set_parent_hash(B256::new([0x88; 32])); + + assert_eq!( + tree.insert_block(block2b.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Disconnected { + head: block2.header.num_hash(), + missing_ancestor: block2b.parent_num_hash() + }) + ); + + TreeTester::default() + .with_buffered_blocks(HashMap::from([(block2b.hash(), block2b.clone())])) + .assert(&tree); + + // update canonical block to b2, this would make b2a be removed + assert!(tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(12).is_ok()); + + assert_eq!( + tree.is_block_known(block2.num_hash()).unwrap(), + Some(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + // Trie state: + // b2 (finalized) + // | + // b1 (finalized) + // | + // g1 (10) + // | + TreeTester::default() + .with_chain_num(0) + .with_block_to_chain(HashMap::default()) + .with_fork_to_child(HashMap::default()) + .with_pending_blocks((block2.number + 1, HashSet::default())) + .with_buffered_blocks(HashMap::default()) + .assert(&tree); + } + + #[test] + fn last_finalized_block_initialization() { + let data = BlockchainTestData::default_from_number(11); + let (block1, exec1) = data.blocks[0].clone(); + let (block2, exec2) = data.blocks[1].clone(); + let (block3, exec3) = data.blocks[2].clone(); + let genesis = data.genesis; + + // test pops execution results from vector, so order is from last to first. + let externals = + setup_externals(vec![exec3.clone(), exec2.clone(), exec1.clone(), exec3, exec2, exec1]); + let cloned_externals_1 = TreeExternals { + provider_factory: externals.provider_factory.clone(), + executor_factory: externals.executor_factory.clone(), + consensus: externals.consensus.clone(), + }; + let cloned_externals_2 = TreeExternals { + provider_factory: externals.provider_factory.clone(), + executor_factory: externals.executor_factory.clone(), + consensus: externals.consensus.clone(), + }; + + // last finalized block would be number 9. 
+ setup_genesis(&externals.provider_factory, genesis); + + // make tree + let config = BlockchainTreeConfig::new(1, 2, 3, 2); + let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); + + assert_eq!( + tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + assert_eq!( + tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + assert_eq!( + tree.insert_block(block3, BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) + ); + + tree.make_canonical(block2.hash()).unwrap(); + + // restart + let mut tree = + BlockchainTree::new(cloned_externals_1, config).expect("failed to create tree"); + assert_eq!(tree.block_indices().last_finalized_block(), 0); + + let mut block1a = block1; + let block1a_hash = B256::new([0x33; 32]); + block1a.set_hash(block1a_hash); + + assert_eq!( + tree.insert_block(block1a.clone(), BlockValidationKind::Exhaustive).unwrap(), + InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) + ); + + tree.make_canonical(block1a.hash()).unwrap(); + tree.finalize_block(block1a.number).unwrap(); + + // restart + let tree = BlockchainTree::new(cloned_externals_2, config).expect("failed to create tree"); + + assert_eq!(tree.block_indices().last_finalized_block(), block1a.number); + } +} diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index bfa6627413aa..9fba5bab4a9f 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -77,7 +77,7 @@ impl AppendableChain { ) -> Result where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let execution_outcome = ExecutionOutcome::default(); let empty = BTreeMap::new(); @@ -115,7 +115,7 @@ impl AppendableChain { ) -> Result where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let parent_number = block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; @@ -178,7 +178,7 @@ impl AppendableChain { where EDP: FullExecutionDataProvider, N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { // some checks are done before blocks comes here. externals.consensus.validate_header_against_parent(&block, parent_block)?; @@ -209,7 +209,7 @@ impl AppendableChain { let block_hash = block.hash(); let block = block.unseal(); - let state = executor.execute(&block).unwrap(); + let state = executor.execute(&block)?; externals.consensus.validate_block_post_execution( &block, PostExecutionInput::new(&state.receipts, &state.requests), @@ -284,7 +284,7 @@ impl AppendableChain { ) -> Result<(), InsertBlockErrorKind> where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let parent_block = self.chain.tip(); diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs new file mode 100644 index 000000000000..d37a849867b3 --- /dev/null +++ b/crates/blockchain-tree/src/shareable.rs @@ -0,0 +1,206 @@ +//! Wrapper around `BlockchainTree` that allows for it to be shared. 
+ +use crate::externals::TreeNodeTypes; + +use super::BlockchainTree; +use alloy_eips::BlockNumHash; +use alloy_primitives::{BlockHash, BlockNumber}; +use parking_lot::RwLock; +use reth_blockchain_tree_api::{ + error::{CanonicalError, InsertBlockError}, + BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, + InsertPayloadOk, +}; +use reth_evm::execute::BlockExecutorProvider; +use reth_execution_errors::BlockExecutionError; +use reth_node_types::NodeTypesWithDB; +use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; +use reth_provider::{ + providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateNotifications, + CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, ProviderError, +}; +use reth_storage_errors::provider::ProviderResult; +use std::{collections::BTreeMap, sync::Arc}; +use tracing::trace; + +/// Shareable blockchain tree that is behind a `RwLock` +#[derive(Clone, Debug)] +pub struct ShareableBlockchainTree { + /// `BlockchainTree` + pub tree: Arc>>, +} + +impl ShareableBlockchainTree { + /// Create a new shareable database. + pub fn new(tree: BlockchainTree) -> Self { + Self { tree: Arc::new(RwLock::new(tree)) } + } +} + +impl BlockchainTreeEngine for ShareableBlockchainTree +where + N: TreeNodeTypes, + E: BlockExecutorProvider, +{ + fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { + let mut tree = self.tree.write(); + // Blockchain tree metrics shouldn't be updated here, see + // `BlockchainTree::update_chains_metrics` documentation. + tree.buffer_block(block) + } + + fn insert_block( + &self, + block: SealedBlockWithSenders, + validation_kind: BlockValidationKind, + ) -> Result { + trace!(target: "blockchain_tree", hash = %block.hash(), number = block.number, parent_hash = %block.parent_hash, "Inserting block"); + let mut tree = self.tree.write(); + let res = tree.insert_block(block, validation_kind); + tree.update_chains_metrics(); + res + } + + fn finalize_block(&self, finalized_block: BlockNumber) -> ProviderResult<()> { + trace!(target: "blockchain_tree", finalized_block, "Finalizing block"); + let mut tree = self.tree.write(); + tree.finalize_block(finalized_block)?; + tree.update_chains_metrics(); + + Ok(()) + } + + fn connect_buffered_blocks_to_canonical_hashes_and_finalize( + &self, + last_finalized_block: BlockNumber, + ) -> Result<(), CanonicalError> { + trace!(target: "blockchain_tree", last_finalized_block, "Connecting buffered blocks to canonical hashes and finalizing the tree"); + let mut tree = self.tree.write(); + let res = + tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(last_finalized_block); + tree.update_chains_metrics(); + Ok(res?) + } + + fn update_block_hashes_and_clear_buffered( + &self, + ) -> Result, CanonicalError> { + let mut tree = self.tree.write(); + let res = tree.update_block_hashes_and_clear_buffered(); + tree.update_chains_metrics(); + Ok(res?) + } + + fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { + trace!(target: "blockchain_tree", "Connecting buffered blocks to canonical hashes"); + let mut tree = self.tree.write(); + let res = tree.connect_buffered_blocks_to_canonical_hashes(); + tree.update_chains_metrics(); + Ok(res?) 
+ } + + fn make_canonical(&self, block_hash: BlockHash) -> Result { + trace!(target: "blockchain_tree", %block_hash, "Making block canonical"); + let mut tree = self.tree.write(); + let res = tree.make_canonical(block_hash); + tree.update_chains_metrics(); + res + } +} + +impl BlockchainTreeViewer for ShareableBlockchainTree +where + N: TreeNodeTypes, + E: BlockExecutorProvider, +{ + fn header_by_hash(&self, hash: BlockHash) -> Option { + trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); + self.tree.read().sidechain_block_by_hash(hash).map(|b| b.header.clone()) + } + + fn block_by_hash(&self, block_hash: BlockHash) -> Option { + trace!(target: "blockchain_tree", ?block_hash, "Returning block by hash"); + self.tree.read().sidechain_block_by_hash(block_hash).cloned() + } + + fn block_with_senders_by_hash(&self, block_hash: BlockHash) -> Option { + trace!(target: "blockchain_tree", ?block_hash, "Returning block by hash"); + self.tree.read().block_with_senders_by_hash(block_hash).cloned() + } + + fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option { + self.tree.read().get_buffered_block(&block_hash).map(|b| b.header.clone()) + } + + fn is_canonical(&self, hash: BlockHash) -> Result { + trace!(target: "blockchain_tree", ?hash, "Checking if block is canonical"); + self.tree.read().is_block_hash_canonical(&hash) + } + + fn lowest_buffered_ancestor(&self, hash: BlockHash) -> Option { + trace!(target: "blockchain_tree", ?hash, "Returning lowest buffered ancestor"); + self.tree.read().lowest_buffered_ancestor(&hash).cloned() + } + + fn canonical_tip(&self) -> BlockNumHash { + trace!(target: "blockchain_tree", "Returning canonical tip"); + self.tree.read().block_indices().canonical_tip() + } + + fn pending_block_num_hash(&self) -> Option { + trace!(target: "blockchain_tree", "Returning first pending block"); + self.tree.read().block_indices().pending_block_num_hash() + } + + fn pending_block(&self) -> Option { + trace!(target: "blockchain_tree", "Returning first pending block"); + self.tree.read().pending_block().cloned() + } + + fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { + let tree = self.tree.read(); + let pending_block = tree.pending_block()?.clone(); + let receipts = + tree.receipts_by_block_hash(pending_block.hash())?.into_iter().cloned().collect(); + Some((pending_block, receipts)) + } + + fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { + let tree = self.tree.read(); + Some(tree.receipts_by_block_hash(block_hash)?.into_iter().cloned().collect()) + } +} + +impl BlockchainTreePendingStateProvider for ShareableBlockchainTree +where + N: TreeNodeTypes, + E: BlockExecutorProvider, +{ + fn find_pending_state_provider( + &self, + block_hash: BlockHash, + ) -> Option> { + trace!(target: "blockchain_tree", ?block_hash, "Finding pending state provider"); + let provider = self.tree.read().post_state_data(block_hash)?; + Some(Box::new(provider)) + } +} + +impl NodePrimitivesProvider for ShareableBlockchainTree +where + N: ProviderNodeTypes, + E: Send + Sync, +{ + type Primitives = N::Primitives; +} + +impl CanonStateSubscriptions for ShareableBlockchainTree +where + N: TreeNodeTypes, + E: Send + Sync, +{ + fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { + trace!(target: "blockchain_tree", "Registered subscriber for canonical state"); + self.tree.read().subscribe_canon_state() + } +} diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index dff6240a5ca7..3e402a6cc07a 100644 --- 
a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -200,7 +200,7 @@ pub trait BlockExecutionStrategy { type Primitives: NodePrimitives; /// The error type returned by this strategy's methods. - type Error; + type Error: From + Into; /// Initialize the strategy with the given transaction environment overrides. fn init(&mut self, _tx_env_overrides: Box) {} @@ -445,7 +445,9 @@ where let ExecuteOutput { receipts, .. } = self.strategy.execute_transactions(block)?; let requests = self.strategy.apply_post_execution_changes(block, &receipts)?; - self.strategy.validate_block_post_execution(block, &receipts, &requests)?; + self.strategy + .validate_block_post_execution(block, &receipts, &requests) + .map_err(BlockExecutionError::Consensus)?; // prepare the state according to the prune mode let retention = self.batch_record.bundle_retention(block.header().number()); diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index 4787bf9ce5f7..94ef44d9b688 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -145,6 +145,7 @@ mod tests { use super::*; use alloy_eips::eip7685::Requests; use metrics_util::debugging::{DebugValue, DebuggingRecorder, Snapshotter}; + use reth_execution_errors::BlockExecutionError; use revm::db::BundleState; use revm_primitives::{ Account, AccountInfo, AccountStatus, EvmState, EvmStorage, EvmStorageSlot, B256, U256, @@ -162,7 +163,7 @@ mod tests { where Self: 'a; type Output = BlockExecutionOutput<()>; - type Error = std::convert::Infallible; + type Error = BlockExecutionError; fn execute(self, _input: Self::Input<'_>) -> Result { Ok(BlockExecutionOutput { diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 6d8f0397588e..6ef0e270ae05 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -39,7 +39,10 @@ pub struct BackfillJob { impl Iterator for BackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult>; @@ -55,7 +58,10 @@ where impl BackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: BlockReader + HeaderProvider + StateProviderFactory, { /// Converts the backfill job into a single block backfill job. @@ -111,7 +117,7 @@ where let (header, body) = block.split_sealed_header_body(); let block = P::Block::new_sealed(header, body).with_senders(senders); - executor.execute_and_verify_one(&block).unwrap(); + executor.execute_and_verify_one(&block)?; execution_duration += execute_start.elapsed(); // TODO(alexey): report gas metrics using `block.header.gas_used` @@ -161,7 +167,10 @@ pub struct SingleBlockBackfillJob { impl Iterator for SingleBlockBackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult<( @@ -176,7 +185,10 @@ where impl SingleBlockBackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: HeaderProvider + BlockReader + StateProviderFactory, { /// Converts the single block backfill job into a stream. 
diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 59637ce81b0e..47bae9db03b7 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -15,6 +15,7 @@ use alloy_rpc_types_trace::geth::{ use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::EthereumHardforks; +use reth_errors::BlockExecutionError; use reth_evm::{ env::EvmEnv, execute::{BlockExecutorProvider, Executor}, @@ -84,8 +85,10 @@ impl DebugApi { impl DebugApi where Eth: EthApiTypes + TraceExt + 'static, - BlockExecutor: - BlockExecutorProvider>>, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives>, + Error = BlockExecutionError, + >, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index c621c8b9790c..10eac69da7af 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -93,7 +93,7 @@ where + ChainSpecProvider + StateProviderFactory + 'static, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( From a36615f07e617f03dbcee74d2aba01bbabe0d0e6 Mon Sep 17 00:00:00 2001 From: chungquantin <56880684+chungquantin@users.noreply.github.com> Date: Thu, 16 Jan 2025 17:47:51 +0700 Subject: [PATCH 09/13] Revert "chore: init" This reverts commit 7bfd8a70ba27102ddc24dfff28e9d9665010124a. --- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/blockchain-tree/src/chain.rs | 10 ++++----- crates/blockchain-tree/src/shareable.rs | 7 +++--- crates/evm/src/execute.rs | 6 ++--- crates/evm/src/metrics.rs | 3 +-- crates/exex/exex/src/backfill/job.rs | 22 +++++-------------- crates/rpc/rpc/src/debug.rs | 7 ++---- crates/rpc/rpc/src/validation.rs | 2 +- 8 files changed, 20 insertions(+), 39 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 264d35f2d10d..3964ea53b7e2 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -95,7 +95,7 @@ impl BlockchainTree { impl BlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. /// diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index 9fba5bab4a9f..bfa6627413aa 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -77,7 +77,7 @@ impl AppendableChain { ) -> Result where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let execution_outcome = ExecutionOutcome::default(); let empty = BTreeMap::new(); @@ -115,7 +115,7 @@ impl AppendableChain { ) -> Result where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let parent_number = block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; @@ -178,7 +178,7 @@ impl AppendableChain { where EDP: FullExecutionDataProvider, N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { // some checks are done before blocks comes here. 
externals.consensus.validate_header_against_parent(&block, parent_block)?; @@ -209,7 +209,7 @@ impl AppendableChain { let block_hash = block.hash(); let block = block.unseal(); - let state = executor.execute(&block)?; + let state = executor.execute(&block).unwrap(); externals.consensus.validate_block_post_execution( &block, PostExecutionInput::new(&state.receipts, &state.requests), @@ -284,7 +284,7 @@ impl AppendableChain { ) -> Result<(), InsertBlockErrorKind> where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let parent_block = self.chain.tip(); diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index d37a849867b3..e668f4e2dac0 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -12,7 +12,6 @@ use reth_blockchain_tree_api::{ InsertPayloadOk, }; use reth_evm::execute::BlockExecutorProvider; -use reth_execution_errors::BlockExecutionError; use reth_node_types::NodeTypesWithDB; use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ @@ -40,7 +39,7 @@ impl ShareableBlockchainTree { impl BlockchainTreeEngine for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { let mut tree = self.tree.write(); @@ -111,7 +110,7 @@ where impl BlockchainTreeViewer for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn header_by_hash(&self, hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); @@ -174,7 +173,7 @@ where impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn find_pending_state_provider( &self, diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index 3e402a6cc07a..dff6240a5ca7 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -200,7 +200,7 @@ pub trait BlockExecutionStrategy { type Primitives: NodePrimitives; /// The error type returned by this strategy's methods. - type Error: From + Into; + type Error; /// Initialize the strategy with the given transaction environment overrides. fn init(&mut self, _tx_env_overrides: Box) {} @@ -445,9 +445,7 @@ where let ExecuteOutput { receipts, .. 
} = self.strategy.execute_transactions(block)?; let requests = self.strategy.apply_post_execution_changes(block, &receipts)?; - self.strategy - .validate_block_post_execution(block, &receipts, &requests) - .map_err(BlockExecutionError::Consensus)?; + self.strategy.validate_block_post_execution(block, &receipts, &requests)?; // prepare the state according to the prune mode let retention = self.batch_record.bundle_retention(block.header().number()); diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index 94ef44d9b688..4787bf9ce5f7 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -145,7 +145,6 @@ mod tests { use super::*; use alloy_eips::eip7685::Requests; use metrics_util::debugging::{DebugValue, DebuggingRecorder, Snapshotter}; - use reth_execution_errors::BlockExecutionError; use revm::db::BundleState; use revm_primitives::{ Account, AccountInfo, AccountStatus, EvmState, EvmStorage, EvmStorageSlot, B256, U256, @@ -163,7 +162,7 @@ mod tests { where Self: 'a; type Output = BlockExecutionOutput<()>; - type Error = BlockExecutionError; + type Error = std::convert::Infallible; fn execute(self, _input: Self::Input<'_>) -> Result { Ok(BlockExecutionOutput { diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 6ef0e270ae05..6d8f0397588e 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -39,10 +39,7 @@ pub struct BackfillJob { impl Iterator for BackfillJob where - E: BlockExecutorProvider< - Primitives: NodePrimitives, - Error = BlockExecutionError, - >, + E: BlockExecutorProvider>, P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult>; @@ -58,10 +55,7 @@ where impl BackfillJob where - E: BlockExecutorProvider< - Primitives: NodePrimitives, - Error = BlockExecutionError, - >, + E: BlockExecutorProvider>, P: BlockReader + HeaderProvider + StateProviderFactory, { /// Converts the backfill job into a single block backfill job. @@ -117,7 +111,7 @@ where let (header, body) = block.split_sealed_header_body(); let block = P::Block::new_sealed(header, body).with_senders(senders); - executor.execute_and_verify_one(&block)?; + executor.execute_and_verify_one(&block).unwrap(); execution_duration += execute_start.elapsed(); // TODO(alexey): report gas metrics using `block.header.gas_used` @@ -167,10 +161,7 @@ pub struct SingleBlockBackfillJob { impl Iterator for SingleBlockBackfillJob where - E: BlockExecutorProvider< - Primitives: NodePrimitives, - Error = BlockExecutionError, - >, + E: BlockExecutorProvider>, P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult<( @@ -185,10 +176,7 @@ where impl SingleBlockBackfillJob where - E: BlockExecutorProvider< - Primitives: NodePrimitives, - Error = BlockExecutionError, - >, + E: BlockExecutorProvider>, P: HeaderProvider + BlockReader + StateProviderFactory, { /// Converts the single block backfill job into a stream. 
diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 47bae9db03b7..59637ce81b0e 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -15,7 +15,6 @@ use alloy_rpc_types_trace::geth::{ use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::EthereumHardforks; -use reth_errors::BlockExecutionError; use reth_evm::{ env::EvmEnv, execute::{BlockExecutorProvider, Executor}, @@ -85,10 +84,8 @@ impl DebugApi { impl DebugApi where Eth: EthApiTypes + TraceExt + 'static, - BlockExecutor: BlockExecutorProvider< - Primitives: NodePrimitives>, - Error = BlockExecutionError, - >, + BlockExecutor: + BlockExecutorProvider>>, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index 10eac69da7af..c621c8b9790c 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -93,7 +93,7 @@ where + ChainSpecProvider + StateProviderFactory + 'static, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( From aace4021ae59477a0f0bbccffeba95b7bf76f4c1 Mon Sep 17 00:00:00 2001 From: chungquantin <56880684+chungquantin@users.noreply.github.com> Date: Thu, 16 Jan 2025 23:19:11 +0700 Subject: [PATCH 10/13] refactor(partial): add BlockExecutionError to BlockExecutionProvider --- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/blockchain-tree/src/chain.rs | 10 +++---- crates/blockchain-tree/src/shareable.rs | 7 ++--- crates/evm/src/execute.rs | 8 +++--- crates/evm/src/metrics.rs | 3 ++- crates/exex/exex/src/backfill/job.rs | 22 ++++++++++++---- crates/exex/exex/src/backfill/stream.rs | 12 +++++++-- crates/exex/exex/src/notifications.rs | 26 ++++++++++++------- crates/node/api/src/node.rs | 8 +++--- crates/rpc/rpc/src/debug.rs | 13 +++++++--- crates/rpc/rpc/src/validation.rs | 2 +- 11 files changed, 76 insertions(+), 37 deletions(-) diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 3964ea53b7e2..264d35f2d10d 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -95,7 +95,7 @@ impl BlockchainTree { impl BlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { /// Builds the blockchain tree for the node. /// diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index bfa6627413aa..9fba5bab4a9f 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -77,7 +77,7 @@ impl AppendableChain { ) -> Result where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let execution_outcome = ExecutionOutcome::default(); let empty = BTreeMap::new(); @@ -115,7 +115,7 @@ impl AppendableChain { ) -> Result where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let parent_number = block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; @@ -178,7 +178,7 @@ impl AppendableChain { where EDP: FullExecutionDataProvider, N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { // some checks are done before blocks comes here. 
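        // (In other words, the basic header and pre-execution consensus checks
        // have already run before a block reaches this point. What follows
        // validates the header against its parent, executes the block, and then
        // runs the post-execution consensus checks on the resulting receipts
        // and requests.)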
externals.consensus.validate_header_against_parent(&block, parent_block)?; @@ -209,7 +209,7 @@ impl AppendableChain { let block_hash = block.hash(); let block = block.unseal(); - let state = executor.execute(&block).unwrap(); + let state = executor.execute(&block)?; externals.consensus.validate_block_post_execution( &block, PostExecutionInput::new(&state.receipts, &state.requests), @@ -284,7 +284,7 @@ impl AppendableChain { ) -> Result<(), InsertBlockErrorKind> where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { let parent_block = self.chain.tip(); diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index e668f4e2dac0..d37a849867b3 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -12,6 +12,7 @@ use reth_blockchain_tree_api::{ InsertPayloadOk, }; use reth_evm::execute::BlockExecutorProvider; +use reth_execution_errors::BlockExecutionError; use reth_node_types::NodeTypesWithDB; use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; use reth_provider::{ @@ -39,7 +40,7 @@ impl ShareableBlockchainTree { impl BlockchainTreeEngine for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { let mut tree = self.tree.write(); @@ -110,7 +111,7 @@ where impl BlockchainTreeViewer for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn header_by_hash(&self, hash: BlockHash) -> Option { trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); @@ -173,7 +174,7 @@ where impl BlockchainTreePendingStateProvider for ShareableBlockchainTree where N: TreeNodeTypes, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { fn find_pending_state_provider( &self, diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index dff6240a5ca7..ca358d86621f 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -80,7 +80,7 @@ pub trait BatchExecutor { /// The output type for the executor. type Output; /// The error type returned by the executor. - type Error; + type Error: Into; /// Executes the next block in the batch, verifies the output and updates the state internally. fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>; @@ -200,7 +200,7 @@ pub trait BlockExecutionStrategy { type Primitives: NodePrimitives; /// The error type returned by this strategy's methods. - type Error; + type Error: From + Into; /// Initialize the strategy with the given transaction environment overrides. fn init(&mut self, _tx_env_overrides: Box) {} @@ -445,7 +445,9 @@ where let ExecuteOutput { receipts, .. 
} = self.strategy.execute_transactions(block)?; let requests = self.strategy.apply_post_execution_changes(block, &receipts)?; - self.strategy.validate_block_post_execution(block, &receipts, &requests)?; + self.strategy + .validate_block_post_execution(block, &receipts, &requests) + .map_err(BlockExecutionError::Consensus)?; // prepare the state according to the prune mode let retention = self.batch_record.bundle_retention(block.header().number()); diff --git a/crates/evm/src/metrics.rs b/crates/evm/src/metrics.rs index 4787bf9ce5f7..94ef44d9b688 100644 --- a/crates/evm/src/metrics.rs +++ b/crates/evm/src/metrics.rs @@ -145,6 +145,7 @@ mod tests { use super::*; use alloy_eips::eip7685::Requests; use metrics_util::debugging::{DebugValue, DebuggingRecorder, Snapshotter}; + use reth_execution_errors::BlockExecutionError; use revm::db::BundleState; use revm_primitives::{ Account, AccountInfo, AccountStatus, EvmState, EvmStorage, EvmStorageSlot, B256, U256, @@ -162,7 +163,7 @@ mod tests { where Self: 'a; type Output = BlockExecutionOutput<()>; - type Error = std::convert::Infallible; + type Error = BlockExecutionError; fn execute(self, _input: Self::Input<'_>) -> Result { Ok(BlockExecutionOutput { diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 6d8f0397588e..6ef0e270ae05 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -39,7 +39,10 @@ pub struct BackfillJob { impl Iterator for BackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult>; @@ -55,7 +58,10 @@ where impl BackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: BlockReader + HeaderProvider + StateProviderFactory, { /// Converts the backfill job into a single block backfill job. @@ -111,7 +117,7 @@ where let (header, body) = block.split_sealed_header_body(); let block = P::Block::new_sealed(header, body).with_senders(senders); - executor.execute_and_verify_one(&block).unwrap(); + executor.execute_and_verify_one(&block)?; execution_duration += execute_start.elapsed(); // TODO(alexey): report gas metrics using `block.header.gas_used` @@ -161,7 +167,10 @@ pub struct SingleBlockBackfillJob { impl Iterator for SingleBlockBackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: HeaderProvider + BlockReader + StateProviderFactory, { type Item = BackfillJobResult<( @@ -176,7 +185,10 @@ where impl SingleBlockBackfillJob where - E: BlockExecutorProvider>, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + >, P: HeaderProvider + BlockReader + StateProviderFactory, { /// Converts the single block backfill job into a stream. 
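
Patch 10 restores the error plumbing that was relaxed above: `BatchExecutor::Error` must again convert into `BlockExecutionError`, the strategy's error regains its `From`/`Into` bounds, consensus failures are wrapped explicitly via `BlockExecutionError::Consensus`, and the backfill job can use `?` instead of `.unwrap()`. A hedged sketch of what such bounds ask of an implementor's error type; the generic arguments of the bounds are assumed here to be `ProviderError` and `BlockExecutionError`, and `MyStrategyError` is hypothetical rather than a real reth type.

use reth_evm::execute::{BlockExecutionError, ProviderError};

// Hypothetical strategy error; the two conversions below are what bounds of the
// form `From<ProviderError> + Into<BlockExecutionError>` require.
#[derive(Debug, thiserror::Error)]
enum MyStrategyError {
    #[error(transparent)]
    Provider(#[from] ProviderError), // satisfies `From<ProviderError>`
    #[error(transparent)]
    Execution(#[from] BlockExecutionError),
}

// Satisfies `Into<BlockExecutionError>`, so the generic executor can use `?`
// and still hand a `BlockExecutionError` back to its callers.
impl From<MyStrategyError> for BlockExecutionError {
    fn from(err: MyStrategyError) -> Self {
        match err {
            MyStrategyError::Provider(e) => BlockExecutionError::from(e),
            MyStrategyError::Execution(e) => e,
        }
    }
}
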
diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index 30b28b5c66dc..d352dffc0e90 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -114,7 +114,11 @@ where impl Stream for StreamBackfillJob> where - E: BlockExecutorProvider> + Clone + 'static, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + > + Clone + + 'static, P: BlockReader + StateProviderFactory + Clone + Unpin + 'static, { type Item = BackfillJobResult>; @@ -147,7 +151,11 @@ where impl Stream for StreamBackfillJob> where - E: BlockExecutorProvider> + Clone + 'static, + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + > + Clone + + 'static, P: BlockReader + StateProviderFactory + Clone + Unpin + 'static, { type Item = BackfillJobResult>; diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index f9f5dfc914e8..d084f5e68507 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ -2,7 +2,7 @@ use crate::{BackfillJobFactory, ExExNotification, StreamBackfillJob, WalHandle}; use alloy_consensus::BlockHeader; use futures::{Stream, StreamExt}; use reth_chainspec::Head; -use reth_evm::execute::BlockExecutorProvider; +use reth_evm::execute::{BlockExecutionError, BlockExecutorProvider}; use reth_exex_types::ExExHead; use reth_node_api::NodePrimitives; use reth_primitives::EthPrimitives; @@ -105,8 +105,10 @@ where impl ExExNotificationsStream for ExExNotifications where P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider> - + Clone + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + > + Clone + Unpin + 'static, { @@ -157,8 +159,10 @@ where impl Stream for ExExNotifications where P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider> - + Clone + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + > + Clone + Unpin + 'static, { @@ -301,8 +305,10 @@ where impl ExExNotificationsWithHead where P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider> - + Clone + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + > + Clone + Unpin + 'static, { @@ -381,8 +387,10 @@ where impl Stream for ExExNotificationsWithHead where P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider> - + Clone + E: BlockExecutorProvider< + Primitives: NodePrimitives, + Error = BlockExecutionError, + > + Clone + Unpin + 'static, { diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 498297c2db8b..6785a6b9f423 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -7,8 +7,7 @@ use reth_db_api::{ database_metrics::{DatabaseMetadata, DatabaseMetrics}, Database, }; -use reth_engine_primitives::BeaconConsensusEngineHandle; -use reth_evm::execute::BlockExecutorProvider; +use reth_evm::execute::{BlockExecutionError, BlockExecutorProvider}; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; use reth_node_types::{HeaderTy, NodeTypes, NodeTypesWithDBAdapter, NodeTypesWithEngine, TxTy}; @@ -55,7 +54,10 @@ pub trait FullNodeComponents: FullNodeTypes + Clone + 'static { type Evm: ConfigureEvm
<Header = HeaderTy<Self::Types>
, Transaction = TxTy>; /// The type that knows how to execute blocks. - type Executor: BlockExecutorProvider::Primitives>; + type Executor: BlockExecutorProvider< + Primitives = ::Primitives, + Error = BlockExecutionError, + >; /// The consensus type of the node. type Consensus: FullConsensus<::Primitives, Error = ConsensusError> diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 59637ce81b0e..e8fde5fe382c 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -15,6 +15,7 @@ use alloy_rpc_types_trace::geth::{ use async_trait::async_trait; use jsonrpsee::core::RpcResult; use reth_chainspec::EthereumHardforks; +use reth_errors::BlockExecutionError; use reth_evm::{ env::EvmEnv, execute::{BlockExecutorProvider, Executor}, @@ -84,8 +85,10 @@ impl DebugApi { impl DebugApi where Eth: EthApiTypes + TraceExt + 'static, - BlockExecutor: - BlockExecutorProvider>>, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives>, + Error = BlockExecutionError, + >, { /// Acquires a permit to execute a tracing call. async fn acquire_trace_permit(&self) -> Result { @@ -816,8 +819,10 @@ where impl DebugApiServer for DebugApi where Eth: EthApiTypes + EthTransactions + TraceExt + 'static, - BlockExecutor: - BlockExecutorProvider>>, + BlockExecutor: BlockExecutorProvider< + Primitives: NodePrimitives>, + Error = BlockExecutionError, + >, { /// Handler for `debug_getRawHeader` async fn raw_header(&self, block_id: BlockId) -> RpcResult { diff --git a/crates/rpc/rpc/src/validation.rs b/crates/rpc/rpc/src/validation.rs index c621c8b9790c..10eac69da7af 100644 --- a/crates/rpc/rpc/src/validation.rs +++ b/crates/rpc/rpc/src/validation.rs @@ -93,7 +93,7 @@ where + ChainSpecProvider + StateProviderFactory + 'static, - E: BlockExecutorProvider, + E: BlockExecutorProvider, { /// Validates the given block and a [`BidTrace`] against it. pub async fn validate_message_against_block( From d52148d77c78fce58a35d9ae1b05fb7edbb95e9b Mon Sep 17 00:00:00 2001 From: chungquantin <56880684+chungquantin@users.noreply.github.com> Date: Fri, 17 Jan 2025 12:35:01 +0700 Subject: [PATCH 11/13] chore: rebase main --- crates/node/api/src/node.rs | 1 + crates/optimism/evm/src/execute.rs | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/node/api/src/node.rs b/crates/node/api/src/node.rs index 6785a6b9f423..74e5f861b319 100644 --- a/crates/node/api/src/node.rs +++ b/crates/node/api/src/node.rs @@ -7,6 +7,7 @@ use reth_db_api::{ database_metrics::{DatabaseMetadata, DatabaseMetrics}, Database, }; +use reth_engine_primitives::BeaconConsensusEngineHandle; use reth_evm::execute::{BlockExecutionError, BlockExecutorProvider}; use reth_network_api::FullNetwork; use reth_node_core::node_config::NodeConfig; diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 2c6a3c05f543..6dbdf5eea8b5 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -205,8 +205,8 @@ where // The sum of the transaction’s gas limit, Tg, and the gas utilized in this block prior, // must be no greater than the block’s gasLimit. 
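        // Put differently: a transaction may only consume the gas still
        // available in the block. The one exception is visible in the condition
        // below: before the Regolith hardfork, deposit transactions are exempt
        // from this check, so the error is only returned when Regolith is
        // active or the transaction is not a deposit.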
let block_available_gas = block.gas_limit() - cumulative_gas_used; - if transaction.gas_limit() > block_available_gas - && (is_regolith || !transaction.is_deposit()) + if transaction.gas_limit() > block_available_gas && + (is_regolith || !transaction.is_deposit()) { return Err(OpBlockExecutionError::Eth(BlockExecutionError::Validation( BlockValidationError::TransactionGasLimitMoreThanAvailableBlockGas { @@ -285,8 +285,8 @@ where // when set. The state transition process ensures // this is only set for post-Canyon deposit // transactions. - deposit_receipt_version: (transaction.is_deposit() - && self.chain_spec.is_fork_active_at_timestamp( + deposit_receipt_version: (transaction.is_deposit() && + self.chain_spec.is_fork_active_at_timestamp( OpHardfork::Canyon, block.header().timestamp, )) From 63457a461c3deed995ec989e26b7b11277635e49 Mon Sep 17 00:00:00 2001 From: chungquantin <56880684+chungquantin@users.noreply.github.com> Date: Fri, 17 Jan 2025 13:00:24 +0700 Subject: [PATCH 12/13] chore: remove blockchain-tree crate --- crates/blockchain-tree/src/blockchain_tree.rs | 2441 ----------------- crates/blockchain-tree/src/chain.rs | 311 --- crates/blockchain-tree/src/shareable.rs | 206 -- 3 files changed, 2958 deletions(-) delete mode 100644 crates/blockchain-tree/src/blockchain_tree.rs delete mode 100644 crates/blockchain-tree/src/chain.rs delete mode 100644 crates/blockchain-tree/src/shareable.rs diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs deleted file mode 100644 index 264d35f2d10d..000000000000 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ /dev/null @@ -1,2441 +0,0 @@ -//! Implementation of [`BlockchainTree`] - -use crate::{ - externals::TreeNodeTypes, - metrics::{MakeCanonicalAction, MakeCanonicalDurationsRecorder, TreeMetrics}, - state::{SidechainId, TreeState}, - AppendableChain, BlockIndices, BlockchainTreeConfig, ExecutionData, TreeExternals, -}; -use alloy_eips::{BlockNumHash, ForkBlock}; -use alloy_primitives::{BlockHash, BlockNumber, B256, U256}; -use reth_blockchain_tree_api::{ - error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, - BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk, -}; -use reth_consensus::{Consensus, ConsensusError}; -use reth_evm::execute::BlockExecutorProvider; -use reth_execution_errors::{BlockExecutionError, BlockValidationError}; -use reth_execution_types::{Chain, ExecutionOutcome}; -use reth_node_types::NodeTypesWithDB; -use reth_primitives::{ - EthereumHardfork, GotExpected, Hardforks, Receipt, SealedBlock, SealedBlockWithSenders, - SealedHeader, StaticFileSegment, -}; -use reth_provider::{ - BlockExecutionWriter, BlockNumReader, BlockWriter, CanonStateNotification, - CanonStateNotificationSender, CanonStateNotifications, ChainSpecProvider, ChainSplit, - ChainSplitTarget, DBProvider, DisplayBlocksChain, HashedPostStateProvider, HeaderProvider, - ProviderError, StaticFileProviderFactory, StorageLocation, -}; -use reth_stages_api::{MetricEvent, MetricEventsSender}; -use reth_storage_errors::provider::{ProviderResult, RootMismatch}; -use reth_trie::{hashed_cursor::HashedPostStateCursorFactory, StateRoot}; -use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseStateRoot}; -use std::{ - collections::{btree_map::Entry, BTreeMap, HashSet}, - sync::Arc, -}; -use tracing::{debug, error, info, instrument, trace, warn}; - -#[cfg_attr(doc, aquamarine::aquamarine)] -/// A Tree of chains. 
-/// -/// The flowchart represents all the states a block can have inside the tree. -/// -/// - Green blocks belong to the canonical chain and are saved inside the database. -/// - Pending blocks and sidechains are found in-memory inside [`BlockchainTree`]. -/// -/// Both pending chains and sidechains have the same mechanisms, the only difference is when they -/// get committed to the database. -/// -/// For pending, it is an append operation, but for sidechains they need to move the current -/// canonical blocks to the tree (by removing them from the database), and commit the sidechain -/// blocks to the database to become the canonical chain (reorg). -/// -/// `include_mmd!("docs/mermaid/tree.mmd`") -/// -/// # Main functions -/// * [`BlockchainTree::insert_block`]: Connect a block to a chain, execute it, and if valid, insert -/// the block into the tree. -/// * [`BlockchainTree::finalize_block`]: Remove chains that branch off of the now finalized block. -/// * [`BlockchainTree::make_canonical`]: Check if we have the hash of a block that is the current -/// canonical head and commit it to db. -#[derive(Debug)] -pub struct BlockchainTree { - /// The state of the tree - /// - /// Tracks all the chains, the block indices, and the block buffer. - state: TreeState, - /// External components (the database, consensus engine etc.) - externals: TreeExternals, - /// Tree configuration - config: BlockchainTreeConfig, - /// Broadcast channel for canon state changes notifications. - canon_state_notification_sender: CanonStateNotificationSender, - /// Metrics for sync stages. - sync_metrics_tx: Option, - /// Metrics for the blockchain tree. - metrics: TreeMetrics, -} - -impl BlockchainTree { - /// Subscribe to new blocks events. - /// - /// Note: Only canonical blocks are emitted by the tree. - pub fn subscribe_canon_state(&self) -> CanonStateNotifications { - self.canon_state_notification_sender.subscribe() - } - - /// Returns a clone of the sender for the canonical state notifications. - pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender { - self.canon_state_notification_sender.clone() - } -} - -impl BlockchainTree -where - N: TreeNodeTypes, - E: BlockExecutorProvider, -{ - /// Builds the blockchain tree for the node. - /// - /// This method configures the blockchain tree, which is a critical component of the node, - /// responsible for managing the blockchain state, including blocks, transactions, and receipts. - /// It integrates with the consensus mechanism and the EVM for executing transactions. - /// - /// # Parameters - /// - `externals`: External components required by the blockchain tree: - /// - `provider_factory`: A factory for creating various blockchain-related providers, such - /// as for accessing the database or static files. - /// - `consensus`: The consensus configuration, which defines how the node reaches agreement - /// on the blockchain state with other nodes. - /// - `evm_config`: The EVM (Ethereum Virtual Machine) configuration, which affects how - /// smart contracts and transactions are executed. Proper validation of this configuration - /// is crucial for the correct execution of transactions. - /// - `tree_config`: Configuration for the blockchain tree, including any parameters that affect - /// its structure or performance. 
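
A hedged usage sketch of the constructor documented above, written as if it lived next to the removed module. The `externals` and `config` values are assumed to have been assembled elsewhere, and the exact generic bounds on `E` (elided in the surrounding diff) are an assumption.

use crate::{externals::TreeNodeTypes, BlockchainTree, BlockchainTreeConfig, TreeExternals};
use reth_evm::execute::BlockExecutorProvider;
use reth_execution_errors::BlockExecutionError;
use reth_storage_errors::provider::ProviderResult;

fn build_tree<N, E>(
    externals: TreeExternals<N, E>,
    config: BlockchainTreeConfig,
) -> ProviderResult<BlockchainTree<N, E>>
where
    N: TreeNodeTypes,
    E: BlockExecutorProvider<Primitives = N::Primitives, Error = BlockExecutionError>,
{
    let tree = BlockchainTree::new(externals, config)?;
    // Only canonical blocks are emitted on this channel.
    let _canon_events = tree.subscribe_canon_state();
    Ok(tree)
}
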
- pub fn new( - externals: TreeExternals, - config: BlockchainTreeConfig, - ) -> ProviderResult { - let max_reorg_depth = config.max_reorg_depth() as usize; - // The size of the broadcast is twice the maximum reorg depth, because at maximum reorg - // depth at least N blocks must be sent at once. - let (canon_state_notification_sender, _receiver) = - tokio::sync::broadcast::channel(max_reorg_depth * 2); - - let last_canonical_hashes = - externals.fetch_latest_canonical_hashes(config.num_of_canonical_hashes() as usize)?; - - // If we haven't written the finalized block, assume it's zero - let last_finalized_block_number = - externals.fetch_latest_finalized_block_number()?.unwrap_or_default(); - - Ok(Self { - externals, - state: TreeState::new( - last_finalized_block_number, - last_canonical_hashes, - config.max_unconnected_blocks(), - ), - config, - canon_state_notification_sender, - sync_metrics_tx: None, - metrics: Default::default(), - }) - } - - /// Replaces the canon state notification sender. - /// - /// Caution: this will close any existing subscriptions to the previous sender. - #[doc(hidden)] - pub fn with_canon_state_notification_sender( - mut self, - canon_state_notification_sender: CanonStateNotificationSender, - ) -> Self { - self.canon_state_notification_sender = canon_state_notification_sender; - self - } - - /// Set the sync metric events sender. - /// - /// A transmitter for sending synchronization metrics. This is used for monitoring the node's - /// synchronization process with the blockchain network. - pub fn with_sync_metrics_tx(mut self, metrics_tx: MetricEventsSender) -> Self { - self.sync_metrics_tx = Some(metrics_tx); - self - } - - /// Check if the block is known to blockchain tree or database and return its status. - /// - /// Function will check: - /// * if block is inside database returns [`BlockStatus::Valid`]. - /// * if block is inside buffer returns [`BlockStatus::Disconnected`]. - /// * if block is part of the canonical returns [`BlockStatus::Valid`]. - /// - /// Returns an error if - /// - an error occurred while reading from the database. - /// - the block is already finalized - pub(crate) fn is_block_known( - &self, - block: BlockNumHash, - ) -> Result, InsertBlockErrorKind> { - // check if block is canonical - if self.is_block_hash_canonical(&block.hash)? { - return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))); - } - - let last_finalized_block = self.block_indices().last_finalized_block(); - // check db if block is finalized. - if block.number <= last_finalized_block { - // check if block is inside database - if self.externals.provider_factory.provider()?.block_number(block.hash)?.is_some() { - return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical))); - } - - return Err(BlockchainTreeError::PendingBlockIsFinalized { - last_finalized: last_finalized_block, - } - .into()) - } - - // is block inside chain - if let Some(attachment) = self.is_block_inside_sidechain(&block) { - return Ok(Some(BlockStatus::Valid(attachment))); - } - - // check if block is disconnected - if let Some(block) = self.state.buffered_blocks.block(&block.hash) { - return Ok(Some(BlockStatus::Disconnected { - head: self.state.block_indices.canonical_tip(), - missing_ancestor: block.parent_num_hash(), - })) - } - - Ok(None) - } - - /// Expose internal indices of the `BlockchainTree`. - #[inline] - pub const fn block_indices(&self) -> &BlockIndices { - self.state.block_indices() - } - - /// Returns the block with matching hash from any side-chain. 
- /// - /// Caution: This will not return blocks from the canonical chain. - #[inline] - pub fn sidechain_block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> { - self.state.block_by_hash(block_hash) - } - - /// Returns the block with matching hash from any side-chain. - /// - /// Caution: This will not return blocks from the canonical chain. - #[inline] - pub fn block_with_senders_by_hash( - &self, - block_hash: BlockHash, - ) -> Option<&SealedBlockWithSenders> { - self.state.block_with_senders_by_hash(block_hash) - } - - /// Returns the block's receipts with matching hash from any side-chain. - /// - /// Caution: This will not return blocks from the canonical chain. - pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { - self.state.receipts_by_block_hash(block_hash) - } - - /// Returns the block that's considered the `Pending` block, if it exists. - pub fn pending_block(&self) -> Option<&SealedBlock> { - let b = self.block_indices().pending_block_num_hash()?; - self.sidechain_block_by_hash(b.hash) - } - - /// Return items needed to execute on the pending state. - /// This includes: - /// * `BlockHash` of canonical block that chain connects to. Needed for creating database - /// provider for the rest of the state. - /// * `BundleState` changes that happened at the asked `block_hash` - /// * `BTreeMap` list of past pending and canonical hashes, That are - /// needed for evm `BLOCKHASH` opcode. - /// Return none if: - /// * block unknown. - /// * `chain_id` not present in state. - /// * there are no parent hashes stored. - pub fn post_state_data(&self, block_hash: BlockHash) -> Option { - trace!(target: "blockchain_tree", ?block_hash, "Searching for post state data"); - - let canonical_chain = self.state.block_indices.canonical_chain(); - - // if it is part of the chain - if let Some(chain_id) = self.block_indices().get_side_chain_id(&block_hash) { - trace!(target: "blockchain_tree", ?block_hash, "Constructing post state data based on non-canonical chain"); - // get block state - let Some(chain) = self.state.chains.get(&chain_id) else { - debug!(target: "blockchain_tree", ?chain_id, "Chain with ID not present"); - return None; - }; - let block_number = chain.block_number(block_hash)?; - let execution_outcome = chain.execution_outcome_at_block(block_number)?; - - // get parent hashes - let mut parent_block_hashes = self.all_chain_hashes(chain_id); - let Some((first_pending_block_number, _)) = parent_block_hashes.first_key_value() - else { - debug!(target: "blockchain_tree", ?chain_id, "No block hashes stored"); - return None; - }; - let canonical_chain = canonical_chain - .iter() - .filter(|&(key, _)| &key < first_pending_block_number) - .collect::>(); - parent_block_hashes.extend(canonical_chain); - - // get canonical fork. - let canonical_fork = self.canonical_fork(chain_id)?; - return Some(ExecutionData { execution_outcome, parent_block_hashes, canonical_fork }); - } - - // check if there is canonical block - if let Some(canonical_number) = canonical_chain.canonical_number(&block_hash) { - trace!(target: "blockchain_tree", %block_hash, "Constructing post state data based on canonical chain"); - return Some(ExecutionData { - canonical_fork: ForkBlock { number: canonical_number, hash: block_hash }, - execution_outcome: ExecutionOutcome::default(), - parent_block_hashes: canonical_chain.inner().clone(), - }); - } - - None - } - - /// Try inserting a validated [Self::validate_block] block inside the tree. 
- /// - /// If the block's parent block is unknown, this returns [`BlockStatus::Disconnected`] and the - /// block will be buffered until the parent block is inserted and then attached to sidechain - #[instrument(level = "trace", skip_all, fields(block = ?block.num_hash()), target = "blockchain_tree", ret)] - fn try_insert_validated_block( - &mut self, - block: SealedBlockWithSenders, - block_validation_kind: BlockValidationKind, - ) -> Result { - debug_assert!(self.validate_block(&block).is_ok(), "Block must be validated"); - - let parent = block.parent_num_hash(); - - // check if block parent can be found in any side chain. - if let Some(chain_id) = self.block_indices().get_side_chain_id(&parent.hash) { - // found parent in side tree, try to insert there - return self.try_insert_block_into_side_chain(block, chain_id, block_validation_kind); - } - - // if not found, check if the parent can be found inside canonical chain. - if self.is_block_hash_canonical(&parent.hash)? { - return self.try_append_canonical_chain(block.clone(), block_validation_kind); - } - - // this is another check to ensure that if the block points to a canonical block its block - // is valid - if let Some(canonical_parent_number) = - self.block_indices().canonical_number(&block.parent_hash) - { - // we found the parent block in canonical chain - if canonical_parent_number != parent.number { - return Err(ConsensusError::ParentBlockNumberMismatch { - parent_block_number: canonical_parent_number, - block_number: block.number, - } - .into()) - } - } - - // if there is a parent inside the buffer, validate against it. - if let Some(buffered_parent) = self.state.buffered_blocks.block(&parent.hash) { - self.externals.consensus.validate_header_against_parent(&block, buffered_parent)?; - } - - // insert block inside unconnected block buffer. Delaying its execution. - self.state.buffered_blocks.insert_block(block.clone()); - - let block_hash = block.hash(); - // find the lowest ancestor of the block in the buffer to return as the missing parent - // this shouldn't return None because that only happens if the block was evicted, which - // shouldn't happen right after insertion - let lowest_ancestor = self - .state - .buffered_blocks - .lowest_ancestor(&block_hash) - .ok_or(BlockchainTreeError::BlockBufferingFailed { block_hash })?; - - Ok(BlockStatus::Disconnected { - head: self.state.block_indices.canonical_tip(), - missing_ancestor: lowest_ancestor.parent_num_hash(), - }) - } - - /// This tries to append the given block to the canonical chain. - /// - /// WARNING: this expects that the block extends the canonical chain: The block's parent is - /// part of the canonical chain (e.g. the block's parent is the latest canonical hash). See also - /// [Self::is_block_hash_canonical]. - #[instrument(level = "trace", skip_all, target = "blockchain_tree")] - fn try_append_canonical_chain( - &mut self, - block: SealedBlockWithSenders, - block_validation_kind: BlockValidationKind, - ) -> Result { - let parent = block.parent_num_hash(); - let block_num_hash = block.num_hash(); - debug!(target: "blockchain_tree", head = ?block_num_hash.hash, ?parent, "Appending block to canonical chain"); - - let provider = self.externals.provider_factory.provider()?; - - // Validate that the block is post merge - let parent_td = provider - .header_td(&block.parent_hash)? 
- .ok_or_else(|| BlockchainTreeError::CanonicalChain { block_hash: block.parent_hash })?; - - if !self - .externals - .provider_factory - .chain_spec() - .fork(EthereumHardfork::Paris) - .active_at_ttd(parent_td, U256::ZERO) - { - return Err(BlockExecutionError::Validation(BlockValidationError::BlockPreMerge { - hash: block.hash(), - }) - .into()) - } - - let parent_header = provider - .header(&block.parent_hash)? - .ok_or_else(|| BlockchainTreeError::CanonicalChain { block_hash: block.parent_hash })?; - - let parent_sealed_header = SealedHeader::new(parent_header, block.parent_hash); - - let canonical_chain = self.state.block_indices.canonical_chain(); - - let block_attachment = if block.parent_hash == canonical_chain.tip().hash { - BlockAttachment::Canonical - } else { - BlockAttachment::HistoricalFork - }; - - let chain = AppendableChain::new_canonical_fork( - block, - &parent_sealed_header, - canonical_chain.inner(), - parent, - &self.externals, - block_attachment, - block_validation_kind, - )?; - - self.insert_chain(chain); - self.try_connect_buffered_blocks(block_num_hash); - - Ok(BlockStatus::Valid(block_attachment)) - } - - /// Try inserting a block into the given side chain. - /// - /// WARNING: This expects a valid side chain id, see [BlockIndices::get_side_chain_id] - #[instrument(level = "trace", skip_all, target = "blockchain_tree")] - fn try_insert_block_into_side_chain( - &mut self, - block: SealedBlockWithSenders, - chain_id: SidechainId, - block_validation_kind: BlockValidationKind, - ) -> Result { - let block_num_hash = block.num_hash(); - debug!(target: "blockchain_tree", ?block_num_hash, ?chain_id, "Inserting block into side chain"); - // Create a new sidechain by forking the given chain, or append the block if the parent - // block is the top of the given chain. - let block_hashes = self.all_chain_hashes(chain_id); - - // get canonical fork. - let canonical_fork = self.canonical_fork(chain_id).ok_or_else(|| { - BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() } - })?; - - // get chain that block needs to join to. - let parent_chain = self.state.chains.get_mut(&chain_id).ok_or_else(|| { - BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() } - })?; - - let chain_tip = parent_chain.tip().hash(); - let canonical_chain = self.state.block_indices.canonical_chain(); - - // append the block if it is continuing the side chain. 
- let block_attachment = if chain_tip == block.parent_hash { - // check if the chain extends the currently tracked canonical head - let block_attachment = if canonical_fork.hash == canonical_chain.tip().hash { - BlockAttachment::Canonical - } else { - BlockAttachment::HistoricalFork - }; - - let block_hash = block.hash(); - let block_number = block.number; - debug!(target: "blockchain_tree", ?block_hash, ?block_number, "Appending block to side chain"); - parent_chain.append_block( - block, - block_hashes, - canonical_chain.inner(), - &self.externals, - canonical_fork, - block_attachment, - block_validation_kind, - )?; - - self.state.block_indices.insert_non_fork_block(block_number, block_hash, chain_id); - block_attachment - } else { - debug!(target: "blockchain_tree", ?canonical_fork, "Starting new fork from side chain"); - // the block starts a new fork - let chain = parent_chain.new_chain_fork( - block, - block_hashes, - canonical_chain.inner(), - canonical_fork, - &self.externals, - block_validation_kind, - )?; - self.insert_chain(chain); - BlockAttachment::HistoricalFork - }; - - // After we inserted the block, we try to connect any buffered blocks - self.try_connect_buffered_blocks(block_num_hash); - - Ok(BlockStatus::Valid(block_attachment)) - } - - /// Get all block hashes from a sidechain that are not part of the canonical chain. - /// This is a one time operation per block. - /// - /// # Note - /// - /// This is not cached in order to save memory. - fn all_chain_hashes(&self, chain_id: SidechainId) -> BTreeMap { - let mut chain_id = chain_id; - let mut hashes = BTreeMap::new(); - loop { - let Some(chain) = self.state.chains.get(&chain_id) else { return hashes }; - - // The parent chains might contain blocks with overlapping numbers or numbers greater - // than original chain tip. Insert the block hash only if it's not present - // for the given block number and the block number does not exceed the - // original chain tip. - let latest_block_number = hashes - .last_key_value() - .map(|(number, _)| *number) - .unwrap_or_else(|| chain.tip().number); - for block in chain.blocks().values().filter(|b| b.number <= latest_block_number) { - if let Entry::Vacant(e) = hashes.entry(block.number) { - e.insert(block.hash()); - } - } - - let fork_block = chain.fork_block(); - if let Some(next_chain_id) = self.block_indices().get_side_chain_id(&fork_block.hash) { - chain_id = next_chain_id; - } else { - // if there is no fork block that point to other chains, break the loop. - // it means that this fork joins to canonical block. - break - } - } - hashes - } - - /// Get the block at which the given chain forks off the current canonical chain. - /// - /// This is used to figure out what kind of state provider the executor should use to execute - /// the block on - /// - /// Returns `None` if the chain is unknown. - fn canonical_fork(&self, chain_id: SidechainId) -> Option { - let mut chain_id = chain_id; - let mut fork; - loop { - // chain fork block - fork = self.state.chains.get(&chain_id)?.fork_block(); - // get fork block chain - if let Some(fork_chain_id) = self.block_indices().get_side_chain_id(&fork.hash) { - chain_id = fork_chain_id; - continue - } - break - } - (self.block_indices().canonical_hash(&fork.number) == Some(fork.hash)).then_some(fork) - } - - /// Insert a chain into the tree. - /// - /// Inserts a chain into the tree and builds the block indices. 
- fn insert_chain(&mut self, chain: AppendableChain) -> Option { - self.state.insert_chain(chain) - } - - /// Iterate over all child chains that depend on this block and return - /// their ids. - fn find_all_dependent_chains(&self, block: &BlockHash) -> HashSet { - // Find all forks of given block. - let mut dependent_block = - self.block_indices().fork_to_child().get(block).cloned().unwrap_or_default(); - let mut dependent_chains = HashSet::default(); - - while let Some(block) = dependent_block.pop_back() { - // Get chain of dependent block. - let Some(chain_id) = self.block_indices().get_side_chain_id(&block) else { - debug!(target: "blockchain_tree", ?block, "Block not in tree"); - return Default::default(); - }; - - // Find all blocks that fork from this chain. - let Some(chain) = self.state.chains.get(&chain_id) else { - debug!(target: "blockchain_tree", ?chain_id, "Chain not in tree"); - return Default::default(); - }; - for chain_block in chain.blocks().values() { - if let Some(forks) = self.block_indices().fork_to_child().get(&chain_block.hash()) { - // If there are sub forks append them for processing. - dependent_block.extend(forks); - } - } - // Insert dependent chain id. - dependent_chains.insert(chain_id); - } - dependent_chains - } - - /// Inserts unwound chain back into the tree and updates any dependent chains. - /// - /// This method searches for any chain that depended on this block being part of the canonical - /// chain. Each dependent chain's state is then updated with state entries removed from the - /// plain state during the unwind. - /// Returns the result of inserting the chain or None if any of the dependent chains is not - /// in the tree. - fn insert_unwound_chain(&mut self, chain: AppendableChain) -> Option { - // iterate over all blocks in chain and find any fork blocks that are in tree. - for (number, block) in chain.blocks() { - let hash = block.hash(); - - // find all chains that fork from this block. - let chains_to_bump = self.find_all_dependent_chains(&hash); - if !chains_to_bump.is_empty() { - // if there is such chain, revert state to this block. - let mut cloned_execution_outcome = chain.execution_outcome().clone(); - cloned_execution_outcome.revert_to(*number); - - // prepend state to all chains that fork from this block. - for chain_id in chains_to_bump { - let Some(chain) = self.state.chains.get_mut(&chain_id) else { - debug!(target: "blockchain_tree", ?chain_id, "Chain not in tree"); - return None; - }; - - debug!(target: "blockchain_tree", - unwound_block= ?block.num_hash(), - chain_id = ?chain_id, - chain_tip = ?chain.tip().num_hash(), - "Prepend unwound block state to blockchain tree chain"); - - chain.prepend_state(cloned_execution_outcome.state().clone()) - } - } - } - // Insert unwound chain to the tree. - self.insert_chain(chain) - } - - /// Checks the block buffer for the given block. - pub fn get_buffered_block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { - self.state.get_buffered_block(hash) - } - - /// Gets the lowest ancestor for the given block in the block buffer. - pub fn lowest_buffered_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> { - self.state.lowest_buffered_ancestor(hash) - } - - /// Insert a new block into the tree. - /// - /// # Note - /// - /// This recovers transaction signers (unlike [`BlockchainTree::insert_block`]). 
- pub fn insert_block_without_senders( - &mut self, - block: SealedBlock, - ) -> Result { - match block.try_seal_with_senders() { - Ok(block) => self.insert_block(block, BlockValidationKind::Exhaustive), - Err(block) => Err(InsertBlockError::sender_recovery_error(block)), - } - } - - /// Insert block for future execution. - /// - /// Returns an error if the block is invalid. - pub fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { - // validate block consensus rules - if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockError::consensus_error(err, block.block)); - } - - self.state.buffered_blocks.insert_block(block); - Ok(()) - } - - /// Validate if block is correct and satisfies all the consensus rules that concern the header - /// and block body itself. - fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> { - if let Err(e) = - self.externals.consensus.validate_header_with_total_difficulty(block, U256::MAX) - { - error!(?block, "Failed to validate total difficulty for block {}: {e}", block.hash()); - return Err(e); - } - - if let Err(e) = self.externals.consensus.validate_header(block) { - error!(?block, "Failed to validate header {}: {e}", block.hash()); - return Err(e); - } - - if let Err(e) = self.externals.consensus.validate_block_pre_execution(block) { - error!(?block, "Failed to validate block {}: {e}", block.hash()); - return Err(e); - } - - Ok(()) - } - - /// Check if block is found inside a sidechain and its attachment. - /// - /// if it is canonical or extends the canonical chain, return [`BlockAttachment::Canonical`] - /// if it does not extend the canonical chain, return [`BlockAttachment::HistoricalFork`] - /// if the block is not in the tree or its chain id is not valid, return None - #[track_caller] - fn is_block_inside_sidechain(&self, block: &BlockNumHash) -> Option { - // check if block known and is already in the tree - if let Some(chain_id) = self.block_indices().get_side_chain_id(&block.hash) { - // find the canonical fork of this chain - let Some(canonical_fork) = self.canonical_fork(chain_id) else { - debug!(target: "blockchain_tree", chain_id=?chain_id, block=?block.hash, "Chain id not valid"); - return None; - }; - // if the block's chain extends canonical chain - return if canonical_fork == self.block_indices().canonical_tip() { - Some(BlockAttachment::Canonical) - } else { - Some(BlockAttachment::HistoricalFork) - }; - } - None - } - - /// Insert a block (with recovered senders) into the tree. - /// - /// Returns the [`BlockStatus`] on success: - /// - /// - The block is already part of a sidechain in the tree, or - /// - The block is already part of the canonical chain, or - /// - The parent is part of a sidechain in the tree, and we can fork at this block, or - /// - The parent is part of the canonical chain, and we can fork at this block - /// - /// Otherwise, an error is returned, indicating that neither the block nor its parent are part - /// of the chain or any sidechains. - /// - /// This means that if the block becomes canonical, we need to fetch the missing blocks over - /// P2P. - /// - /// If the [`BlockValidationKind::SkipStateRootValidation`] variant is provided the state root - /// is not validated. - /// - /// # Note - /// - /// If the senders have not already been recovered, call - /// [`BlockchainTree::insert_block_without_senders`] instead. 
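
A hedged usage sketch of the insertion API documented above (and defined just below). The status and outcome variants are the ones this module itself uses; the exact generic bounds on `E` are again an assumption, since the surrounding diff elides them.

use crate::{externals::TreeNodeTypes, BlockchainTree};
use reth_blockchain_tree_api::{
    error::InsertBlockError, BlockStatus, BlockValidationKind, InsertPayloadOk,
};
use reth_evm::execute::BlockExecutorProvider;
use reth_execution_errors::BlockExecutionError;
use reth_primitives::SealedBlockWithSenders;

fn insert_and_report<N, E>(
    tree: &mut BlockchainTree<N, E>,
    block: SealedBlockWithSenders,
) -> Result<(), InsertBlockError>
where
    N: TreeNodeTypes,
    E: BlockExecutorProvider<Primitives = N::Primitives, Error = BlockExecutionError>,
{
    match tree.insert_block(block, BlockValidationKind::Exhaustive)? {
        InsertPayloadOk::AlreadySeen(status) => {
            tracing::debug!(?status, "block was already known");
        }
        InsertPayloadOk::Inserted(BlockStatus::Disconnected { missing_ancestor, .. }) => {
            // The parent is unknown, so the block was buffered until the gap is filled.
            tracing::debug!(?missing_ancestor, "block buffered");
        }
        InsertPayloadOk::Inserted(status) => {
            // The block extended the canonical chain or one of the side chains.
            tracing::debug!(?status, "block inserted");
        }
    }
    Ok(())
}
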
- pub fn insert_block( - &mut self, - block: SealedBlockWithSenders, - block_validation_kind: BlockValidationKind, - ) -> Result { - // check if we already have this block - match self.is_block_known(block.num_hash()) { - Ok(Some(status)) => return Ok(InsertPayloadOk::AlreadySeen(status)), - Err(err) => return Err(InsertBlockError::new(block.block, err)), - _ => {} - } - - // validate block consensus rules - if let Err(err) = self.validate_block(&block) { - return Err(InsertBlockError::consensus_error(err, block.block)); - } - - let status = self - .try_insert_validated_block(block.clone(), block_validation_kind) - .map_err(|kind| InsertBlockError::new(block.block, kind))?; - Ok(InsertPayloadOk::Inserted(status)) - } - - /// Discard all blocks that precede block number from the buffer. - pub fn remove_old_blocks(&mut self, block: BlockNumber) { - self.state.buffered_blocks.remove_old_blocks(block); - } - - /// Finalize blocks up until and including `finalized_block`, and remove them from the tree. - pub fn finalize_block(&mut self, finalized_block: BlockNumber) -> ProviderResult<()> { - // remove blocks - let mut remove_chains = self.state.block_indices.finalize_canonical_blocks( - finalized_block, - self.config.num_of_additional_canonical_block_hashes(), - ); - // remove chains of removed blocks - while let Some(chain_id) = remove_chains.pop_first() { - if let Some(chain) = self.state.chains.remove(&chain_id) { - remove_chains.extend(self.state.block_indices.remove_chain(&chain)); - } - } - // clean block buffer. - self.remove_old_blocks(finalized_block); - - // save finalized block in db. - self.externals.save_finalized_block_number(finalized_block)?; - - Ok(()) - } - - /// Reads the last `N` canonical hashes from the database and updates the block indices of the - /// tree by attempting to connect the buffered blocks to canonical hashes. - /// - /// - /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the - /// `BLOCKHASH` opcode in the EVM. - /// - /// # Note - /// - /// This finalizes `last_finalized_block` prior to reading the canonical hashes (using - /// [`BlockchainTree::finalize_block`]). - pub fn connect_buffered_blocks_to_canonical_hashes_and_finalize( - &mut self, - last_finalized_block: BlockNumber, - ) -> ProviderResult<()> { - self.finalize_block(last_finalized_block)?; - - let last_canonical_hashes = self.update_block_hashes()?; - - self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; - - Ok(()) - } - - /// Update all block hashes. iterate over present and new list of canonical hashes and compare - /// them. Remove all mismatches, disconnect them and removes all chains. - pub fn update_block_hashes(&mut self) -> ProviderResult> { - let last_canonical_hashes = self - .externals - .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; - - let (mut remove_chains, _) = - self.state.block_indices.update_block_hashes(last_canonical_hashes.clone()); - - // remove all chains that got discarded - while let Some(chain_id) = remove_chains.first() { - if let Some(chain) = self.state.chains.remove(chain_id) { - remove_chains.extend(self.state.block_indices.remove_chain(&chain)); - } - } - - Ok(last_canonical_hashes) - } - - /// Update all block hashes. iterate over present and new list of canonical hashes and compare - /// them. Remove all mismatches, disconnect them, removes all chains and clears all buffered - /// blocks before the tip. 
- pub fn update_block_hashes_and_clear_buffered( - &mut self, - ) -> ProviderResult> { - let chain = self.update_block_hashes()?; - - if let Some((block, _)) = chain.last_key_value() { - self.remove_old_blocks(*block); - } - - Ok(chain) - } - - /// Reads the last `N` canonical hashes from the database and updates the block indices of the - /// tree by attempting to connect the buffered blocks to canonical hashes. - /// - /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the - /// `BLOCKHASH` opcode in the EVM. - pub fn connect_buffered_blocks_to_canonical_hashes(&mut self) -> ProviderResult<()> { - let last_canonical_hashes = self - .externals - .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; - self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?; - - Ok(()) - } - - fn connect_buffered_blocks_to_hashes( - &mut self, - hashes: impl IntoIterator>, - ) -> ProviderResult<()> { - // check unconnected block buffer for children of the canonical hashes - for added_block in hashes { - self.try_connect_buffered_blocks(added_block.into()) - } - - // check unconnected block buffer for children of the chains - let mut all_chain_blocks = Vec::new(); - for chain in self.state.chains.values() { - all_chain_blocks.reserve_exact(chain.blocks().len()); - for (&number, block) in chain.blocks() { - all_chain_blocks.push(BlockNumHash { number, hash: block.hash() }) - } - } - for block in all_chain_blocks { - self.try_connect_buffered_blocks(block) - } - - Ok(()) - } - - /// Connect unconnected (buffered) blocks if the new block closes a gap. - /// - /// This will try to insert all children of the new block, extending its chain. - /// - /// If all children are valid, then this essentially appends all child blocks to the - /// new block's chain. - fn try_connect_buffered_blocks(&mut self, new_block: BlockNumHash) { - trace!(target: "blockchain_tree", ?new_block, "try_connect_buffered_blocks"); - - // first remove all the children of the new block from the buffer - let include_blocks = self.state.buffered_blocks.remove_block_with_children(&new_block.hash); - // then try to reinsert them into the tree - for block in include_blocks { - // don't fail on error, just ignore the block. - let _ = self - .try_insert_validated_block(block, BlockValidationKind::SkipStateRootValidation) - .map_err(|err| { - debug!(target: "blockchain_tree", %err, "Failed to insert buffered block"); - err - }); - } - } - - /// Removes chain corresponding to provided chain id from block indices, - /// splits it at split target, and returns the canonical part of it. - /// Returns [None] if chain is missing. - /// - /// The pending part of the chain is reinserted back into the tree with the same `chain_id`. - fn remove_and_split_chain( - &mut self, - chain_id: SidechainId, - split_at: ChainSplitTarget, - ) -> Option { - let chain = self.state.chains.remove(&chain_id)?; - match chain.into_inner().split(split_at) { - ChainSplit::Split { canonical, pending } => { - trace!(target: "blockchain_tree", ?canonical, ?pending, "Split chain"); - // rest of split chain is inserted back with same chain_id. 
- self.state.block_indices.insert_chain(chain_id, &pending); - self.state.chains.insert(chain_id, AppendableChain::new(pending)); - Some(canonical) - } - ChainSplit::NoSplitCanonical(canonical) => { - trace!(target: "blockchain_tree", "No split on canonical chain"); - Some(canonical) - } - ChainSplit::NoSplitPending(_) => { - unreachable!("Should not happen as block indices guarantee structure of blocks") - } - } - } - - /// Attempts to find the header for the given block hash if it is canonical. - /// - /// Returns `Ok(None)` if the block hash is not canonical (block hash does not exist, or is - /// included in a sidechain). - /// - /// Note: this does not distinguish between a block that is finalized and a block that is not - /// finalized yet, only whether it is part of the canonical chain or not. - pub fn find_canonical_header( - &self, - hash: &BlockHash, - ) -> Result, ProviderError> { - // if the indices show that the block hash is not canonical, it's either in a sidechain or - // canonical, but in the db. If it is in a sidechain, it is not canonical. If it is missing - // in the db, then it is also not canonical. - - let provider = self.externals.provider_factory.provider()?; - - let mut header = None; - if let Some(num) = self.block_indices().canonical_number(hash) { - header = provider.header_by_number(num)?; - } - - if header.is_none() && self.sidechain_block_by_hash(*hash).is_some() { - return Ok(None) - } - - if header.is_none() { - header = provider.header(hash)? - } - - Ok(header.map(|header| SealedHeader::new(header, *hash))) - } - - /// Determines whether or not a block is canonical, checking the db if necessary. - /// - /// Note: this does not distinguish between a block that is finalized and a block that is not - /// finalized yet, only whether it is part of the canonical chain or not. - pub fn is_block_hash_canonical(&self, hash: &BlockHash) -> Result { - self.find_canonical_header(hash).map(|header| header.is_some()) - } - - /// Make a block and its parent(s) part of the canonical chain and commit them to the database - /// - /// # Note - /// - /// This unwinds the database if necessary, i.e. if parts of the canonical chain have been - /// reorged. - /// - /// # Returns - /// - /// Returns `Ok` if the blocks were canonicalized, or if the blocks were already canonical. - #[track_caller] - #[instrument(level = "trace", skip(self), target = "blockchain_tree")] - pub fn make_canonical( - &mut self, - block_hash: BlockHash, - ) -> Result { - let mut durations_recorder = MakeCanonicalDurationsRecorder::default(); - - let old_block_indices = self.block_indices().clone(); - let old_buffered_blocks = self.state.buffered_blocks.parent_to_child.clone(); - durations_recorder.record_relative(MakeCanonicalAction::CloneOldBlocks); - - // If block is already canonical don't return error. 
- let canonical_header = self.find_canonical_header(&block_hash)?; - durations_recorder.record_relative(MakeCanonicalAction::FindCanonicalHeader); - if let Some(header) = canonical_header { - info!(target: "blockchain_tree", %block_hash, "Block is already canonical, ignoring."); - // TODO: this could be fetched from the chainspec first - let td = - self.externals.provider_factory.provider()?.header_td(&block_hash)?.ok_or_else( - || { - CanonicalError::from(BlockValidationError::MissingTotalDifficulty { - hash: block_hash, - }) - }, - )?; - - if !self - .externals - .provider_factory - .chain_spec() - .fork(EthereumHardfork::Paris) - .active_at_ttd(td, U256::ZERO) - { - return Err(CanonicalError::from(BlockValidationError::BlockPreMerge { - hash: block_hash, - })) - } - - let head = self.state.block_indices.canonical_tip(); - return Ok(CanonicalOutcome::AlreadyCanonical { header, head }); - } - - let Some(chain_id) = self.block_indices().get_side_chain_id(&block_hash) else { - debug!(target: "blockchain_tree", ?block_hash, "Block hash not found in block indices"); - return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { - block_hash, - })) - }; - - // we are splitting chain at the block hash that we want to make canonical - let Some(canonical) = self.remove_and_split_chain(chain_id, block_hash.into()) else { - debug!(target: "blockchain_tree", ?block_hash, ?chain_id, "Chain not present"); - return Err(CanonicalError::from(BlockchainTreeError::BlockSideChainIdConsistency { - chain_id: chain_id.into(), - })) - }; - trace!(target: "blockchain_tree", chain = ?canonical, "Found chain to make canonical"); - durations_recorder.record_relative(MakeCanonicalAction::SplitChain); - - let mut fork_block = canonical.fork_block(); - let mut chains_to_promote = vec![canonical]; - - // loop while fork blocks are found in Tree. - while let Some(chain_id) = self.block_indices().get_side_chain_id(&fork_block.hash) { - // canonical chain is lower part of the chain. - let Some(canonical) = - self.remove_and_split_chain(chain_id, ChainSplitTarget::Number(fork_block.number)) - else { - debug!(target: "blockchain_tree", ?fork_block, ?chain_id, "Fork not present"); - return Err(CanonicalError::from( - BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() }, - )); - }; - fork_block = canonical.fork_block(); - chains_to_promote.push(canonical); - } - durations_recorder.record_relative(MakeCanonicalAction::SplitChainForks); - - let old_tip = self.block_indices().canonical_tip(); - // Merge all chains into one chain. 
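        // `chains_to_promote` was filled from the target block's chain down to
        // the segment that attaches to the current canonical chain, so the
        // element popped below is that lowest segment; the remaining segments
        // are then appended on top of it in reverse push order, rebuilding the
        // new canonical chain in ascending block order.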
- let Some(mut new_canon_chain) = chains_to_promote.pop() else { - debug!(target: "blockchain_tree", "No blocks in the chain to make canonical"); - return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { - block_hash: fork_block.hash, - })) - }; - trace!(target: "blockchain_tree", ?new_canon_chain, "Merging chains"); - let mut chain_appended = false; - for chain in chains_to_promote.into_iter().rev() { - trace!(target: "blockchain_tree", ?chain, "Appending chain"); - let block_hash = chain.fork_block().hash; - new_canon_chain.append_chain(chain).map_err(|_| { - CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { block_hash }) - })?; - chain_appended = true; - } - durations_recorder.record_relative(MakeCanonicalAction::MergeAllChains); - - if chain_appended { - trace!(target: "blockchain_tree", ?new_canon_chain, "Canonical chain appended"); - } - // update canonical index - self.state.block_indices.canonicalize_blocks(new_canon_chain.blocks()); - durations_recorder.record_relative(MakeCanonicalAction::UpdateCanonicalIndex); - - debug!( - target: "blockchain_tree", - "Committing new canonical chain: {}", DisplayBlocksChain(new_canon_chain.blocks()) - ); - - // If chain extends the tip - let chain_notification = if new_canon_chain.fork_block().hash == old_tip.hash { - // Commit new canonical chain to database. - self.commit_canonical_to_database(new_canon_chain.clone(), &mut durations_recorder)?; - CanonStateNotification::Commit { new: Arc::new(new_canon_chain) } - } else { - // It forks to canonical block that is not the tip. - let canon_fork: BlockNumHash = new_canon_chain.fork_block(); - // sanity check - if self.block_indices().canonical_hash(&canon_fork.number) != Some(canon_fork.hash) { - error!( - target: "blockchain_tree", - ?canon_fork, - block_indices=?self.block_indices(), - "All chains should point to canonical chain" - ); - unreachable!("all chains should point to canonical chain."); - } - - let old_canon_chain = - self.revert_canonical_from_database(canon_fork.number).inspect_err(|error| { - error!( - target: "blockchain_tree", - "Reverting canonical chain failed with error: {:?}\n\ - Old BlockIndices are:{:?}\n\ - New BlockIndices are: {:?}\n\ - Old BufferedBlocks are:{:?}", - error, old_block_indices, self.block_indices(), old_buffered_blocks - ); - })?; - durations_recorder - .record_relative(MakeCanonicalAction::RevertCanonicalChainFromDatabase); - - // Commit new canonical chain. - self.commit_canonical_to_database(new_canon_chain.clone(), &mut durations_recorder)?; - - if let Some(old_canon_chain) = old_canon_chain { - self.update_reorg_metrics(old_canon_chain.len() as f64); - - // Insert old canonical chain back into tree. - self.insert_unwound_chain(AppendableChain::new(old_canon_chain.clone())); - durations_recorder.record_relative(MakeCanonicalAction::InsertOldCanonicalChain); - - CanonStateNotification::Reorg { - old: Arc::new(old_canon_chain), - new: Arc::new(new_canon_chain), - } - } else { - // error here to confirm that we are reverting nothing from db. 
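The commit-versus-reorg branch above boils down to a small decision: extending the old tip is a plain commit, forking below it with reverted blocks is a reorg, and forking below it with nothing actually reverted degenerates to a commit (the case the `error!` log flags). `ToyChain` and `ToyNotification` are illustrative stand-ins for `Chain` and `CanonStateNotification`:

    #[derive(Clone)]
    struct ToyChain {
        fork_hash: u64,
        blocks: Vec<u64>,
    }

    enum ToyNotification {
        Commit { new: ToyChain },
        Reorg { old: ToyChain, new: ToyChain },
    }

    fn notification_for(
        new_canon: ToyChain,
        old_tip_hash: u64,
        unwound: Option<ToyChain>,
    ) -> ToyNotification {
        if new_canon.fork_hash == old_tip_hash {
            // New chain extends the current tip: nothing is unwound.
            ToyNotification::Commit { new: new_canon }
        } else if let Some(old) = unwound {
            // Forked below the tip and blocks were reverted from the database.
            ToyNotification::Reorg { old, new: new_canon }
        } else {
            // Forked below the tip but the database had nothing to revert.
            ToyNotification::Commit { new: new_canon }
        }
    }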
- error!(target: "blockchain_tree", %block_hash, "Nothing was removed from database"); - CanonStateNotification::Commit { new: Arc::new(new_canon_chain) } - } - }; - - debug!( - target: "blockchain_tree", - actions = ?durations_recorder.actions, - "Canonicalization finished" - ); - - // clear trie updates for other children - self.block_indices() - .fork_to_child() - .get(&old_tip.hash) - .cloned() - .unwrap_or_default() - .into_iter() - .for_each(|child| { - if let Some(chain_id) = self.block_indices().get_side_chain_id(&child) { - if let Some(chain) = self.state.chains.get_mut(&chain_id) { - chain.clear_trie_updates(); - } - } - }); - - durations_recorder.record_relative(MakeCanonicalAction::ClearTrieUpdatesForOtherChildren); - - // Send notification about new canonical chain and return outcome of canonicalization. - let outcome = CanonicalOutcome::Committed { head: chain_notification.tip().header.clone() }; - let _ = self.canon_state_notification_sender.send(chain_notification); - Ok(outcome) - } - - /// Write the given chain to the database as canonical. - fn commit_canonical_to_database( - &self, - chain: Chain, - recorder: &mut MakeCanonicalDurationsRecorder, - ) -> Result<(), CanonicalError> { - let (blocks, state, chain_trie_updates) = chain.into_inner(); - let hashed_state = self.externals.provider_factory.hashed_post_state(state.state()); - let prefix_sets = hashed_state.construct_prefix_sets().freeze(); - let hashed_state_sorted = hashed_state.into_sorted(); - - // Compute state root or retrieve cached trie updates before opening write transaction. - let block_hash_numbers = - blocks.iter().map(|(number, b)| (number, b.hash())).collect::>(); - let trie_updates = match chain_trie_updates { - Some(updates) => { - debug!(target: "blockchain_tree", blocks = ?block_hash_numbers, "Using cached trie updates"); - self.metrics.trie_updates_insert_cached.increment(1); - updates - } - None => { - debug!(target: "blockchain_tree", blocks = ?block_hash_numbers, "Recomputing state root for insert"); - let provider = self - .externals - .provider_factory - .provider()? - // State root calculation can take a while, and we're sure no write transaction - // will be open in parallel. See https://github.com/paradigmxyz/reth/issues/6168. 
- .disable_long_read_transaction_safety(); - let (state_root, trie_updates) = StateRoot::from_tx(provider.tx_ref()) - .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( - DatabaseHashedCursorFactory::new(provider.tx_ref()), - &hashed_state_sorted, - )) - .with_prefix_sets(prefix_sets) - .root_with_updates() - .map_err(BlockValidationError::from)?; - let tip = blocks.tip(); - if state_root != tip.state_root { - return Err(ProviderError::StateRootMismatch(Box::new(RootMismatch { - root: GotExpected { got: state_root, expected: tip.state_root }, - block_number: tip.number, - block_hash: tip.hash(), - })) - .into()) - } - self.metrics.trie_updates_insert_recomputed.increment(1); - trie_updates - } - }; - recorder.record_relative(MakeCanonicalAction::RetrieveStateTrieUpdates); - - let provider_rw = self.externals.provider_factory.provider_rw()?; - provider_rw - .append_blocks_with_state( - blocks.into_blocks().collect(), - state, - hashed_state_sorted, - trie_updates, - ) - .map_err(|e| CanonicalError::CanonicalCommit(e.to_string()))?; - - provider_rw.commit()?; - recorder.record_relative(MakeCanonicalAction::CommitCanonicalChainToDatabase); - - Ok(()) - } - - /// Unwind tables and put it inside state - pub fn unwind(&mut self, unwind_to: BlockNumber) -> Result<(), CanonicalError> { - // nothing to be done if unwind_to is higher then the tip - if self.block_indices().canonical_tip().number <= unwind_to { - return Ok(()); - } - // revert `N` blocks from current canonical chain and put them inside BlockchainTree - let old_canon_chain = self.revert_canonical_from_database(unwind_to)?; - - // check if there is block in chain - if let Some(old_canon_chain) = old_canon_chain { - self.state.block_indices.unwind_canonical_chain(unwind_to); - // insert old canonical chain to BlockchainTree. - self.insert_unwound_chain(AppendableChain::new(old_canon_chain)); - } - - Ok(()) - } - - /// Reverts the canonical chain down to the given block from the database and returns the - /// unwound chain. - /// - /// The block, `revert_until`, is __non-inclusive__, i.e. `revert_until` stays in the database. - fn revert_canonical_from_database( - &self, - revert_until: BlockNumber, - ) -> Result, CanonicalError> { - // This should only happen when an optimistic sync target was re-orged. - // - // Static files generally contain finalized data. The blockchain tree only deals - // with non-finalized data. The only scenario where canonical reverts go past the highest - // static file is when an optimistic sync occurred and non-finalized data was written to - // static files. - if self - .externals - .provider_factory - .static_file_provider() - .get_highest_static_file_block(StaticFileSegment::Headers) - .unwrap_or_default() > - revert_until - { - trace!( - target: "blockchain_tree", - "Reverting optimistic canonical chain to block {}", - revert_until - ); - return Err(CanonicalError::OptimisticTargetRevert(revert_until)); - } - - // read data that is needed for new sidechain - let provider_rw = self.externals.provider_factory.provider_rw()?; - - let tip = provider_rw.last_block_number()?; - let revert_range = (revert_until + 1)..=tip; - info!(target: "blockchain_tree", "REORG: revert canonical from database by unwinding chain blocks {:?}", revert_range); - // read block and execution result from database. and remove traces of block from tables. 
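The guard and the unwind range used by `revert_canonical_from_database` above can be shown in isolation; the function below is a simplified sketch with plain integers standing in for the static file provider and the database, not the provider API:

    // Reverts past the highest static-file block are rejected as optimistic-target
    // reverts, and `revert_until` itself stays in the database, so the unwound
    // range is `(revert_until + 1)..=tip`.
    fn plan_revert(
        highest_static_file_block: u64,
        revert_until: u64,
        tip: u64,
    ) -> Result<std::ops::RangeInclusive<u64>, String> {
        if highest_static_file_block > revert_until {
            return Err(format!("optimistic target revert to block {revert_until}"));
        }
        Ok((revert_until + 1)..=tip)
    }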
- let blocks_and_execution = provider_rw - .take_block_and_execution_above(revert_until, StorageLocation::Database) - .map_err(|e| CanonicalError::CanonicalRevert(e.to_string()))?; - - provider_rw.commit()?; - - if blocks_and_execution.is_empty() { - Ok(None) - } else { - Ok(Some(blocks_and_execution)) - } - } - - fn update_reorg_metrics(&self, reorg_depth: f64) { - self.metrics.reorgs.increment(1); - self.metrics.latest_reorg_depth.set(reorg_depth); - } - - /// Update blockchain tree chains (canonical and sidechains) and sync metrics. - /// - /// NOTE: this method should not be called during the pipeline sync, because otherwise the sync - /// checkpoint metric will get overwritten. Buffered blocks metrics are updated in - /// [`BlockBuffer`](crate::block_buffer::BlockBuffer) during the pipeline sync. - pub(crate) fn update_chains_metrics(&mut self) { - let height = self.state.block_indices.canonical_tip().number; - - let longest_sidechain_height = - self.state.chains.values().map(|chain| chain.tip().number).max(); - if let Some(longest_sidechain_height) = longest_sidechain_height { - self.metrics.longest_sidechain_height.set(longest_sidechain_height as f64); - } - - self.metrics.sidechains.set(self.state.chains.len() as f64); - self.metrics.canonical_chain_height.set(height as f64); - if let Some(metrics_tx) = self.sync_metrics_tx.as_mut() { - let _ = metrics_tx.send(MetricEvent::SyncHeight { height }); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_consensus::{Header, TxEip1559, EMPTY_ROOT_HASH}; - use alloy_eips::{ - eip1559::{ETHEREUM_BLOCK_GAS_LIMIT, INITIAL_BASE_FEE}, - eip4895::Withdrawals, - }; - use alloy_genesis::{Genesis, GenesisAccount}; - use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, B256}; - use assert_matches::assert_matches; - use linked_hash_set::LinkedHashSet; - use reth_chainspec::{ChainSpecBuilder, MAINNET, MIN_TRANSACTION_GAS}; - use reth_consensus::test_utils::TestConsensus; - use reth_db::tables; - use reth_db_api::transaction::DbTxMut; - use reth_evm::test_utils::MockExecutorProvider; - use reth_evm_ethereum::execute::EthExecutorProvider; - use reth_node_types::FullNodePrimitives; - use reth_primitives::{ - proofs::{calculate_receipt_root, calculate_transaction_root}, - Account, BlockBody, RecoveredTx, Transaction, TransactionSigned, - }; - use reth_provider::{ - providers::ProviderNodeTypes, - test_utils::{ - blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec, - MockNodeTypesWithDB, - }, - ProviderFactory, StorageLocation, - }; - use reth_stages_api::StageCheckpoint; - use reth_trie::{root::state_root_unhashed, StateRoot}; - use std::collections::HashMap; - - fn setup_externals( - exec_res: Vec, - ) -> TreeExternals { - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(MAINNET.genesis.clone()) - .shanghai_activated() - .build(), - ); - let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec); - let consensus = Arc::new(TestConsensus::default()); - let executor_factory = MockExecutorProvider::default(); - executor_factory.extend(exec_res); - - TreeExternals::new(provider_factory, consensus, executor_factory) - } - - fn setup_genesis< - N: ProviderNodeTypes< - Primitives: FullNodePrimitives< - BlockBody = reth_primitives::BlockBody, - BlockHeader = reth_primitives::Header, - >, - >, - >( - factory: &ProviderFactory, - mut genesis: SealedBlock, - ) { - // insert genesis to db. 
- - genesis.header.set_block_number(10); - genesis.header.set_state_root(EMPTY_ROOT_HASH); - let provider = factory.provider_rw().unwrap(); - - provider - .insert_historical_block( - genesis.try_seal_with_senders().expect("invalid tx signature in genesis"), - ) - .unwrap(); - - // insert first 10 blocks - for i in 0..10 { - provider - .tx_ref() - .put::(i, B256::new([100 + i as u8; 32])) - .unwrap(); - } - provider - .tx_ref() - .put::("Finish".to_string(), StageCheckpoint::new(10)) - .unwrap(); - provider.commit().unwrap(); - } - - /// Test data structure that will check tree internals - #[derive(Default, Debug)] - struct TreeTester { - /// Number of chains - chain_num: Option, - /// Check block to chain index - block_to_chain: Option>, - /// Check fork to child index - fork_to_child: Option>>, - /// Pending blocks - pending_blocks: Option<(BlockNumber, HashSet)>, - /// Buffered blocks - buffered_blocks: Option>, - } - - impl TreeTester { - const fn with_chain_num(mut self, chain_num: usize) -> Self { - self.chain_num = Some(chain_num); - self - } - - fn with_block_to_chain(mut self, block_to_chain: HashMap) -> Self { - self.block_to_chain = Some(block_to_chain); - self - } - - fn with_fork_to_child( - mut self, - fork_to_child: HashMap>, - ) -> Self { - self.fork_to_child = Some(fork_to_child); - self - } - - fn with_buffered_blocks( - mut self, - buffered_blocks: HashMap, - ) -> Self { - self.buffered_blocks = Some(buffered_blocks); - self - } - - fn with_pending_blocks( - mut self, - pending_blocks: (BlockNumber, HashSet), - ) -> Self { - self.pending_blocks = Some(pending_blocks); - self - } - - fn assert(self, tree: &BlockchainTree) { - if let Some(chain_num) = self.chain_num { - assert_eq!(tree.state.chains.len(), chain_num); - } - if let Some(block_to_chain) = self.block_to_chain { - assert_eq!(*tree.state.block_indices.blocks_to_chain(), block_to_chain); - } - if let Some(fork_to_child) = self.fork_to_child { - let mut x: HashMap> = - HashMap::with_capacity(fork_to_child.len()); - for (key, hash_set) in fork_to_child { - x.insert(key, hash_set.into_iter().collect()); - } - assert_eq!(*tree.state.block_indices.fork_to_child(), x); - } - if let Some(pending_blocks) = self.pending_blocks { - let (num, hashes) = tree.state.block_indices.pending_blocks(); - let hashes = hashes.into_iter().collect::>(); - assert_eq!((num, hashes), pending_blocks); - } - if let Some(buffered_blocks) = self.buffered_blocks { - assert_eq!(*tree.state.buffered_blocks.blocks(), buffered_blocks); - } - } - } - - #[test] - fn consecutive_reorgs() { - let signer = Address::random(); - let initial_signer_balance = U256::from(10).pow(U256::from(18)); - let chain_spec = Arc::new( - ChainSpecBuilder::default() - .chain(MAINNET.chain) - .genesis(Genesis { - alloc: BTreeMap::from([( - signer, - GenesisAccount { balance: initial_signer_balance, ..Default::default() }, - )]), - ..MAINNET.genesis.clone() - }) - .shanghai_activated() - .build(), - ); - let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); - let consensus = Arc::new(TestConsensus::default()); - let executor_provider = EthExecutorProvider::ethereum(chain_spec.clone()); - - { - let provider_rw = provider_factory.provider_rw().unwrap(); - provider_rw - .insert_block( - SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default()) - .try_seal_with_senders() - .unwrap(), - StorageLocation::Database, - ) - .unwrap(); - let account = Account { balance: initial_signer_balance, ..Default::default() }; - 
provider_rw.tx_ref().put::(signer, account).unwrap(); - provider_rw.tx_ref().put::(keccak256(signer), account).unwrap(); - provider_rw.commit().unwrap(); - } - - let single_tx_cost = U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS); - let mock_tx = |nonce: u64| -> RecoveredTx<_> { - TransactionSigned::new_unhashed( - Transaction::Eip1559(TxEip1559 { - chain_id: chain_spec.chain.id(), - nonce, - gas_limit: MIN_TRANSACTION_GAS, - to: Address::ZERO.into(), - max_fee_per_gas: INITIAL_BASE_FEE as u128, - ..Default::default() - }), - Signature::test_signature(), - ) - .with_signer(signer) - }; - - let mock_block = |number: u64, - parent: Option, - body: Vec>, - num_of_signer_txs: u64| - -> SealedBlockWithSenders { - let signed_body = body.clone().into_iter().map(|tx| tx.into_tx()).collect::>(); - let transactions_root = calculate_transaction_root(&signed_body); - let receipts = body - .iter() - .enumerate() - .map(|(idx, tx)| { - Receipt { - tx_type: tx.tx_type(), - success: true, - cumulative_gas_used: (idx as u64 + 1) * MIN_TRANSACTION_GAS, - ..Default::default() - } - .with_bloom() - }) - .collect::>(); - - // receipts root computation is different for OP - let receipts_root = calculate_receipt_root(&receipts); - - let header = Header { - number, - parent_hash: parent.unwrap_or_default(), - gas_used: body.len() as u64 * MIN_TRANSACTION_GAS, - gas_limit: ETHEREUM_BLOCK_GAS_LIMIT, - mix_hash: B256::random(), - base_fee_per_gas: Some(INITIAL_BASE_FEE), - transactions_root, - receipts_root, - state_root: state_root_unhashed(HashMap::from([( - signer, - Account { - balance: initial_signer_balance - - (single_tx_cost * U256::from(num_of_signer_txs)), - nonce: num_of_signer_txs, - ..Default::default() - } - .into_trie_account(EMPTY_ROOT_HASH), - )])), - ..Default::default() - }; - - SealedBlockWithSenders::new( - SealedBlock::new( - SealedHeader::seal(header), - BlockBody { - transactions: signed_body, - ommers: Vec::new(), - withdrawals: Some(Withdrawals::default()), - }, - ), - body.iter().map(|tx| tx.signer()).collect(), - ) - .unwrap() - }; - - let fork_block = mock_block(1, Some(chain_spec.genesis_hash()), Vec::from([mock_tx(0)]), 1); - - let canonical_block_1 = - mock_block(2, Some(fork_block.hash()), Vec::from([mock_tx(1), mock_tx(2)]), 3); - let canonical_block_2 = mock_block(3, Some(canonical_block_1.hash()), Vec::new(), 3); - let canonical_block_3 = - mock_block(4, Some(canonical_block_2.hash()), Vec::from([mock_tx(3)]), 4); - - let sidechain_block_1 = mock_block(2, Some(fork_block.hash()), Vec::from([mock_tx(1)]), 2); - let sidechain_block_2 = - mock_block(3, Some(sidechain_block_1.hash()), Vec::from([mock_tx(2)]), 3); - - let mut tree = BlockchainTree::new( - TreeExternals::new(provider_factory, consensus, executor_provider), - BlockchainTreeConfig::default(), - ) - .expect("failed to create tree"); - - tree.insert_block(fork_block.clone(), BlockValidationKind::Exhaustive).unwrap(); - - assert_eq!( - tree.make_canonical(fork_block.hash()).unwrap(), - CanonicalOutcome::Committed { head: fork_block.header.clone() } - ); - - assert_eq!( - tree.insert_block(canonical_block_1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.make_canonical(canonical_block_1.hash()).unwrap(), - CanonicalOutcome::Committed { head: canonical_block_1.header.clone() } - ); - - assert_eq!( - tree.insert_block(canonical_block_2, BlockValidationKind::Exhaustive).unwrap(), - 
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(sidechain_block_1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - assert_eq!( - tree.make_canonical(sidechain_block_1.hash()).unwrap(), - CanonicalOutcome::Committed { head: sidechain_block_1.header.clone() } - ); - - assert_eq!( - tree.make_canonical(canonical_block_1.hash()).unwrap(), - CanonicalOutcome::Committed { head: canonical_block_1.header.clone() } - ); - - assert_eq!( - tree.insert_block(sidechain_block_2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - assert_eq!( - tree.make_canonical(sidechain_block_2.hash()).unwrap(), - CanonicalOutcome::Committed { head: sidechain_block_2.header.clone() } - ); - - assert_eq!( - tree.insert_block(canonical_block_3.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - assert_eq!( - tree.make_canonical(canonical_block_3.hash()).unwrap(), - CanonicalOutcome::Committed { head: canonical_block_3.header.clone() } - ); - } - - #[test] - fn sidechain_block_hashes() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let (block3, exec3) = data.blocks[2].clone(); - let (block4, exec4) = data.blocks[3].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = - setup_externals(vec![exec3.clone(), exec2.clone(), exec4, exec3, exec2, exec1]); - - // last finalized block would be number 9. 
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - // genesis block 10 is already canonical - tree.make_canonical(B256::ZERO).unwrap(); - - // make genesis block 10 as finalized - tree.finalize_block(10).unwrap(); - - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block3.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block4, BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - let mut block2a = block2; - let block2a_hash = B256::new([0x34; 32]); - block2a.set_hash(block2a_hash); - - assert_eq!( - tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - let mut block3a = block3; - let block3a_hash = B256::new([0x35; 32]); - block3a.set_hash(block3a_hash); - block3a.set_parent_hash(block2a.hash()); - - assert_eq!( - tree.insert_block(block3a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) /* TODO: this is incorrect, figure out why */ - ); - - let block3a_chain_id = tree.state.block_indices.get_side_chain_id(&block3a.hash()).unwrap(); - assert_eq!( - tree.all_chain_hashes(block3a_chain_id), - BTreeMap::from([ - (block1.number, block1.hash()), - (block2a.number, block2a.hash()), - (block3a.number, block3a.hash()), - ]) - ); - } - - #[test] - fn cached_trie_updates() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let (block3, exec3) = data.blocks[2].clone(); - let (block4, exec4) = data.blocks[3].clone(); - let (block5, exec5) = data.blocks[4].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = setup_externals(vec![exec5.clone(), exec4, exec3, exec2, exec1]); - - // last finalized block would be number 9. 
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - // genesis block 10 is already canonical - tree.make_canonical(B256::ZERO).unwrap(); - - // make genesis block 10 as finalized - tree.finalize_block(10).unwrap(); - - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - let block1_chain_id = tree.state.block_indices.get_side_chain_id(&block1.hash()).unwrap(); - let block1_chain = tree.state.chains.get(&block1_chain_id).unwrap(); - assert!(block1_chain.trie_updates().is_some()); - - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - let block2_chain_id = tree.state.block_indices.get_side_chain_id(&block2.hash()).unwrap(); - let block2_chain = tree.state.chains.get(&block2_chain_id).unwrap(); - assert!(block2_chain.trie_updates().is_none()); - - assert_eq!( - tree.make_canonical(block2.hash()).unwrap(), - CanonicalOutcome::Committed { head: block2.header.clone() } - ); - - assert_eq!( - tree.insert_block(block3.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - let block3_chain_id = tree.state.block_indices.get_side_chain_id(&block3.hash()).unwrap(); - let block3_chain = tree.state.chains.get(&block3_chain_id).unwrap(); - assert!(block3_chain.trie_updates().is_some()); - - assert_eq!( - tree.make_canonical(block3.hash()).unwrap(), - CanonicalOutcome::Committed { head: block3.header.clone() } - ); - - assert_eq!( - tree.insert_block(block4.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - let block4_chain_id = tree.state.block_indices.get_side_chain_id(&block4.hash()).unwrap(); - let block4_chain = tree.state.chains.get(&block4_chain_id).unwrap(); - assert!(block4_chain.trie_updates().is_some()); - - assert_eq!( - tree.insert_block(block5.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - let block5_chain_id = tree.state.block_indices.get_side_chain_id(&block5.hash()).unwrap(); - let block5_chain = tree.state.chains.get(&block5_chain_id).unwrap(); - assert!(block5_chain.trie_updates().is_none()); - - assert_eq!( - tree.make_canonical(block5.hash()).unwrap(), - CanonicalOutcome::Committed { head: block5.header.clone() } - ); - - let provider = tree.externals.provider_factory.provider().unwrap(); - let prefix_sets = tree - .externals - .provider_factory - .hashed_post_state(exec5.state()) - .construct_prefix_sets() - .freeze(); - let state_root = - StateRoot::from_tx(provider.tx_ref()).with_prefix_sets(prefix_sets).root().unwrap(); - assert_eq!(state_root, block5.state_root); - } - - #[test] - fn test_side_chain_fork() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = setup_externals(vec![exec2.clone(), exec2, exec1]); - - // last finalized block would be number 9. 
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - // genesis block 10 is already canonical - tree.make_canonical(B256::ZERO).unwrap(); - - // make genesis block 10 as finalized - tree.finalize_block(10).unwrap(); - - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // we have one chain that has two blocks. - // Trie state: - // b2 (pending block) - // | - // | - // b1 (pending block) - // / - // / - // g1 (canonical blocks) - // | - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([ - (block1.hash(), 0.into()), - (block2.hash(), 0.into()), - ])) - .with_fork_to_child(HashMap::from([( - block1.parent_hash, - HashSet::from([block1.hash()]), - )])) - .assert(&tree); - - let mut block2a = block2.clone(); - let block2a_hash = B256::new([0x34; 32]); - block2a.set_hash(block2a_hash); - - assert_eq!( - tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - // fork chain. - // Trie state: - // b2 b2a (pending blocks in tree) - // | / - // | / - // b1 - // / - // / - // g1 (canonical blocks) - // | - - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1.hash(), 0.into()), - (block2.hash(), 0.into()), - (block2a.hash(), 1.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1.hash()])), - (block2a.parent_hash, HashSet::from([block2a.hash()])), - ])) - .assert(&tree); - // chain 0 has two blocks so receipts and reverts len is 2 - let chain0 = tree.state.chains.get(&0.into()).unwrap().execution_outcome(); - assert_eq!(chain0.receipts().len(), 2); - assert_eq!(chain0.state().reverts.len(), 2); - assert_eq!(chain0.first_block(), block1.number); - // chain 1 has one block so receipts and reverts len is 1 - let chain1 = tree.state.chains.get(&1.into()).unwrap().execution_outcome(); - assert_eq!(chain1.receipts().len(), 1); - assert_eq!(chain1.state().reverts.len(), 1); - assert_eq!(chain1.first_block(), block2.number); - } - - #[test] - fn sanity_path() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = setup_externals(vec![exec2.clone(), exec1.clone(), exec2, exec1]); - - // last finalized block would be number 9. 
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - - let mut canon_notif = tree.subscribe_canon_state(); - // genesis block 10 is already canonical - let head = BlockNumHash::new(10, B256::ZERO); - tree.make_canonical(head.hash).unwrap(); - - // make sure is_block_hash_canonical returns true for genesis block - tree.is_block_hash_canonical(&B256::ZERO).unwrap(); - - // make genesis block 10 as finalized - tree.finalize_block(head.number).unwrap(); - - // block 2 parent is not known, block2 is buffered. - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Disconnected { - head, - missing_ancestor: block2.parent_num_hash() - }) - ); - - // Buffered block: [block2] - // Trie state: - // | - // g1 (canonical blocks) - // | - - TreeTester::default() - .with_buffered_blocks(HashMap::from([(block2.hash(), block2.clone())])) - .assert(&tree); - - assert_eq!( - tree.is_block_known(block2.num_hash()).unwrap(), - Some(BlockStatus::Disconnected { head, missing_ancestor: block2.parent_num_hash() }) - ); - - // check if random block is known - let old_block = BlockNumHash::new(1, B256::new([32; 32])); - let err = BlockchainTreeError::PendingBlockIsFinalized { last_finalized: 10 }; - - assert_eq!(tree.is_block_known(old_block).unwrap_err().as_tree_error(), Some(err)); - - // insert block1 and buffered block2 is inserted - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // Buffered blocks: [] - // Trie state: - // b2 (pending block) - // | - // | - // b1 (pending block) - // / - // / - // g1 (canonical blocks) - // | - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([ - (block1.hash(), 0.into()), - (block2.hash(), 0.into()), - ])) - .with_fork_to_child(HashMap::from([( - block1.parent_hash, - HashSet::from([block1.hash()]), - )])) - .with_pending_blocks((block1.number, HashSet::from([block1.hash()]))) - .assert(&tree); - - // already inserted block will `InsertPayloadOk::AlreadySeen(_)` - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::AlreadySeen(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // block two is already inserted. - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::AlreadySeen(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // make block1 canonical - tree.make_canonical(block1.hash()).unwrap(); - // check notification - assert_matches!(canon_notif.try_recv(), Ok(CanonStateNotification::Commit{ new}) if *new.blocks() == BTreeMap::from([(block1.number,block1.clone())])); - - // make block2 canonicals - tree.make_canonical(block2.hash()).unwrap(); - // check notification. 
- assert_matches!(canon_notif.try_recv(), Ok(CanonStateNotification::Commit{ new}) if *new.blocks() == BTreeMap::from([(block2.number,block2.clone())])); - - // Trie state: - // b2 (canonical block) - // | - // | - // b1 (canonical block) - // | - // | - // g1 (canonical blocks) - // | - TreeTester::default() - .with_chain_num(0) - .with_block_to_chain(HashMap::from([])) - .with_fork_to_child(HashMap::from([])) - .assert(&tree); - - /**** INSERT SIDE BLOCKS *** */ - - let mut block1a = block1.clone(); - let block1a_hash = B256::new([0x33; 32]); - block1a.set_hash(block1a_hash); - let mut block2a = block2.clone(); - let block2a_hash = B256::new([0x34; 32]); - block2a.set_hash(block2a_hash); - - // reinsert two blocks that point to canonical chain - assert_eq!( - tree.insert_block(block1a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([(block1a_hash, 1.into())])) - .with_fork_to_child(HashMap::from([( - block1.parent_hash, - HashSet::from([block1a_hash]), - )])) - .with_pending_blocks((block2.number + 1, HashSet::from([]))) - .assert(&tree); - - assert_eq!( - tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - // Trie state: - // b2 b2a (side chain) - // | / - // | / - // b1 b1a (side chain) - // | / - // |/ - // g1 (10) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1a_hash, 1.into()), - (block2a_hash, 2.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1a_hash])), - (block1.hash(), HashSet::from([block2a_hash])), - ])) - .with_pending_blocks((block2.number + 1, HashSet::from([]))) - .assert(&tree); - - // make b2a canonical - assert!(tree.make_canonical(block2a_hash).is_ok()); - // check notification. - assert_matches!(canon_notif.try_recv(), - Ok(CanonStateNotification::Reorg{ old, new}) - if *old.blocks() == BTreeMap::from([(block2.number,block2.clone())]) - && *new.blocks() == BTreeMap::from([(block2a.number,block2a.clone())])); - - // Trie state: - // b2a b2 (side chain) - // | / - // | / - // b1 b1a (side chain) - // | / - // |/ - // g1 (10) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1a_hash, 1.into()), - (block2.hash(), 3.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1a_hash])), - (block1.hash(), HashSet::from([block2.hash()])), - ])) - .with_pending_blocks((block2.number + 1, HashSet::default())) - .assert(&tree); - - assert_matches!(tree.make_canonical(block1a_hash), Ok(_)); - // Trie state: - // b2a b2 (side chain) - // | / - // | / - // b1a b1 (side chain) - // | / - // |/ - // g1 (10) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1.hash(), 4.into()), - (block2a_hash, 4.into()), - (block2.hash(), 3.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1.hash()])), - (block1.hash(), HashSet::from([block2.hash()])), - ])) - .with_pending_blocks((block1a.number + 1, HashSet::default())) - .assert(&tree); - - // check notification. 
- assert_matches!(canon_notif.try_recv(), - Ok(CanonStateNotification::Reorg{ old, new}) - if *old.blocks() == BTreeMap::from([(block1.number,block1.clone()),(block2a.number,block2a.clone())]) - && *new.blocks() == BTreeMap::from([(block1a.number,block1a.clone())])); - - // check that b2 and b1 are not canonical - assert!(!tree.is_block_hash_canonical(&block2.hash()).unwrap()); - assert!(!tree.is_block_hash_canonical(&block1.hash()).unwrap()); - - // ensure that b1a is canonical - assert!(tree.is_block_hash_canonical(&block1a.hash()).unwrap()); - - // make b2 canonical - tree.make_canonical(block2.hash()).unwrap(); - // Trie state: - // b2 b2a (side chain) - // | / - // | / - // b1 b1a (side chain) - // | / - // |/ - // g1 (10) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block1a_hash, 5.into()), - (block2a_hash, 4.into()), - ])) - .with_fork_to_child(HashMap::from([ - (block1.parent_hash, HashSet::from([block1a_hash])), - (block1.hash(), HashSet::from([block2a_hash])), - ])) - .with_pending_blocks((block2.number + 1, HashSet::default())) - .assert(&tree); - - // check notification. - assert_matches!(canon_notif.try_recv(), - Ok(CanonStateNotification::Reorg{ old, new}) - if *old.blocks() == BTreeMap::from([(block1a.number,block1a.clone())]) - && *new.blocks() == BTreeMap::from([(block1.number,block1.clone()),(block2.number,block2.clone())])); - - // check that b2 is now canonical - assert!(tree.is_block_hash_canonical(&block2.hash()).unwrap()); - - // finalize b1 that would make b1a removed from tree - tree.finalize_block(11).unwrap(); - // Trie state: - // b2 b2a (side chain) - // | / - // | / - // b1 (canon) - // | - // g1 (10) - // | - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([(block2a_hash, 4.into())])) - .with_fork_to_child(HashMap::from([(block1.hash(), HashSet::from([block2a_hash]))])) - .with_pending_blocks((block2.number + 1, HashSet::from([]))) - .assert(&tree); - - // unwind canonical - assert!(tree.unwind(block1.number).is_ok()); - // Trie state: - // b2 b2a (pending block) - // / / - // / / - // / / - // b1 (canonical block) - // | - // | - // g1 (canonical blocks) - // | - TreeTester::default() - .with_chain_num(2) - .with_block_to_chain(HashMap::from([ - (block2a_hash, 4.into()), - (block2.hash(), 6.into()), - ])) - .with_fork_to_child(HashMap::from([( - block1.hash(), - HashSet::from([block2a_hash, block2.hash()]), - )])) - .with_pending_blocks((block2.number, HashSet::from([block2.hash(), block2a.hash()]))) - .assert(&tree); - - // commit b2a - tree.make_canonical(block2.hash()).unwrap(); - - // Trie state: - // b2 b2a (side chain) - // | / - // | / - // b1 (finalized) - // | - // g1 (10) - // | - TreeTester::default() - .with_chain_num(1) - .with_block_to_chain(HashMap::from([(block2a_hash, 4.into())])) - .with_fork_to_child(HashMap::from([(block1.hash(), HashSet::from([block2a_hash]))])) - .with_pending_blocks((block2.number + 1, HashSet::default())) - .assert(&tree); - - // check notification. 
- assert_matches!(canon_notif.try_recv(), - Ok(CanonStateNotification::Commit{ new }) - if *new.blocks() == BTreeMap::from([(block2.number,block2.clone())])); - - // insert unconnected block2b - let mut block2b = block2a.clone(); - block2b.set_hash(B256::new([0x99; 32])); - block2b.set_parent_hash(B256::new([0x88; 32])); - - assert_eq!( - tree.insert_block(block2b.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Disconnected { - head: block2.header.num_hash(), - missing_ancestor: block2b.parent_num_hash() - }) - ); - - TreeTester::default() - .with_buffered_blocks(HashMap::from([(block2b.hash(), block2b.clone())])) - .assert(&tree); - - // update canonical block to b2, this would make b2a be removed - assert!(tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(12).is_ok()); - - assert_eq!( - tree.is_block_known(block2.num_hash()).unwrap(), - Some(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - // Trie state: - // b2 (finalized) - // | - // b1 (finalized) - // | - // g1 (10) - // | - TreeTester::default() - .with_chain_num(0) - .with_block_to_chain(HashMap::default()) - .with_fork_to_child(HashMap::default()) - .with_pending_blocks((block2.number + 1, HashSet::default())) - .with_buffered_blocks(HashMap::default()) - .assert(&tree); - } - - #[test] - fn last_finalized_block_initialization() { - let data = BlockchainTestData::default_from_number(11); - let (block1, exec1) = data.blocks[0].clone(); - let (block2, exec2) = data.blocks[1].clone(); - let (block3, exec3) = data.blocks[2].clone(); - let genesis = data.genesis; - - // test pops execution results from vector, so order is from last to first. - let externals = - setup_externals(vec![exec3.clone(), exec2.clone(), exec1.clone(), exec3, exec2, exec1]); - let cloned_externals_1 = TreeExternals { - provider_factory: externals.provider_factory.clone(), - executor_factory: externals.executor_factory.clone(), - consensus: externals.consensus.clone(), - }; - let cloned_externals_2 = TreeExternals { - provider_factory: externals.provider_factory.clone(), - executor_factory: externals.executor_factory.clone(), - consensus: externals.consensus.clone(), - }; - - // last finalized block would be number 9. 
- setup_genesis(&externals.provider_factory, genesis); - - // make tree - let config = BlockchainTreeConfig::new(1, 2, 3, 2); - let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree"); - - assert_eq!( - tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - assert_eq!( - tree.insert_block(block3, BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical)) - ); - - tree.make_canonical(block2.hash()).unwrap(); - - // restart - let mut tree = - BlockchainTree::new(cloned_externals_1, config).expect("failed to create tree"); - assert_eq!(tree.block_indices().last_finalized_block(), 0); - - let mut block1a = block1; - let block1a_hash = B256::new([0x33; 32]); - block1a.set_hash(block1a_hash); - - assert_eq!( - tree.insert_block(block1a.clone(), BlockValidationKind::Exhaustive).unwrap(), - InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork)) - ); - - tree.make_canonical(block1a.hash()).unwrap(); - tree.finalize_block(block1a.number).unwrap(); - - // restart - let tree = BlockchainTree::new(cloned_externals_2, config).expect("failed to create tree"); - - assert_eq!(tree.block_indices().last_finalized_block(), block1a.number); - } -} diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs deleted file mode 100644 index 9fba5bab4a9f..000000000000 --- a/crates/blockchain-tree/src/chain.rs +++ /dev/null @@ -1,311 +0,0 @@ -//! A chain in a [`BlockchainTree`][super::BlockchainTree]. -//! -//! A [`Chain`] contains the state of accounts for the chain after execution of its constituent -//! blocks, as well as a list of the blocks the chain is composed of. - -use super::externals::TreeExternals; -use crate::BundleStateDataRef; -use alloy_eips::ForkBlock; -use alloy_primitives::{BlockHash, BlockNumber}; -use reth_blockchain_tree_api::{ - error::{BlockchainTreeError, InsertBlockErrorKind}, - BlockAttachment, BlockValidationKind, -}; -use reth_consensus::{ConsensusError, PostExecutionInput}; -use reth_evm::execute::{BlockExecutorProvider, Executor}; -use reth_execution_errors::BlockExecutionError; -use reth_execution_types::{Chain, ExecutionOutcome}; -use reth_primitives::{GotExpected, SealedBlockWithSenders, SealedHeader}; -use reth_provider::{ - providers::{BundleStateProvider, ConsistentDbView, TreeNodeTypes}, - DBProvider, FullExecutionDataProvider, HashedPostStateProvider, ProviderError, - StateRootProvider, TryIntoHistoricalStateProvider, -}; -use reth_revm::database::StateProviderDatabase; -use reth_trie::{updates::TrieUpdates, TrieInput}; -use reth_trie_parallel::root::ParallelStateRoot; -use std::{ - collections::BTreeMap, - ops::{Deref, DerefMut}, - time::Instant, -}; - -/// A chain in the blockchain tree that has functionality to execute blocks and append them to -/// itself. 
-#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct AppendableChain { - chain: Chain, -} - -impl Deref for AppendableChain { - type Target = Chain; - - fn deref(&self) -> &Self::Target { - &self.chain - } -} - -impl DerefMut for AppendableChain { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.chain - } -} - -impl AppendableChain { - /// Create a new appendable chain from a given chain. - pub const fn new(chain: Chain) -> Self { - Self { chain } - } - - /// Get the chain. - pub fn into_inner(self) -> Chain { - self.chain - } - - /// Create a new chain that forks off of the canonical chain. - /// - /// if [`BlockValidationKind::Exhaustive`] is specified, the method will verify the state root - /// of the block. - pub fn new_canonical_fork( - block: SealedBlockWithSenders, - parent_header: &SealedHeader, - canonical_block_hashes: &BTreeMap, - canonical_fork: ForkBlock, - externals: &TreeExternals, - block_attachment: BlockAttachment, - block_validation_kind: BlockValidationKind, - ) -> Result - where - N: TreeNodeTypes, - E: BlockExecutorProvider, - { - let execution_outcome = ExecutionOutcome::default(); - let empty = BTreeMap::new(); - - let state_provider = BundleStateDataRef { - execution_outcome: &execution_outcome, - sidechain_block_hashes: &empty, - canonical_block_hashes, - canonical_fork, - }; - - let (bundle_state, trie_updates) = Self::validate_and_execute( - block.clone(), - parent_header, - state_provider, - externals, - block_attachment, - block_validation_kind, - )?; - - Ok(Self::new(Chain::new(vec![block], bundle_state, trie_updates))) - } - - /// Create a new chain that forks off of an existing sidechain. - /// - /// This differs from [`AppendableChain::new_canonical_fork`] in that this starts a new fork. - pub(crate) fn new_chain_fork( - &self, - block: SealedBlockWithSenders, - side_chain_block_hashes: BTreeMap, - canonical_block_hashes: &BTreeMap, - canonical_fork: ForkBlock, - externals: &TreeExternals, - block_validation_kind: BlockValidationKind, - ) -> Result - where - N: TreeNodeTypes, - E: BlockExecutorProvider, - { - let parent_number = - block.number.checked_sub(1).ok_or(BlockchainTreeError::GenesisBlockHasNoParent)?; - let parent = self.blocks().get(&parent_number).ok_or( - BlockchainTreeError::BlockNumberNotFoundInChain { block_number: parent_number }, - )?; - - let mut execution_outcome = self.execution_outcome().clone(); - - // Revert state to the state after execution of the parent block - execution_outcome.revert_to(parent.number); - - // Revert changesets to get the state of the parent that we need to apply the change. - let bundle_state_data = BundleStateDataRef { - execution_outcome: &execution_outcome, - sidechain_block_hashes: &side_chain_block_hashes, - canonical_block_hashes, - canonical_fork, - }; - let (block_state, _) = Self::validate_and_execute( - block.clone(), - parent, - bundle_state_data, - externals, - BlockAttachment::HistoricalFork, - block_validation_kind, - )?; - // extending will also optimize few things, mostly related to selfdestruct and wiping of - // storage. - execution_outcome.extend(block_state); - - // remove all receipts and reverts (except the last one), as they belong to the chain we - // forked from and not the new chain we are creating. - let size = execution_outcome.receipts().len(); - execution_outcome.receipts_mut().drain(0..size - 1); - execution_outcome.state_mut().take_n_reverts(size - 1); - execution_outcome.set_first_block(block.number); - - // If all is okay, return new chain back. 
Present chain is not modified. - Ok(Self::new(Chain::from_block(block, execution_outcome, None))) - } - - /// Validate and execute the given block that _extends the canonical chain_, validating its - /// state root after execution if possible and requested. - /// - /// Note: State root validation is limited to blocks that extend the canonical chain and is - /// optional, see [`BlockValidationKind`]. So this function takes two parameters to determine - /// if the state can and should be validated. - /// - [`BlockAttachment`] represents if the block extends the canonical chain, and thus we can - /// cache the trie state updates. - /// - [`BlockValidationKind`] determines if the state root __should__ be validated. - fn validate_and_execute( - block: SealedBlockWithSenders, - parent_block: &SealedHeader, - bundle_state_data_provider: EDP, - externals: &TreeExternals, - block_attachment: BlockAttachment, - block_validation_kind: BlockValidationKind, - ) -> Result<(ExecutionOutcome, Option), BlockExecutionError> - where - EDP: FullExecutionDataProvider, - N: TreeNodeTypes, - E: BlockExecutorProvider, - { - // some checks are done before blocks comes here. - externals.consensus.validate_header_against_parent(&block, parent_block)?; - - // get the state provider. - let canonical_fork = bundle_state_data_provider.canonical_fork(); - - // SAFETY: For block execution and parallel state root computation below we open multiple - // independent database transactions. Upon opening the database transaction the consistent - // view will check a current tip in the database and throw an error if it doesn't match - // the one recorded during initialization. - // It is safe to use consistent view without any special error handling as long as - // we guarantee that plain state cannot change during processing of new payload. - // The usage has to be re-evaluated if that was ever to change. - let consistent_view = - ConsistentDbView::new_with_latest_tip(externals.provider_factory.clone())?; - let state_provider = consistent_view - .provider_ro()? - // State root calculation can take a while, and we're sure no write transaction - // will be open in parallel. See https://github.com/paradigmxyz/reth/issues/7509. - .disable_long_read_transaction_safety() - .try_into_history_at_block(canonical_fork.number)?; - - let provider = BundleStateProvider::new(state_provider, bundle_state_data_provider); - - let db = StateProviderDatabase::new(&provider); - let executor = externals.executor_factory.executor(db); - let block_hash = block.hash(); - let block = block.unseal(); - - let state = executor.execute(&block)?; - externals.consensus.validate_block_post_execution( - &block, - PostExecutionInput::new(&state.receipts, &state.requests), - )?; - - let initial_execution_outcome = ExecutionOutcome::from((state, block.number)); - - // check state root if the block extends the canonical chain __and__ if state root - // validation was requested. - if block_validation_kind.is_exhaustive() { - // calculate and check state root - let start = Instant::now(); - let (state_root, trie_updates) = if block_attachment.is_canonical() { - let mut execution_outcome = - provider.block_execution_data_provider.execution_outcome().clone(); - execution_outcome.extend(initial_execution_outcome.clone()); - ParallelStateRoot::new( - consistent_view, - TrieInput::from_state(provider.hashed_post_state(execution_outcome.state())), - ) - .incremental_root_with_updates() - .map(|(root, updates)| (root, Some(updates))) - .map_err(ProviderError::from)? 
- } else { - let hashed_state = provider.hashed_post_state(initial_execution_outcome.state()); - let state_root = provider.state_root(hashed_state)?; - (state_root, None) - }; - if block.state_root != state_root { - return Err(ConsensusError::BodyStateRootDiff( - GotExpected { got: state_root, expected: block.state_root }.into(), - ) - .into()) - } - - tracing::debug!( - target: "blockchain_tree::chain", - number = block.number, - hash = %block_hash, - elapsed = ?start.elapsed(), - "Validated state root" - ); - - Ok((initial_execution_outcome, trie_updates)) - } else { - Ok((initial_execution_outcome, None)) - } - } - - /// Validate and execute the given block, and append it to this chain. - /// - /// This expects that the block's ancestors can be traced back to the `canonical_fork` (the - /// first parent block of the `block`'s chain that is in the canonical chain). - /// - /// In other words, expects a gap less (side-) chain: [`canonical_fork..block`] in order to be - /// able to __execute__ the block. - /// - /// CAUTION: This will only perform state root check if it's possible: if the `canonical_fork` - /// is the canonical head, or: state root check can't be performed if the given canonical is - /// __not__ the canonical head. - #[track_caller] - #[allow(clippy::too_many_arguments)] - pub(crate) fn append_block( - &mut self, - block: SealedBlockWithSenders, - side_chain_block_hashes: BTreeMap, - canonical_block_hashes: &BTreeMap, - externals: &TreeExternals, - canonical_fork: ForkBlock, - block_attachment: BlockAttachment, - block_validation_kind: BlockValidationKind, - ) -> Result<(), InsertBlockErrorKind> - where - N: TreeNodeTypes, - E: BlockExecutorProvider, - { - let parent_block = self.chain.tip(); - - let bundle_state_data = BundleStateDataRef { - execution_outcome: self.execution_outcome(), - sidechain_block_hashes: &side_chain_block_hashes, - canonical_block_hashes, - canonical_fork, - }; - - let (block_state, _) = Self::validate_and_execute( - block.clone(), - parent_block, - bundle_state_data, - externals, - block_attachment, - block_validation_kind, - )?; - // extend the state. - self.chain.append_block(block, block_state); - - Ok(()) - } -} diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs deleted file mode 100644 index d37a849867b3..000000000000 --- a/crates/blockchain-tree/src/shareable.rs +++ /dev/null @@ -1,206 +0,0 @@ -//! Wrapper around `BlockchainTree` that allows for it to be shared. 
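The state-root handling in `validate_and_execute` (in the chain.rs hunk above) follows a small policy: the root is only verified for exhaustive validation, and trie updates are only kept when the block extends the canonical chain. The sketch below uses toy enums rather than the real types, and it assumes the non-exhaustive variant is spelled `SkipStateRootValidation`:

    #[derive(Clone, Copy)]
    enum Attachment {
        Canonical,
        HistoricalFork,
    }

    #[derive(Clone, Copy)]
    enum ValidationKind {
        Exhaustive,
        SkipStateRootValidation,
    }

    // Returns (verify_state_root, keep_trie_updates) for the given combination.
    fn root_policy(attachment: Attachment, kind: ValidationKind) -> (bool, bool) {
        match (kind, attachment) {
            (ValidationKind::SkipStateRootValidation, _) => (false, false),
            (ValidationKind::Exhaustive, Attachment::Canonical) => (true, true),
            (ValidationKind::Exhaustive, Attachment::HistoricalFork) => (true, false),
        }
    }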
- -use crate::externals::TreeNodeTypes; - -use super::BlockchainTree; -use alloy_eips::BlockNumHash; -use alloy_primitives::{BlockHash, BlockNumber}; -use parking_lot::RwLock; -use reth_blockchain_tree_api::{ - error::{CanonicalError, InsertBlockError}, - BlockValidationKind, BlockchainTreeEngine, BlockchainTreeViewer, CanonicalOutcome, - InsertPayloadOk, -}; -use reth_evm::execute::BlockExecutorProvider; -use reth_execution_errors::BlockExecutionError; -use reth_node_types::NodeTypesWithDB; -use reth_primitives::{Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader}; -use reth_provider::{ - providers::ProviderNodeTypes, BlockchainTreePendingStateProvider, CanonStateNotifications, - CanonStateSubscriptions, FullExecutionDataProvider, NodePrimitivesProvider, ProviderError, -}; -use reth_storage_errors::provider::ProviderResult; -use std::{collections::BTreeMap, sync::Arc}; -use tracing::trace; - -/// Shareable blockchain tree that is behind a `RwLock` -#[derive(Clone, Debug)] -pub struct ShareableBlockchainTree { - /// `BlockchainTree` - pub tree: Arc>>, -} - -impl ShareableBlockchainTree { - /// Create a new shareable database. - pub fn new(tree: BlockchainTree) -> Self { - Self { tree: Arc::new(RwLock::new(tree)) } - } -} - -impl BlockchainTreeEngine for ShareableBlockchainTree -where - N: TreeNodeTypes, - E: BlockExecutorProvider, -{ - fn buffer_block(&self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> { - let mut tree = self.tree.write(); - // Blockchain tree metrics shouldn't be updated here, see - // `BlockchainTree::update_chains_metrics` documentation. - tree.buffer_block(block) - } - - fn insert_block( - &self, - block: SealedBlockWithSenders, - validation_kind: BlockValidationKind, - ) -> Result { - trace!(target: "blockchain_tree", hash = %block.hash(), number = block.number, parent_hash = %block.parent_hash, "Inserting block"); - let mut tree = self.tree.write(); - let res = tree.insert_block(block, validation_kind); - tree.update_chains_metrics(); - res - } - - fn finalize_block(&self, finalized_block: BlockNumber) -> ProviderResult<()> { - trace!(target: "blockchain_tree", finalized_block, "Finalizing block"); - let mut tree = self.tree.write(); - tree.finalize_block(finalized_block)?; - tree.update_chains_metrics(); - - Ok(()) - } - - fn connect_buffered_blocks_to_canonical_hashes_and_finalize( - &self, - last_finalized_block: BlockNumber, - ) -> Result<(), CanonicalError> { - trace!(target: "blockchain_tree", last_finalized_block, "Connecting buffered blocks to canonical hashes and finalizing the tree"); - let mut tree = self.tree.write(); - let res = - tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(last_finalized_block); - tree.update_chains_metrics(); - Ok(res?) - } - - fn update_block_hashes_and_clear_buffered( - &self, - ) -> Result, CanonicalError> { - let mut tree = self.tree.write(); - let res = tree.update_block_hashes_and_clear_buffered(); - tree.update_chains_metrics(); - Ok(res?) - } - - fn connect_buffered_blocks_to_canonical_hashes(&self) -> Result<(), CanonicalError> { - trace!(target: "blockchain_tree", "Connecting buffered blocks to canonical hashes"); - let mut tree = self.tree.write(); - let res = tree.connect_buffered_blocks_to_canonical_hashes(); - tree.update_chains_metrics(); - Ok(res?) 
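The sharing pattern used by `ShareableBlockchainTree` above is a conventional `Arc<RwLock<_>>` wrapper: mutating calls take the write lock and refresh metrics before returning. A minimal sketch, assuming `parking_lot` as in the imports above and using a toy tree in place of `BlockchainTree`:

    use std::sync::Arc;
    use parking_lot::RwLock;

    struct ToyTree {
        height: u64,
    }

    impl ToyTree {
        fn insert_block(&mut self) {
            self.height += 1;
        }
        fn update_chains_metrics(&self) {
            // e.g. refresh gauges for canonical height / sidechain count
        }
    }

    // Cheap-to-clone handle; every mutating call locks, mutates, then updates metrics.
    #[derive(Clone)]
    struct ShareableToyTree {
        tree: Arc<RwLock<ToyTree>>,
    }

    impl ShareableToyTree {
        fn insert_block(&self) {
            let mut tree = self.tree.write();
            tree.insert_block();
            tree.update_chains_metrics();
        }

        fn canonical_height(&self) -> u64 {
            self.tree.read().height
        }
    }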
- } - - fn make_canonical(&self, block_hash: BlockHash) -> Result { - trace!(target: "blockchain_tree", %block_hash, "Making block canonical"); - let mut tree = self.tree.write(); - let res = tree.make_canonical(block_hash); - tree.update_chains_metrics(); - res - } -} - -impl BlockchainTreeViewer for ShareableBlockchainTree -where - N: TreeNodeTypes, - E: BlockExecutorProvider, -{ - fn header_by_hash(&self, hash: BlockHash) -> Option { - trace!(target: "blockchain_tree", ?hash, "Returning header by hash"); - self.tree.read().sidechain_block_by_hash(hash).map(|b| b.header.clone()) - } - - fn block_by_hash(&self, block_hash: BlockHash) -> Option { - trace!(target: "blockchain_tree", ?block_hash, "Returning block by hash"); - self.tree.read().sidechain_block_by_hash(block_hash).cloned() - } - - fn block_with_senders_by_hash(&self, block_hash: BlockHash) -> Option { - trace!(target: "blockchain_tree", ?block_hash, "Returning block by hash"); - self.tree.read().block_with_senders_by_hash(block_hash).cloned() - } - - fn buffered_header_by_hash(&self, block_hash: BlockHash) -> Option { - self.tree.read().get_buffered_block(&block_hash).map(|b| b.header.clone()) - } - - fn is_canonical(&self, hash: BlockHash) -> Result { - trace!(target: "blockchain_tree", ?hash, "Checking if block is canonical"); - self.tree.read().is_block_hash_canonical(&hash) - } - - fn lowest_buffered_ancestor(&self, hash: BlockHash) -> Option { - trace!(target: "blockchain_tree", ?hash, "Returning lowest buffered ancestor"); - self.tree.read().lowest_buffered_ancestor(&hash).cloned() - } - - fn canonical_tip(&self) -> BlockNumHash { - trace!(target: "blockchain_tree", "Returning canonical tip"); - self.tree.read().block_indices().canonical_tip() - } - - fn pending_block_num_hash(&self) -> Option { - trace!(target: "blockchain_tree", "Returning first pending block"); - self.tree.read().block_indices().pending_block_num_hash() - } - - fn pending_block(&self) -> Option { - trace!(target: "blockchain_tree", "Returning first pending block"); - self.tree.read().pending_block().cloned() - } - - fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { - let tree = self.tree.read(); - let pending_block = tree.pending_block()?.clone(); - let receipts = - tree.receipts_by_block_hash(pending_block.hash())?.into_iter().cloned().collect(); - Some((pending_block, receipts)) - } - - fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option> { - let tree = self.tree.read(); - Some(tree.receipts_by_block_hash(block_hash)?.into_iter().cloned().collect()) - } -} - -impl BlockchainTreePendingStateProvider for ShareableBlockchainTree -where - N: TreeNodeTypes, - E: BlockExecutorProvider, -{ - fn find_pending_state_provider( - &self, - block_hash: BlockHash, - ) -> Option> { - trace!(target: "blockchain_tree", ?block_hash, "Finding pending state provider"); - let provider = self.tree.read().post_state_data(block_hash)?; - Some(Box::new(provider)) - } -} - -impl NodePrimitivesProvider for ShareableBlockchainTree -where - N: ProviderNodeTypes, - E: Send + Sync, -{ - type Primitives = N::Primitives; -} - -impl CanonStateSubscriptions for ShareableBlockchainTree -where - N: TreeNodeTypes, - E: Send + Sync, -{ - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - trace!(target: "blockchain_tree", "Registered subscriber for canonical state"); - self.tree.read().subscribe_canon_state() - } -} From 9c8b898f1f8096439348ac124540c548b086844e Mon Sep 17 00:00:00 2001 From: chungquantin 
<56880684+chungquantin@users.noreply.github.com> Date: Fri, 17 Jan 2025 16:08:34 +0700 Subject: [PATCH 13/13] refactor: add generic block execution error type to Stage --- crates/errors/src/lib.rs | 4 ++- crates/evm/execution-errors/src/lib.rs | 21 +++++++++---- crates/evm/src/execute.rs | 13 ++++---- crates/stages/api/src/error.rs | 31 +++++++++++++------- crates/stages/api/src/pipeline/builder.rs | 8 +++-- crates/stages/api/src/pipeline/mod.rs | 10 +++---- crates/stages/api/src/pipeline/set.rs | 21 +++++++++---- crates/stages/api/src/stage.rs | 20 +++++++++---- crates/stages/api/src/test_utils.rs | 12 +++++--- crates/stages/stages/src/stages/execution.rs | 2 +- 10 files changed, 95 insertions(+), 47 deletions(-) diff --git a/crates/errors/src/lib.rs b/crates/errors/src/lib.rs index fc464eb98cbd..bd67a6ff1d16 100644 --- a/crates/errors/src/lib.rs +++ b/crates/errors/src/lib.rs @@ -16,7 +16,9 @@ mod error; pub use error::{RethError, RethResult}; pub use reth_consensus::ConsensusError; -pub use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +pub use reth_execution_errors::{ + BlockExecutionError, BlockValidationError, GenericBlockExecutionError, +}; pub use reth_storage_errors::{ db::DatabaseError, provider::{ProviderError, ProviderResult}, diff --git a/crates/evm/execution-errors/src/lib.rs b/crates/evm/execution-errors/src/lib.rs index cc723fa110ff..c42074984a22 100644 --- a/crates/evm/execution-errors/src/lib.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -17,6 +17,7 @@ use alloc::{ }; use alloy_eips::BlockNumHash; use alloy_primitives::B256; +use core::fmt::Display; use reth_consensus::ConsensusError; use reth_prune_types::PruneSegmentError; use reth_storage_errors::provider::ProviderError; @@ -134,6 +135,21 @@ pub enum BlockExecutionError { Internal(#[from] InternalBlockExecutionError), } +/// Generic block execution error. +pub trait GenericBlockExecutionError: + Display + From + From +{ + /// Returns `true` if the error is a state root error. + fn is_state_root_error(&self) -> bool; +} + +impl GenericBlockExecutionError for BlockExecutionError { + /// Returns `true` if the error is a state root error. + fn is_state_root_error(&self) -> bool { + matches!(self, Self::Validation(BlockValidationError::StateRoot(_))) + } +} + impl BlockExecutionError { /// Create a new [`BlockExecutionError::Internal`] variant, containing a /// [`InternalBlockExecutionError::Other`] error. @@ -157,11 +173,6 @@ impl BlockExecutionError { _ => None, } } - - /// Returns `true` if the error is a state root error. - pub const fn is_state_root_error(&self) -> bool { - matches!(self, Self::Validation(BlockValidationError::StateRoot(_))) - } } impl From for BlockExecutionError { diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index ca358d86621f..e6b791c00384 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,6 +1,7 @@ //! Traits for execution. use alloy_consensus::BlockHeader; +use reth_execution_errors::GenericBlockExecutionError; // Re-export execution types pub use reth_execution_errors::{ BlockExecutionError, BlockValidationError, InternalBlockExecutionError, @@ -80,7 +81,7 @@ pub trait BatchExecutor { /// The output type for the executor. type Output; /// The error type returned by the executor. - type Error: Into; + type Error: GenericBlockExecutionError; /// Executes the next block in the batch, verifies the output and updates the state internally. 
fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>; @@ -138,7 +139,7 @@ pub trait BlockExecutorProvider: Send + Sync + Clone + Unpin + 'static { type Primitives: NodePrimitives; /// The error type returned by the executor. - type Error; + type Error: GenericBlockExecutionError; /// An executor that can execute a single block given a database. /// @@ -200,7 +201,7 @@ pub trait BlockExecutionStrategy { type Primitives: NodePrimitives; /// The error type returned by this strategy's methods. - type Error: From + Into; + type Error: GenericBlockExecutionError; /// Initialize the strategy with the given transaction environment overrides. fn init(&mut self, _tx_env_overrides: Box) {} @@ -253,7 +254,7 @@ pub trait BlockExecutionStrategy { /// A strategy factory that can create block execution strategies. pub trait BlockExecutionStrategyFactory: Send + Sync + Clone + Unpin + 'static { /// The error type returned by this strategy's methods. - type Error; + type Error: GenericBlockExecutionError; /// Primitive types used by the strategy. type Primitives: NodePrimitives; @@ -445,9 +446,7 @@ where let ExecuteOutput { receipts, .. } = self.strategy.execute_transactions(block)?; let requests = self.strategy.apply_post_execution_changes(block, &receipts)?; - self.strategy - .validate_block_post_execution(block, &receipts, &requests) - .map_err(BlockExecutionError::Consensus)?; + self.strategy.validate_block_post_execution(block, &receipts, &requests)?; // prepare the state according to the prune mode let retention = self.batch_record.bundle_retention(block.header().number()); diff --git a/crates/stages/api/src/error.rs b/crates/stages/api/src/error.rs index b63dd20f77c1..276db71427a0 100644 --- a/crates/stages/api/src/error.rs +++ b/crates/stages/api/src/error.rs @@ -1,7 +1,7 @@ use crate::PipelineEvent; use alloy_eips::eip1898::BlockWithParent; use reth_consensus::ConsensusError; -use reth_errors::{BlockExecutionError, DatabaseError, RethError}; +use reth_errors::{DatabaseError, GenericBlockExecutionError, RethError}; use reth_network_p2p::error::DownloadError; use reth_provider::ProviderError; use reth_prune::{PruneSegment, PruneSegmentError, PrunerError}; @@ -11,18 +11,21 @@ use tokio::sync::broadcast::error::SendError; /// Represents the specific error type within a block error. #[derive(Error, Debug)] -pub enum BlockErrorKind { +pub enum BlockErrorKind { /// The block encountered a validation error. #[error("validation error: {0}")] Validation(#[from] ConsensusError), /// The block encountered an execution error. #[error("execution error: {0}")] - Execution(#[from] BlockExecutionError), + Execution(E), } -impl BlockErrorKind { +impl BlockErrorKind +where + E: GenericBlockExecutionError, +{ /// Returns `true` if the error is a state root error. - pub const fn is_state_root_error(&self) -> bool { + pub fn is_state_root_error(&self) -> bool { match self { Self::Validation(err) => err.is_state_root_error(), Self::Execution(err) => err.is_state_root_error(), @@ -32,7 +35,7 @@ impl BlockErrorKind { /// A stage execution error. #[derive(Error, Debug)] -pub enum StageError { +pub enum StageError { /// The stage encountered an error related to a block. #[error("stage encountered an error in block #{number}: {error}", number = block.block.number)] Block { @@ -40,7 +43,7 @@ pub enum StageError { block: Box, /// The specific error type, either consensus or execution error. 
#[source] - error: BlockErrorKind, + error: BlockErrorKind, }, /// The stage encountered a downloader error where the responses cannot be attached to the /// current head. @@ -121,7 +124,10 @@ pub enum StageError { Fatal(Box), } -impl StageError { +impl StageError +where + E: GenericBlockExecutionError, +{ /// If the error is fatal the pipeline will stop. pub const fn is_fatal(&self) -> bool { matches!( @@ -139,7 +145,10 @@ impl StageError { } } -impl From for StageError { +impl From for StageError +where + E: GenericBlockExecutionError, +{ fn from(source: std::io::Error) -> Self { Self::Fatal(Box::new(source)) } @@ -147,10 +156,10 @@ impl From for StageError { /// A pipeline execution error. #[derive(Error, Debug)] -pub enum PipelineError { +pub enum PipelineError { /// The pipeline encountered an irrecoverable error in one of the stages. #[error(transparent)] - Stage(#[from] StageError), + Stage(#[from] StageError), /// The pipeline encountered a database error. #[error(transparent)] Database(#[from] DatabaseError), diff --git a/crates/stages/api/src/pipeline/builder.rs b/crates/stages/api/src/pipeline/builder.rs index 45bdc2d89427..f4b11193aebf 100644 --- a/crates/stages/api/src/pipeline/builder.rs +++ b/crates/stages/api/src/pipeline/builder.rs @@ -1,14 +1,18 @@ use crate::{pipeline::BoxedStage, MetricEventsSender, Pipeline, Stage, StageId, StageSet}; use alloy_primitives::{BlockNumber, B256}; +use reth_errors::GenericBlockExecutionError; use reth_provider::{providers::ProviderNodeTypes, DatabaseProviderFactory, ProviderFactory}; use reth_static_file::StaticFileProducer; use tokio::sync::watch; /// Builds a [`Pipeline`]. #[must_use = "call `build` to construct the pipeline"] -pub struct PipelineBuilder { +pub struct PipelineBuilder +where + E: GenericBlockExecutionError, +{ /// All configured stages in the order they will be executed. - stages: Vec>, + stages: Vec>, /// The maximum block number to sync to. max_block: Option, /// A receiver for the current chain tip to sync to. diff --git a/crates/stages/api/src/pipeline/mod.rs b/crates/stages/api/src/pipeline/mod.rs index 2cb98d44f93d..c7a902880952 100644 --- a/crates/stages/api/src/pipeline/mod.rs +++ b/crates/stages/api/src/pipeline/mod.rs @@ -28,11 +28,11 @@ use crate::{ }; pub use builder::*; use progress::*; -use reth_errors::RethResult; +use reth_errors::{GenericBlockExecutionError, RethResult}; pub use set::*; /// A container for a queued stage. -pub(crate) type BoxedStage = Box>; +pub(crate) type BoxedStage = Box>; /// The future that returns the owned pipeline and the result of the pipeline run. See /// [`Pipeline::run_as_fut`]. @@ -494,12 +494,12 @@ impl Pipeline { } } -fn on_stage_error( +fn on_stage_error( factory: &ProviderFactory, stage_id: StageId, prev_checkpoint: Option, - err: StageError, -) -> Result, PipelineError> { + err: StageError, +) -> Result, PipelineError> { if let StageError::DetachedHead { local_head, header, error } = err { warn!(target: "sync::pipeline", stage = %stage_id, ?local_head, ?header, %error, "Stage encountered detached head"); diff --git a/crates/stages/api/src/pipeline/set.rs b/crates/stages/api/src/pipeline/set.rs index c8fbf4c71d8e..f8101e9c3a78 100644 --- a/crates/stages/api/src/pipeline/set.rs +++ b/crates/stages/api/src/pipeline/set.rs @@ -1,3 +1,5 @@ +use reth_errors::GenericBlockExecutionError; + use crate::{Stage, StageId}; use std::{ collections::HashMap, @@ -10,7 +12,10 @@ use std::{ /// individual stage sets to determine what kind of configuration they expose. 
/// /// Individual stages in the set can be added, removed and overridden using [`StageSetBuilder`]. -pub trait StageSet: Sized { +pub trait StageSet: Sized +where + E: GenericBlockExecutionError, +{ /// Configures the stages in the set. fn builder(self) -> StageSetBuilder; @@ -19,17 +24,23 @@ pub trait StageSet: Sized { /// # Panics /// /// Panics if the [`Stage`] is not in this set. - fn set + 'static>(self, stage: S) -> StageSetBuilder { + fn set + 'static>(self, stage: S) -> StageSetBuilder { self.builder().set(stage) } } -struct StageEntry { - stage: Box>, +struct StageEntry +where + E: GenericBlockExecutionError, +{ + stage: Box>, enabled: bool, } -impl Debug for StageEntry { +impl Debug for StageEntry +where + E: GenericBlockExecutionError, +{ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("StageEntry") .field("stage", &self.stage.id()) diff --git a/crates/stages/api/src/stage.rs b/crates/stages/api/src/stage.rs index 368269782a29..222e8e6d6ad2 100644 --- a/crates/stages/api/src/stage.rs +++ b/crates/stages/api/src/stage.rs @@ -1,5 +1,6 @@ use crate::{error::StageError, StageCheckpoint, StageId}; use alloy_primitives::{BlockNumber, TxNumber}; +use reth_errors::GenericBlockExecutionError; use reth_provider::{BlockReader, ProviderError}; use std::{ cmp::{max, min}, @@ -190,7 +191,10 @@ pub struct UnwindOutput { /// /// Stages receive [`DBProvider`](reth_provider::DBProvider). #[auto_impl::auto_impl(Box)] -pub trait Stage: Send + Sync { +pub trait Stage: Send + Sync +where + E: GenericBlockExecutionError, +{ /// Get the ID of the stage. /// /// Stage IDs must be unique. @@ -224,21 +228,25 @@ pub trait Stage: Send + Sync { &mut self, _cx: &mut Context<'_>, _input: ExecInput, - ) -> Poll> { + ) -> Poll>> { Poll::Ready(Ok(())) } /// Execute the stage. /// It is expected that the stage will write all necessary data to the database /// upon invoking this method. - fn execute(&mut self, provider: &Provider, input: ExecInput) -> Result; + fn execute( + &mut self, + provider: &Provider, + input: ExecInput, + ) -> Result>; /// Post execution commit hook. /// /// This is called after the stage has been executed and the data has been committed by the /// provider. The stage may want to pass some data from [`Self::execute`] via the internal /// field. - fn post_execute_commit(&mut self) -> Result<(), StageError> { + fn post_execute_commit(&mut self) -> Result<(), StageError> { Ok(()) } @@ -247,14 +255,14 @@ pub trait Stage: Send + Sync { &mut self, provider: &Provider, input: UnwindInput, - ) -> Result; + ) -> Result>; /// Post unwind commit hook. /// /// This is called after the stage has been unwound and the data has been committed by the /// provider. The stage may want to pass some data from [`Self::unwind`] via the internal /// field. 
- fn post_unwind_commit(&mut self) -> Result<(), StageError> { + fn post_unwind_commit(&mut self) -> Result<(), StageError> { Ok(()) } } diff --git a/crates/stages/api/src/test_utils.rs b/crates/stages/api/src/test_utils.rs index 1f15e55140ed..d99ad922dfa9 100644 --- a/crates/stages/api/src/test_utils.rs +++ b/crates/stages/api/src/test_utils.rs @@ -1,6 +1,7 @@ #![allow(missing_docs)] use crate::{ExecInput, ExecOutput, Stage, StageError, StageId, UnwindInput, UnwindOutput}; +use reth_errors::GenericBlockExecutionError; use std::{ collections::VecDeque, sync::{ @@ -13,15 +14,18 @@ use std::{ /// /// This can be used to mock expected outputs of [`Stage::execute`] and [`Stage::unwind`] #[derive(Debug)] -pub struct TestStage { +pub struct TestStage { id: StageId, - exec_outputs: VecDeque>, - unwind_outputs: VecDeque>, + exec_outputs: VecDeque>>, + unwind_outputs: VecDeque>>, post_execute_commit_counter: Arc, post_unwind_commit_counter: Arc, } -impl TestStage { +impl TestStage +where + E: GenericBlockExecutionError, +{ pub fn new(id: StageId) -> Self { Self { id, diff --git a/crates/stages/stages/src/stages/execution.rs b/crates/stages/stages/src/stages/execution.rs index 5557beda519a..d3dbe716aec7 100644 --- a/crates/stages/stages/src/stages/execution.rs +++ b/crates/stages/stages/src/stages/execution.rs @@ -360,7 +360,7 @@ where header.parent_hash(), NumHash::new(header.number(), header.hash_slow()), )), - error: BlockErrorKind::Execution(error), + error: BlockErrorKind::::Execution(error), } }) })?;
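
A minimal, self-contained sketch of the pattern this last patch introduces: stage errors become generic over a block-execution error type that can report state-root failures through a shared trait. The trait bounds are simplified here (the real `GenericBlockExecutionError` also carries `From<_>` conversion bounds), and `GenericExecError`, `MyExecError`, and `StageErrorSketch` are illustrative names, not the actual `reth_execution_errors` / `reth_stages_api` definitions.

use core::fmt::{self, Display};

/// Simplified stand-in for the trait added in this series; the real trait
/// also requires conversions from provider/internal error types.
trait GenericExecError: Display {
    /// Whether the failure was a state-root mismatch.
    fn is_state_root_error(&self) -> bool;
}

/// Hypothetical chain-specific execution error (illustrative only).
enum MyExecError {
    StateRoot,
    Other(String),
}

impl Display for MyExecError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::StateRoot => write!(f, "state root mismatch"),
            Self::Other(msg) => write!(f, "{msg}"),
        }
    }
}

impl GenericExecError for MyExecError {
    fn is_state_root_error(&self) -> bool {
        matches!(self, Self::StateRoot)
    }
}

/// Mirrors the shape of the now-generic stage error: the execution variant
/// holds the chain-specific error instead of a hard-coded concrete type.
enum StageErrorSketch<E> {
    Validation(String),
    Execution(E),
}

impl<E: GenericExecError> StageErrorSketch<E> {
    fn is_state_root_error(&self) -> bool {
        match self {
            Self::Validation(_) => false,
            Self::Execution(err) => err.is_state_root_error(),
        }
    }
}

fn main() {
    // The pipeline can ask about state-root failures without naming the
    // concrete execution error type.
    let err: StageErrorSketch<MyExecError> =
        StageErrorSketch::Execution(MyExecError::StateRoot);
    assert!(err.is_state_root_error());

    let other = StageErrorSketch::Execution(MyExecError::Other("oops".into()));
    assert!(!other.is_state_root_error());

    let _validation = StageErrorSketch::<MyExecError>::Validation("bad header".into());
}

The apparent intent of threading the error parameter through `Stage`, `StageError`, and the pipeline types is that callers only need the `is_state_root_error` query plus the declared conversions, so chain-specific executors (such as the OP variant earlier in this series) no longer have to flatten their errors into an opaque `BlockExecutionError::other` value.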