From a442ca2739d2e4b1999c9764d111f93cc87c31f1 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 3 Nov 2025 16:46:55 +1100 Subject: [PATCH 1/4] Bump version to v8.0.0 --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 6 +++--- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1efb1fbc706..7c582745989 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -918,7 +918,7 @@ dependencies = [ [[package]] name = "beacon_node" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_utils", "beacon_chain", @@ -1193,7 +1193,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "beacon_node", "bytes", @@ -5064,7 +5064,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_utils", "beacon_chain", @@ -5574,7 +5574,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "8.0.0-rc.2" +version = "8.0.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 56c2fb410cc..6a54d3342e6 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "8.0.0-rc.2" +version = "8.0.0" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index bd7b37926fc..a0965fa5489 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v8.0.0-rc.2-", - fallback = "Lighthouse/v8.0.0-rc.2" + prefix = "Lighthouse/v8.0.0-", + fallback = "Lighthouse/v8.0.0" ); /// Returns the first eight characters of the latest commit hash for this build. @@ -54,7 +54,7 @@ pub fn version_with_platform() -> String { /// /// `1.5.1` pub fn version() -> &'static str { - "8.0.0-rc.2" + "8.0.0" } /// Returns the name of the current client running. 
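Aside: a quick consistency check for the constants touched by this bump. This is a minimal sketch that assumes only the `lighthouse_version::VERSION` constant and `version()` function visible in the `common/lighthouse_version/src/lib.rs` diff above; it is illustrative and not part of the diffs:

```rust
// Sanity check for the version bump, using only the two public items shown above.
#[test]
fn bumped_version_strings_agree() {
    // `VERSION` is produced by `git_version!` with prefix "Lighthouse/v8.0.0-",
    // falling back to "Lighthouse/v8.0.0" when git metadata is unavailable.
    assert!(lighthouse_version::VERSION.starts_with("Lighthouse/v8.0.0"));
    // `version()` returns the bare semver string that the Cargo.toml files use.
    assert_eq!(lighthouse_version::version(), "8.0.0");
}
```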
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 2698073b5fe..6b7aeb886ce 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "8.0.0-rc.2" +version = "8.0.0" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index a3240c6d7c8..0d4129817af 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "8.0.0-rc.2" +version = "8.0.0" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From cf7b472dab4ee4a199ac50fca086bba8c114bd27 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 3 Nov 2025 17:35:51 +1100 Subject: [PATCH 2/4] Add mainnet configs (#8344) Squashed commit of the following: commit bc301da2b2c2d088140683dd70f1c56457d134fd Author: Tan Chee Keong Date: Mon Nov 3 12:06:29 2025 +0800 update comment in config commit dc6576ed3cd4d192251e408c5dbc31800d1395dc Author: Michael Sproul Date: Mon Nov 3 14:57:31 2025 +1100 Address review comments commit 841a42f874cc75223248db487f126b278ab6c855 Author: Michael Sproul Date: Mon Nov 3 12:11:59 2025 +1100 Appease Clippy commit 17d02b8978a03c44a0700439bd6c655d8d75b497 Author: Michael Sproul Date: Mon Nov 3 11:37:46 2025 +1100 Fix tests by using correct max_blobs_per_block for epoch commit b941dec275adcc73e82cda5d4bf4b967adafda71 Author: Eitan Seri-Levi Date: Sun Nov 2 13:41:31 2025 -0800 Fmt commit e43209c62be82ad8e9fe74097037c1a4327a04b1 Author: Eitan Seri-Levi Date: Sun Nov 2 13:22:48 2025 -0800 Fix test commit 991804e60135a726d439fbe215628d1a5cbdbda0 Author: Eitan Seri-Levi Date: Sun Nov 2 10:36:22 2025 -0800 Add mainnet configs --- .../src/data_column_verification.rs | 16 ++++ .../test_utils/execution_block_generator.rs | 40 +++++++- .../mainnet/config.yaml | 95 +++++++++++++------ consensus/types/presets/gnosis/electra.yaml | 3 +- consensus/types/src/chain_spec.rs | 34 ++++++- 5 files changed, 150 insertions(+), 38 deletions(-) diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 07f85b045ab..61fc0677b15 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -850,6 +850,22 @@ mod test { .build(); harness.advance_slot(); + // Check block generator timestamp conversion sanity. 
+ { + let exec_block_generator = harness.execution_block_generator(); + assert_eq!( + exec_block_generator + .timestamp_to_slot_post_capella(exec_block_generator.osaka_time.unwrap()), + 0 + ); + assert_eq!( + exec_block_generator.timestamp_to_slot_post_capella( + exec_block_generator.osaka_time.unwrap() + harness.spec.seconds_per_slot + ), + 1 + ); + } + let verify_fn = |column_sidecar: DataColumnSidecar| { GossipVerifiedDataColumn::<_>::new_for_block_publishing( column_sidecar.into(), diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index f1d07ae258d..44e72cba6be 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -21,7 +21,7 @@ use types::{ Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, - KzgProofs, Transaction, Transactions, Uint256, + KzgProofs, Slot, Transaction, Transactions, Uint256, }; use super::DEFAULT_TERMINAL_BLOCK; @@ -265,6 +265,37 @@ impl ExecutionBlockGenerator { ForkName::Bellatrix } + /// Get the timestamp at which `fork` activates. + /// + /// This function will panic if the `fork` is not enabled or is `<= ForkName::Bellatrix`. + pub fn get_fork_timestamp_post_capella(&self, fork: ForkName) -> u64 { + match fork { + ForkName::Gloas => self.amsterdam_time, + ForkName::Fulu => self.osaka_time, + ForkName::Electra => self.prague_time, + ForkName::Deneb => self.cancun_time, + ForkName::Capella => self.shanghai_time, + _ => panic!("only the Capella fork or later is supported"), + } + .unwrap_or_else(|| panic!("fork is {fork} but no corresponding timestamp is set")) + } + + /// This is a slightly nasty method for converting timestamps to slots, but it will suffice + /// until we can plumb through a slot clock. + pub fn timestamp_to_slot_post_capella(&self, timestamp: u64) -> Slot { + let fork = self.get_fork_at_timestamp(timestamp); + let fork_epoch = self.spec.fork_epoch(fork).unwrap(); + let fork_timestamp = self.get_fork_timestamp_post_capella(fork); + + // Number of slots since fork. + let slot_offset = timestamp + .checked_sub(fork_timestamp) + .expect("timestamp should be >= fork timestamp") + / self.spec.seconds_per_slot; + + fork_epoch.start_slot(E::slots_per_epoch()) + Slot::new(slot_offset) + } + pub fn execution_block_by_number(&self, number: u64) -> Option { self.block_by_number(number) .map(|block| block.as_execution_block(self.terminal_total_difficulty)) @@ -734,9 +765,10 @@ impl ExecutionBlockGenerator { if fork_name.deneb_enabled() { // get random number between 0 and Max Blobs let mut rng = self.rng.lock(); - // TODO(EIP-7892): see FIXME below - // FIXME: this will break with BPO forks. This function needs to calculate the epoch based on block timestamp.. 
- let max_blobs = self.spec.max_blobs_per_block_within_fork(fork_name) as usize; + let epoch = self + .timestamp_to_slot_post_capella(execution_payload.timestamp()) + .epoch(E::slots_per_epoch()); + let max_blobs = self.spec.max_blobs_per_block(epoch) as usize; let num_blobs = rng.random_range(self.min_blobs_count..=max_blobs); let (bundle, transactions) = generate_blobs(num_blobs, fork_name)?; for tx in Vec::from(transactions) { diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index ca7f85b5122..49168018cbe 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -6,7 +6,9 @@ PRESET_BASE: 'mainnet' # Free-form short name of the network that this configuration applies to - known # canonical network names include: # * 'mainnet' - there can be only one +# * 'sepolia' - testnet # * 'holesky' - testnet +# * 'hoodi' - testnet # Must match the regex: [a-z0-9\-] CONFIG_NAME: 'mainnet' @@ -52,23 +54,37 @@ ELECTRA_FORK_VERSION: 0x05000000 ELECTRA_FORK_EPOCH: 364032 # May 7, 2025, 10:05:11am UTC # Fulu FULU_FORK_VERSION: 0x06000000 -FULU_FORK_EPOCH: 18446744073709551615 +FULU_FORK_EPOCH: 411392 # December 3, 2025, 09:49:11pm UTC # Gloas GLOAS_FORK_VERSION: 0x07000000 GLOAS_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- -# 12 seconds +# 12 seconds (*deprecated*) SECONDS_PER_SLOT: 12 +# 12000 milliseconds +SLOT_DURATION_MS: 12000 # 14 (estimate from Eth1 mainnet) SECONDS_PER_ETH1_BLOCK: 14 -# 2**8 (= 256) epochs ~27 hours +# 2**8 (= 256) epochs MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 -# 2**8 (= 256) epochs ~27 hours +# 2**8 (= 256) epochs SHARD_COMMITTEE_PERIOD: 256 -# 2**11 (= 2,048) Eth1 blocks ~8 hours +# 2**11 (= 2,048) Eth1 blocks ETH1_FOLLOW_DISTANCE: 2048 +# 1667 basis points, ~17% of SLOT_DURATION_MS +PROPOSER_REORG_CUTOFF_BPS: 1667 +# 3333 basis points, ~33% of SLOT_DURATION_MS +ATTESTATION_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +AGGREGATE_DUE_BPS: 6667 + +# Altair +# 3333 basis points, ~33% of SLOT_DURATION_MS +SYNC_MESSAGE_DUE_BPS: 3333 +# 6667 basis points, ~67% of SLOT_DURATION_MS +CONTRIBUTION_DUE_BPS: 6667 # Validator cycle # --------------------------------------------------------------- @@ -78,13 +94,21 @@ INACTIVITY_SCORE_BIAS: 4 INACTIVITY_SCORE_RECOVERY_RATE: 16 # 2**4 * 10**9 (= 16,000,000,000) Gwei EJECTION_BALANCE: 16000000000 -# 2**2 (= 4) +# 2**2 (= 4) validators MIN_PER_EPOCH_CHURN_LIMIT: 4 # 2**16 (= 65,536) CHURN_LIMIT_QUOTIENT: 65536 -# [New in Deneb:EIP7514] 2**3 (= 8) + +# Deneb +# 2**3 (= 8) (*deprecated*) MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 +# Electra +# 2**7 * 10**9 (= 128,000,000,000) Gwei +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) Gwei +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 + # Fork choice # --------------------------------------------------------------- # 40% @@ -93,7 +117,7 @@ PROPOSER_SCORE_BOOST: 40 REORG_HEAD_WEIGHT_THRESHOLD: 20 # 160% REORG_PARENT_WEIGHT_THRESHOLD: 160 -# `2` epochs +# 2 epochs REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 # Deposit contract @@ -105,18 +129,19 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Networking # --------------------------------------------------------------- -# `10 * 2**20` (= 10485760, 10 MiB) +# 10 * 2**20 (= 10,485,760) bytes, 
10 MiB MAX_PAYLOAD_SIZE: 10485760 -# `2**10` (= 1024) +# 2**10 (= 1,024) blocks MAX_REQUEST_BLOCKS: 1024 -# `2**8` (= 256) +# 2**8 (= 256) epochs EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 -# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +# MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 (= 33,024) epochs MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 # 5s TTFB_TIMEOUT: 5 # 10s RESP_TIMEOUT: 10 +# 2**5 (= 32) slots ATTESTATION_PROPAGATION_SLOT_RANGE: 32 # 500ms MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 @@ -124,45 +149,59 @@ MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 # 2 subnets per node SUBNETS_PER_NODE: 2 -# 2**8 (= 64) +# 2**6 (= 64) subnets ATTESTATION_SUBNET_COUNT: 64 +# 0 bits ATTESTATION_SUBNET_EXTRA_BITS: 0 -# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS (= 6 + 0) bits ATTESTATION_SUBNET_PREFIX_BITS: 6 ATTESTATION_SUBNET_SHUFFLING_PREFIX_BITS: 3 # Deneb -# `2**7` (=128) +# 2**7 (= 128) blocks MAX_REQUEST_BLOCKS_DENEB: 128 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK -MAX_REQUEST_BLOB_SIDECARS: 768 -# `2**12` (= 4096 epochs, ~18 days) +# 2**12 (= 4,096) epochs MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 -# `6` +# 6 subnets BLOB_SIDECAR_SUBNET_COUNT: 6 -# `uint64(6)` +# 6 blobs MAX_BLOBS_PER_BLOCK: 6 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK (= 128 * 6) sidecars +MAX_REQUEST_BLOB_SIDECARS: 768 # Electra -# 2**7 * 10**9 (= 128,000,000,000) -MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 -# 2**8 * 10**9 (= 256,000,000,000) -MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 -# `9` +# 9 subnets BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 -# `uint64(9)` +# 9 blobs MAX_BLOBS_PER_BLOCK_ELECTRA: 9 -# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA (= 128 * 9) sidecars MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 # Fulu +# 2**7 (= 128) groups NUMBER_OF_CUSTODY_GROUPS: 128 +# 2**7 (= 128) subnets DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +# MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS (= 128 * 128) sidecars MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +# 2**3 (= 8) samples SAMPLES_PER_SLOT: 8 +# 2**2 (= 4) sidecars CUSTODY_REQUIREMENT: 4 +# 2**3 (= 8) sidecars VALIDATOR_CUSTODY_REQUIREMENT: 8 +# 2**5 * 10**9 (= 32,000,000,000) Gwei BALANCE_PER_ADDITIONAL_CUSTODY_GROUP: 32000000000 +# 2**12 (= 4,096) epochs MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS: 4096 -# Gloas \ No newline at end of file +# Blob Scheduling +# --------------------------------------------------------------- + +BLOB_SCHEDULE: + - EPOCH: 412672 # December 9, 2025, 02:21:11pm UTC + MAX_BLOBS_PER_BLOCK: 15 + - EPOCH: 419072 # January 7, 2026, 01:01:11am UTC + MAX_BLOBS_PER_BLOCK: 21 + +# Gloas diff --git a/consensus/types/presets/gnosis/electra.yaml b/consensus/types/presets/gnosis/electra.yaml index 42afbb233ed..6885667c6e1 100644 --- a/consensus/types/presets/gnosis/electra.yaml +++ b/consensus/types/presets/gnosis/electra.yaml @@ -41,8 +41,7 @@ MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 # Withdrawals processing # --------------------------------------------------------------- -# 2**3 ( = 8) pending withdrawals -MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 +MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 6 # Pending deposits processing # --------------------------------------------------------------- diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 
93f5140383a..3565c714e06 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -87,12 +87,18 @@ pub struct ChainSpec { */ pub genesis_delay: u64, pub seconds_per_slot: u64, + pub slot_duration_ms: u64, pub min_attestation_inclusion_delay: u64, pub min_seed_lookahead: Epoch, pub max_seed_lookahead: Epoch, pub min_epochs_to_inactivity_penalty: u64, pub min_validator_withdrawability_delay: Epoch, pub shard_committee_period: u64, + pub proposer_reorg_cutoff_bps: u64, + pub attestation_due_bps: u64, + pub aggregate_due_bps: u64, + pub sync_message_due_bps: u64, + pub contribution_due_bps: u64, /* * Reward and penalty quotients @@ -964,12 +970,18 @@ impl ChainSpec { */ genesis_delay: 604800, // 7 days seconds_per_slot: 12, + slot_duration_ms: 12000, min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), max_seed_lookahead: Epoch::new(4), min_epochs_to_inactivity_penalty: 4, min_validator_withdrawability_delay: Epoch::new(256), shard_committee_period: 256, + proposer_reorg_cutoff_bps: 1667, + attestation_due_bps: 3333, + aggregate_due_bps: 6667, + sync_message_due_bps: 3333, + contribution_due_bps: 6667, /* * Reward and penalty quotients @@ -1098,7 +1110,7 @@ impl ChainSpec { * Fulu hard fork params */ fulu_fork_version: [0x06, 0x00, 0x00, 0x00], - fulu_fork_epoch: None, + fulu_fork_epoch: Some(Epoch::new(411392)), custody_requirement: 4, number_of_custody_groups: 128, data_column_sidecar_subnet_count: 128, @@ -1158,7 +1170,16 @@ impl ChainSpec { /* * Networking Fulu specific */ - blob_schedule: BlobSchedule::default(), + blob_schedule: BlobSchedule::new(vec![ + BlobParameters { + epoch: Epoch::new(412672), + max_blobs_per_block: 15, + }, + BlobParameters { + epoch: Epoch::new(419072), + max_blobs_per_block: 21, + }, + ]), min_epochs_for_data_column_sidecars_requests: default_min_epochs_for_data_column_sidecars_requests(), max_data_columns_by_root_request: default_data_columns_by_root_request(), @@ -1310,12 +1331,18 @@ impl ChainSpec { */ genesis_delay: 6000, // 100 minutes seconds_per_slot: 5, + slot_duration_ms: 5000, min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), max_seed_lookahead: Epoch::new(4), min_epochs_to_inactivity_penalty: 4, min_validator_withdrawability_delay: Epoch::new(256), shard_committee_period: 256, + proposer_reorg_cutoff_bps: 1667, + attestation_due_bps: 3333, + aggregate_due_bps: 6667, + sync_message_due_bps: 3333, + contribution_due_bps: 6667, /* * Reward and penalty quotients @@ -1429,8 +1456,7 @@ impl ChainSpec { .expect("pow does not overflow"), whistleblower_reward_quotient_electra: u64::checked_pow(2, 12) .expect("pow does not overflow"), - max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 3) - .expect("pow does not overflow"), + max_pending_partials_per_withdrawals_sweep: 6, min_per_epoch_churn_limit_electra: option_wrapper(|| { u64::checked_pow(2, 7)?.checked_mul(u64::checked_pow(10, 9)?) 
}) From 385478174fd844fd5eccd04b72b9dd996735bb17 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 3 Nov 2025 17:36:46 +1100 Subject: [PATCH 3/4] Proposer duties backwards compat (#8335) Squashed commit of the following: commit 258c13b8bcf61eedda735cb25b74a34cdd114ac6 Author: Michael Sproul Date: Thu Oct 30 17:32:14 2025 +1100 Fix broken test commit f5368c855d3967bdcfcb70d1fd0cb34ff3c2e792 Author: Michael Sproul Date: Thu Oct 30 13:52:33 2025 +1100 Use legacy dependent root in v1 proposer duties endpoint commit e88ffd17b49e03fcab700b832bee83597f1a6e3d Author: Michael Sproul Date: Thu Oct 30 13:06:07 2025 +1100 Start working on backwards compat --- .../beacon_chain/src/beacon_proposer_cache.rs | 23 ++++++++++++++-- beacon_node/beacon_chain/tests/store_tests.rs | 9 ++++--- beacon_node/http_api/src/proposer_duties.rs | 27 +++++++++++++------ .../http_api/tests/interactive_tests.rs | 3 +-- consensus/types/src/beacon_state.rs | 16 +++++++++++ testing/ef_tests/src/cases/fork_choice.rs | 2 +- 6 files changed, 64 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index 6effce49f8b..bd6460eba7d 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -166,10 +166,17 @@ impl BeaconProposerCache { } /// Compute the proposer duties using the head state without cache. +/// +/// Return: +/// - Proposer indices. +/// - True dependent root. +/// - Legacy dependent root (last block of epoch `N - 1`). +/// - Head execution status. +/// - Fork at `request_epoch`. pub fn compute_proposer_duties_from_head( request_epoch: Epoch, chain: &BeaconChain, -) -> Result<(Vec, Hash256, ExecutionStatus, Fork), BeaconChainError> { +) -> Result<(Vec, Hash256, Hash256, ExecutionStatus, Fork), BeaconChainError> { // Atomically collect information about the head whilst holding the canonical head `Arc` as // short as possible. let (mut state, head_state_root, head_block_root) = { @@ -203,11 +210,23 @@ pub fn compute_proposer_duties_from_head( .proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root, &chain.spec) .map_err(BeaconChainError::from)?; + // This is only required because the V1 proposer duties endpoint spec wasn't updated for Fulu. We + // can delete this once the V1 endpoint is deprecated at the Glamsterdam fork. + let legacy_dependent_root = state + .legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root) + .map_err(BeaconChainError::from)?; + // Use fork_at_epoch rather than the state's fork, because post-Fulu we may not have advanced // the state completely into the new epoch. let fork = chain.spec.fork_at_epoch(request_epoch); - Ok((indices, dependent_root, execution_status, fork)) + Ok(( + indices, + dependent_root, + legacy_dependent_root, + execution_status, + fork, + )) } /// If required, advance `state` to the epoch required to determine proposer indices in `target_epoch`. 
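Aside: the new `legacy_dependent_root` keys v1 proposer duties for epoch `N` by the block root at the last slot of epoch `N - 1`, exactly as `legacy_proposer_shuffling_decision_root_at_epoch` computes later in this patch. Below is a minimal arithmetic sketch using plain integers instead of Lighthouse's `Epoch`/`Slot` types; it is illustrative and not part of the diffs:

```rust
// Decision slot for the legacy dependent root: the last slot of `request_epoch - 1`,
// saturating at epoch 0 just like `epoch.saturating_sub(1u64).end_slot(...)` in the patch.
fn legacy_decision_slot(request_epoch: u64, slots_per_epoch: u64) -> u64 {
    let prev_epoch = request_epoch.saturating_sub(1);
    (prev_epoch + 1) * slots_per_epoch - 1
}

fn main() {
    // With mainnet's 32 slots per epoch, v1 duties for epoch 10 are keyed by the block
    // root at slot 319 (the last slot of epoch 9), whereas the "true" dependent root can
    // point to an earlier block post-Fulu because of proposer lookahead.
    assert_eq!(legacy_decision_slot(10, 32), 319);
    assert_eq!(legacy_decision_slot(0, 32), 31); // saturates at epoch 0
}
```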
diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 7891b224321..0c83244f447 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -1561,7 +1561,7 @@ async fn proposer_duties_from_head_fulu() { // Compute the proposer duties at the next epoch from the head let next_epoch = head_state.next_epoch().unwrap(); - let (_indices, dependent_root, _, fork) = + let (_indices, dependent_root, legacy_dependent_root, _, fork) = compute_proposer_duties_from_head(next_epoch, &harness.chain).unwrap(); assert_eq!( @@ -1570,6 +1570,8 @@ async fn proposer_duties_from_head_fulu() { .proposer_shuffling_decision_root_at_epoch(next_epoch, head_block_root.into(), spec) .unwrap() ); + assert_ne!(dependent_root, legacy_dependent_root); + assert_eq!(legacy_dependent_root, Hash256::from(head_block_root)); assert_eq!(fork, head_state.fork()); } @@ -1617,7 +1619,7 @@ async fn proposer_lookahead_gloas_fork_epoch() { assert_eq!(head_state.current_epoch(), gloas_fork_epoch - 1); // Compute the proposer duties at the fork epoch from the head. - let (indices, dependent_root, _, fork) = + let (indices, dependent_root, legacy_dependent_root, _, fork) = compute_proposer_duties_from_head(gloas_fork_epoch, &harness.chain).unwrap(); assert_eq!( @@ -1630,6 +1632,7 @@ async fn proposer_lookahead_gloas_fork_epoch() { ) .unwrap() ); + assert_ne!(dependent_root, legacy_dependent_root); assert_ne!(fork, head_state.fork()); assert_eq!(fork, spec.fork_at_epoch(gloas_fork_epoch)); @@ -1639,7 +1642,7 @@ async fn proposer_lookahead_gloas_fork_epoch() { .add_attested_blocks_at_slots(head_state, head_state_root, &gloas_slots, &all_validators) .await; - let (no_lookahead_indices, no_lookahead_dependent_root, _, no_lookahead_fork) = + let (no_lookahead_indices, no_lookahead_dependent_root, _, _, no_lookahead_fork) = compute_proposer_duties_from_head(gloas_fork_epoch, &harness.chain).unwrap(); assert_eq!(no_lookahead_indices, indices); diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 78f99c475ce..1ebb1747851 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -60,13 +60,13 @@ pub fn proposer_duties( .safe_add(1) .map_err(warp_utils::reject::arith_error)? 
{ - let (proposers, dependent_root, execution_status, _fork) = + let (proposers, _dependent_root, legacy_dependent_root, execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response( chain, request_epoch, - dependent_root, + legacy_dependent_root, execution_status.is_optimistic_or_invalid(), proposers, ) @@ -116,6 +116,11 @@ fn try_proposer_duties_from_cache( .beacon_state .proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root, &chain.spec) .map_err(warp_utils::reject::beacon_state_error)?; + let legacy_dependent_root = head + .snapshot + .beacon_state + .legacy_proposer_shuffling_decision_root_at_epoch(request_epoch, head_block_root) + .map_err(warp_utils::reject::beacon_state_error)?; let execution_optimistic = chain .is_optimistic_or_invalid_head_block(head_block) .map_err(warp_utils::reject::unhandled_error)?; @@ -129,7 +134,7 @@ fn try_proposer_duties_from_cache( convert_to_api_response( chain, request_epoch, - head_decision_root, + legacy_dependent_root, execution_optimistic, indices.to_vec(), ) @@ -151,7 +156,7 @@ fn compute_and_cache_proposer_duties( current_epoch: Epoch, chain: &BeaconChain, ) -> Result { - let (indices, dependent_root, execution_status, fork) = + let (indices, dependent_root, legacy_dependent_root, execution_status, fork) = compute_proposer_duties_from_head(current_epoch, chain) .map_err(warp_utils::reject::unhandled_error)?; @@ -166,7 +171,7 @@ fn compute_and_cache_proposer_duties( convert_to_api_response( chain, current_epoch, - dependent_root, + legacy_dependent_root, execution_status.is_optimistic_or_invalid(), indices, ) @@ -229,12 +234,18 @@ fn compute_historic_proposer_duties( // We can supply the genesis block root as the block root since we know that the only block that // decides its own root is the genesis block. - let dependent_root = state - .proposer_shuffling_decision_root(chain.genesis_block_root, &chain.spec) + let legacy_dependent_root = state + .legacy_proposer_shuffling_decision_root_at_epoch(epoch, chain.genesis_block_root) .map_err(BeaconChainError::from) .map_err(warp_utils::reject::unhandled_error)?; - convert_to_api_response(chain, epoch, dependent_root, execution_optimistic, indices) + convert_to_api_response( + chain, + epoch, + legacy_dependent_root, + execution_optimistic, + indices, + ) } /// Converts the internal representation of proposer duties into one that is compatible with the diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 5b016a7de4e..a9de737d657 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1017,10 +1017,9 @@ async fn proposer_duties_with_gossip_tolerance() { assert_eq!( proposer_duties_tolerant_current_epoch.dependent_root, head_state - .proposer_shuffling_decision_root_at_epoch( + .legacy_proposer_shuffling_decision_root_at_epoch( tolerant_current_epoch, head_block_root, - spec ) .unwrap() ); diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 1bd4927fe87..9c4e50dc613 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -911,6 +911,22 @@ impl BeaconState { } } + /// Returns the block root at the last slot of `epoch - 1`. + /// + /// This can be deleted after Glamsterdam and the removal of the v1 proposer duties endpoint. 
+ pub fn legacy_proposer_shuffling_decision_root_at_epoch( + &self, + epoch: Epoch, + head_block_root: Hash256, + ) -> Result { + let decision_slot = epoch.saturating_sub(1u64).end_slot(E::slots_per_epoch()); + if self.slot() <= decision_slot { + Ok(head_block_root) + } else { + self.get_block_root(decision_slot).copied() + } + } + /// Returns the block root which decided the proposer shuffling for the current epoch. This root /// can be used to key this proposer shuffling. /// diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 47b99023455..8e9d438a243 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -920,7 +920,7 @@ impl Tester { let cached_head = self.harness.chain.canonical_head.cached_head(); let next_slot = cached_head.snapshot.beacon_block.slot() + 1; let next_slot_epoch = next_slot.epoch(E::slots_per_epoch()); - let (proposer_indices, decision_root, _, fork) = + let (proposer_indices, decision_root, _, _, fork) = compute_proposer_duties_from_head(next_slot_epoch, &self.harness.chain).unwrap(); let proposer_index = proposer_indices[next_slot.as_usize() % E::slots_per_epoch() as usize]; From 5bd667cf15489e6a6b28a42e3ab8f41308e70415 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 3 Nov 2025 17:37:09 +1100 Subject: [PATCH 4/4] Ensure custody backfill sync couples all responses before importing (#8339) Squashed commit of the following: commit c32c74533d02df76b5860eb3972371f287ea8ef3 Author: Jimmy Chen Date: Mon Nov 3 16:33:06 2025 +1100 Fix tests. commit d9b2515cc7a8d6b059288ca322e520cf4c969aa0 Author: Michael Sproul Date: Mon Nov 3 16:03:46 2025 +1100 Apply suggestion from @michaelsproul commit 7edcb6dd8627545d2030c254eca8e1c2e6d7ae67 Author: Jimmy Chen Date: Mon Nov 3 15:59:44 2025 +1100 Address review comments commit bb1ace845ea6427ea745def16f99ac164f3058f8 Author: Jimmy Chen Date: Mon Nov 3 15:43:45 2025 +1100 Fix build commit 580b584a8f0838906171a421c3b404ae8a905e07 Author: Jimmy Chen Date: Mon Nov 3 15:39:01 2025 +1100 Address review comments. 
commit 984d1afe45ba2fae1735ff2a026df4d26f4ba9d9 Merge: 9515494ac6 b57d046c4a Author: Jimmy Chen Date: Mon Nov 3 14:47:29 2025 +1100 Merge remote-tracking branch 'origin/release-v8.0' into fork/eserilev/custody-backfill-sync-fix commit 9515494ac6e16158bc91a34096e2db536e634da5 Merge: 56111a8224 55588f7789 Author: Eitan Seri-Levi Date: Sun Nov 2 10:32:01 2025 -0800 Resolve merge conflicts commit 56111a8224381d38d2437806f510ab79f4731f2a Author: Eitan Seri- Levi Date: Thu Oct 30 14:26:03 2025 -0700 Fix comments commit b213fe53e0c60f4345b84e9f220ed8c3946ad91b Author: Eitan Seri- Levi Date: Thu Oct 30 13:44:11 2025 -0700 Resolve merge conflicts commit 1f72e63e7ae429236fbde0487fc74fdf78278fe1 Author: Eitan Seri- Levi Date: Thu Oct 30 13:33:53 2025 -0700 Fix tests commit a63347511048a1533f135100891465af288483a8 Author: Eitan Seri- Levi Date: Thu Oct 30 12:56:53 2025 -0700 lint commit e34c8816bad09053fcedab749468f5d607503e83 Author: Eitan Seri- Levi Date: Thu Oct 30 12:53:56 2025 -0700 resolve merge conflict commit ab042131154c5f61c37d7ebb30dd071e7cd273ee Author: Eitan Seri- Levi Date: Wed Oct 29 16:55:16 2025 -0700 Ensure we've collected all responses before coupling --- .../beacon_chain/src/custody_context.rs | 81 +++++++++++++++++-- .../src/historical_data_columns.rs | 5 -- beacon_node/http_api/src/lib.rs | 32 ++++++++ beacon_node/http_api/src/test_utils.rs | 18 +++++ .../tests/broadcast_validation_tests.rs | 2 + beacon_node/http_api/tests/fork_tests.rs | 2 + .../http_api/tests/interactive_tests.rs | 65 +++++++++++++++ .../src/sync/custody_backfill_sync/mod.rs | 5 +- .../sync/range_data_column_batch_request.rs | 21 ++--- book/src/api_lighthouse.md | 10 +++ common/eth2/src/lighthouse.rs | 13 +++ 11 files changed, 230 insertions(+), 24 deletions(-) diff --git a/beacon_node/beacon_chain/src/custody_context.rs b/beacon_node/beacon_chain/src/custody_context.rs index 9a6f51174ab..a5ef3ed2f65 100644 --- a/beacon_node/beacon_chain/src/custody_context.rs +++ b/beacon_node/beacon_chain/src/custody_context.rs @@ -120,9 +120,7 @@ impl ValidatorRegistrations { let effective_epoch = (current_slot + effective_delay_slots).epoch(E::slots_per_epoch()) + 1; self.epoch_validator_custody_requirements - .entry(effective_epoch) - .and_modify(|old_custody| *old_custody = validator_custody_requirement) - .or_insert(validator_custody_requirement); + .insert(effective_epoch, validator_custody_requirement); Some((effective_epoch, validator_custody_requirement)) } else { None @@ -154,11 +152,25 @@ impl ValidatorRegistrations { }); self.epoch_validator_custody_requirements - .entry(effective_epoch) - .and_modify(|old_custody| *old_custody = latest_validator_custody) - .or_insert(latest_validator_custody); + .insert(effective_epoch, latest_validator_custody); } } + + /// Updates the `epoch -> cgc` map by pruning records before `effective_epoch` + /// while setting the `cgc` at `effective_epoch` to the latest validator custody requirement. 
+ /// + /// This is used to restart custody backfill sync at `effective_epoch` + pub fn reset_validator_custody_requirements(&mut self, effective_epoch: Epoch) { + if let Some(latest_validator_custody_requirements) = + self.latest_validator_custody_requirement() + { + self.epoch_validator_custody_requirements + .retain(|&epoch, _| epoch >= effective_epoch); + + self.epoch_validator_custody_requirements + .insert(effective_epoch, latest_validator_custody_requirements); + }; + } } /// Given the `validator_custody_units`, return the custody requirement based on @@ -535,6 +547,14 @@ impl CustodyContext { .write() .backfill_validator_custody_requirements(effective_epoch, expected_cgc); } + + /// The node is attempting to restart custody backfill. Update the internal records so that + /// custody backfill can start backfilling at `effective_epoch`. + pub fn reset_validator_custody_requirements(&self, effective_epoch: Epoch) { + self.validator_registrations + .write() + .reset_validator_custody_requirements(effective_epoch); + } } /// Indicates that the custody group count (CGC) has increased. @@ -1491,4 +1511,53 @@ mod tests { ); } } + + #[test] + fn reset_validator_custody_requirements() { + let spec = E::default_spec(); + let minimum_cgc = 4u64; + let initial_cgc = 8u64; + let mid_cgc = 16u64; + let final_cgc = 32u64; + + // Setup: Node restart after multiple validator registrations causing CGC increases + let head_epoch = Epoch::new(20); + let epoch_and_cgc_tuples = vec![ + (Epoch::new(0), initial_cgc), + (Epoch::new(10), mid_cgc), + (head_epoch, final_cgc), + ]; + let custody_context = setup_custody_context(&spec, head_epoch, epoch_and_cgc_tuples); + + // Backfill from epoch 20 to 9 + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(9), final_cgc); + + // Reset validator custody requirements to the latest cgc requirements at `head_epoch` up to the boundary epoch + custody_context.reset_validator_custody_requirements(head_epoch); + + // Verify epochs 0 - 19 return the minimum cgc requirement because of the validator custody requirement reset + for epoch in 0..=19 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + minimum_cgc, + ); + } + + // Verify epoch 20 returns a CGC of 32 + assert_eq!( + custody_context.custody_group_count_at_epoch(head_epoch, &spec), + final_cgc + ); + + // Rerun Backfill to epoch 20 + complete_backfill_for_epochs(&custody_context, Epoch::new(20), Epoch::new(0), final_cgc); + + // Verify epochs 0 - 20 return the final cgc requirements + for epoch in 0..=20 { + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(epoch), &spec), + final_cgc, + ); + } + } } diff --git a/beacon_node/beacon_chain/src/historical_data_columns.rs b/beacon_node/beacon_chain/src/historical_data_columns.rs index 9304f065703..6cf947adcb1 100644 --- a/beacon_node/beacon_chain/src/historical_data_columns.rs +++ b/beacon_node/beacon_chain/src/historical_data_columns.rs @@ -89,11 +89,6 @@ impl BeaconChain { .get_data_column(&block_root, &data_column.index)? 
.is_some() { - debug!( block_root = ?block_root, column_index = data_column.index, "Skipping data column import as identical data column exists" ); continue; } if block_root != data_column.block_root() { diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 41cd729a685..9026792b911 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -4604,6 +4604,37 @@ pub fn serve( }, ); + // POST lighthouse/custody/backfill + let post_lighthouse_custody_backfill = warp::path("lighthouse") + .and(warp::path("custody")) + .and(warp::path("backfill")) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + // Calling this endpoint will trigger custody backfill once `effective_epoch` + // is finalized. + let effective_epoch = chain + .canonical_head + .cached_head() + .head_slot() + .epoch(T::EthSpec::slots_per_epoch()) + + 1; + let custody_context = chain.data_availability_checker.custody_context(); + // Reset validator custody requirements to `effective_epoch` with the latest + // cgc requirements. + custody_context.reset_validator_custody_requirements(effective_epoch); + // Update `DataColumnCustodyInfo` to reflect the custody change. + chain.update_data_column_custody_info(Some( + effective_epoch.start_slot(T::EthSpec::slots_per_epoch()), + )); + Ok(()) + }) + }, + ); + // GET lighthouse/analysis/block_rewards let get_lighthouse_block_rewards = warp::path("lighthouse") .and(warp::path("analysis")) @@ -4963,6 +4994,7 @@ pub fn serve( .uor(post_lighthouse_compaction) .uor(post_lighthouse_add_peer) .uor(post_lighthouse_remove_peer) + .uor(post_lighthouse_custody_backfill) .recover(warp_utils::reject::handle_rejection), ), ) diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index fe9e0dff704..27e2a27d35c 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -1,6 +1,7 @@ use crate::{Config, Context}; use beacon_chain::{ BeaconChain, BeaconChainTypes, + custody_context::NodeCustodyType, test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType}, }; use beacon_processor::{ @@ -67,6 +68,20 @@ impl InteractiveTester { None, Config::default(), true, + NodeCustodyType::Fullnode, ) .await } + + pub async fn new_supernode(spec: Option, validator_count: usize) -> Self { + Self::new_with_initializer_and_mutator( + spec, + validator_count, + None, + None, + Config::default(), + true, + NodeCustodyType::Supernode, ) .await } @@ -78,6 +93,7 @@ impl InteractiveTester { mutator: Option>, config: Config, use_mock_builder: bool, + node_custody_type: NodeCustodyType, ) -> Self { let mut harness_builder = BeaconChainHarness::builder(E::default()) .spec_or_default(spec.map(Arc::new)) @@ -93,6 +109,8 @@ impl InteractiveTester { .fresh_ephemeral_store() }; + harness_builder = harness_builder.node_custody_type(node_custody_type); + // Add a mutator for the beacon chain builder which will be called in // `HarnessBuilder::build`. 
if let Some(mutator) = mutator { diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 9427f6fdf35..82723c2b405 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1,3 +1,4 @@ +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::test_utils::test_spec; use beacon_chain::{ GossipVerifiedBlock, IntoGossipVerifiedBlock, WhenSlotSkipped, @@ -1956,6 +1957,7 @@ pub async fn duplicate_block_status_code() { ..Config::default() }, true, + NodeCustodyType::Fullnode, ) .await; diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 62a34612760..50cf866b6a8 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -1,4 +1,5 @@ //! Tests for API behaviour across fork boundaries. +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ StateSkipConfig, test_utils::{DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME, RelativeSyncCommittee}, @@ -426,6 +427,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { None, Default::default(), true, + NodeCustodyType::Fullnode, ) .await; let harness = &tester.harness; diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index a9de737d657..83cb70a7a3a 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -1,4 +1,5 @@ //! Generic tests that make use of the (newer) `InteractiveApiTester` +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::{ ChainConfig, chain_config::{DisallowedReOrgOffsets, ReOrgThreshold}, @@ -76,6 +77,7 @@ async fn state_by_root_pruned_from_fork_choice() { None, Default::default(), false, + NodeCustodyType::Fullnode, ) .await; @@ -433,6 +435,7 @@ pub async fn proposer_boost_re_org_test( })), Default::default(), false, + NodeCustodyType::Fullnode, ) .await; let harness = &tester.harness; @@ -1049,6 +1052,68 @@ async fn proposer_duties_with_gossip_tolerance() { ); } +// Test that a request to `lighthouse/custody/backfill` succeeds by verifying that `CustodyContext` and `DataColumnCustodyInfo` +// have been updated with the correct values. +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn lighthouse_restart_custody_backfill() { + let spec = test_spec::(); + + // Skip pre-Fulu. 
+ if !spec.is_fulu_scheduled() { + return; + } + + let validator_count = 24; + + let tester = InteractiveTester::::new_supernode(Some(spec), validator_count).await; + let harness = &tester.harness; + let spec = &harness.spec; + let client = &tester.client; + let min_cgc = spec.custody_requirement; + let max_cgc = spec.number_of_custody_groups; + + let num_blocks = 2 * E::slots_per_epoch(); + + let custody_context = harness.chain.data_availability_checker.custody_context(); + + harness.advance_slot(); + harness + .extend_chain_with_sync( + num_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + SyncCommitteeStrategy::NoValidators, + LightClientStrategy::Disabled, + ) + .await; + + let cgc_at_head = custody_context.custody_group_count_at_head(spec); + let earliest_data_column_epoch = harness.chain.earliest_custodied_data_column_epoch(); + + assert_eq!(cgc_at_head, max_cgc); + assert_eq!(earliest_data_column_epoch, None); + + custody_context + .update_and_backfill_custody_count_at_epoch(harness.chain.epoch().unwrap(), cgc_at_head); + client.post_lighthouse_custody_backfill().await.unwrap(); + + let cgc_at_head = custody_context.custody_group_count_at_head(spec); + let cgc_at_previous_epoch = + custody_context.custody_group_count_at_epoch(harness.chain.epoch().unwrap() - 1, spec); + let earliest_data_column_epoch = harness.chain.earliest_custodied_data_column_epoch(); + + // `DataColumnCustodyInfo` should have been updated to the head epoch + assert_eq!( + earliest_data_column_epoch, + Some(harness.chain.epoch().unwrap() + 1) + ); + // Cgc requirements should have stayed the same at head + assert_eq!(cgc_at_head, max_cgc); + // Cgc requirements at the previous epoch should be `min_cgc` + // This allows for custody backfill to re-fetch columns for this epoch. + assert_eq!(cgc_at_previous_epoch, min_cgc); +} + // Test that a request for next epoch proposer duties suceeds when the current slot clock is within // gossip clock disparity (500ms) of the new epoch. #[tokio::test(flavor = "multi_thread", worker_threads = 2)] diff --git a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs index 5c5505083f2..bb2c6799f1d 100644 --- a/beacon_node/network/src/sync/custody_backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/custody_backfill_sync/mod.rs @@ -382,11 +382,9 @@ impl CustodyBackFillSync { return None; }; - let mut missing_columns = HashSet::new(); - // Skip all batches (Epochs) that don't have missing columns. 
for epoch in Epoch::range_inclusive_rev(self.to_be_downloaded, column_da_boundary) { - missing_columns = self.beacon_chain.get_missing_columns_for_epoch(epoch); + let missing_columns = self.beacon_chain.get_missing_columns_for_epoch(epoch); if !missing_columns.is_empty() { self.to_be_downloaded = epoch; @@ -445,6 +443,7 @@ impl CustodyBackFillSync { self.include_next_batch() } Entry::Vacant(entry) => { + let missing_columns = self.beacon_chain.get_missing_columns_for_epoch(batch_id); entry.insert(BatchInfo::new( &batch_id, CUSTODY_BACKFILL_EPOCHS_PER_BATCH, diff --git a/beacon_node/network/src/sync/range_data_column_batch_request.rs b/beacon_node/network/src/sync/range_data_column_batch_request.rs index 542d99d97c2..72e2fb2d5b6 100644 --- a/beacon_node/network/src/sync/range_data_column_batch_request.rs +++ b/beacon_node/network/src/sync/range_data_column_batch_request.rs @@ -70,16 +70,17 @@ impl RangeDataColumnBatchRequest { HashMap::new(); let mut column_to_peer_id: HashMap = HashMap::new(); - for column in self - .requests - .values() - .filter_map(|req| req.to_finished()) - .flatten() - { - received_columns_for_slot - .entry(column.slot()) - .or_default() - .push(column.clone()); + for req in self.requests.values() { + let Some(columns) = req.to_finished() else { + return None; + }; + + for column in columns { + received_columns_for_slot + .entry(column.slot()) + .or_default() + .push(column.clone()); + } } // Note: this assumes that only 1 peer is responsible for a column diff --git a/book/src/api_lighthouse.md b/book/src/api_lighthouse.md index 2e694989f93..f804cb9df2e 100644 --- a/book/src/api_lighthouse.md +++ b/book/src/api_lighthouse.md @@ -447,6 +447,16 @@ indicating that all states with slots `>= 0` are available, i.e., full state his on the specific meanings of these fields see the docs on [Checkpoint Sync](./advanced_checkpoint_sync.md#how-to-run-an-archived-node). +## `/lighthouse/custody/backfill` + +Starts a custody backfill sync from the next epoch with the node's latest custody requirements. The sync won't begin immediately, it waits until the next epoch is finalized before triggering. + +This endpoint should only be used to fix nodes that may have partial custody columns due to a prior backfill bug (present in v8.0.0-rc.2). Use with caution as it re-downloads all historic custody data columns and may consume significant bandwidth. + +```bash +curl -X POST "http://localhost:5052/lighthouse/custody/backfill" +``` + ## `/lighthouse/merge_readiness` Returns the current difficulty and terminal total difficulty of the network. Before [The Merge](https://ethereum.org/en/roadmap/merge/) on 15th September 2022, you will see that the current difficulty is less than the terminal total difficulty, An example is shown below: diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index f65b5a07b63..4f9a049e44e 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -208,6 +208,19 @@ impl BeaconNodeHttpClient { self.get(path).await } + /// `POST lighthouse/custody/backfill` + pub async fn post_lighthouse_custody_backfill(&self) -> Result<(), Error> { + let mut path = self.server.full.clone(); + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("lighthouse") + .push("custody") + .push("backfill"); + + self.post(path, &()).await + } + /* * Note: *
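Aside: the final hunk in `range_data_column_batch_request.rs` is the crux of this patch: coupling must wait until every column request has finished, instead of silently skipping unfinished ones via `filter_map(...).flatten()`. Below is a stripped-down sketch of that behavioural change using toy types rather than Lighthouse's; it is illustrative and not part of the diffs:

```rust
// `None` models a request whose `to_finished()` is still pending.
fn couple_when_all_finished(requests: &[Option<Vec<u64>>]) -> Option<Vec<u64>> {
    let mut out = Vec::new();
    for req in requests {
        // Bail out, as the patched loop does, if any request is unfinished; the old
        // `filter_map(...).flatten()` would have skipped it and coupled partial data.
        let columns = req.as_ref()?;
        out.extend(columns.iter().copied());
    }
    Some(out)
}

fn main() {
    // One request still in flight: nothing is coupled yet.
    assert_eq!(couple_when_all_finished(&[Some(vec![1, 2]), None]), None);
    // All requests finished: every column is coupled for import.
    assert_eq!(
        couple_when_all_finished(&[Some(vec![1, 2]), Some(vec![3])]),
        Some(vec![1, 2, 3])
    );
}
```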