|
18 | 18 | leader_schedule_cache::LeaderScheduleCache, |
19 | 19 | next_slots_iterator::NextSlotsIterator, |
20 | 20 | shred::{ |
21 | | - self, ErasureSetId, ProcessShredsStats, ReedSolomonCache, Shred, ShredData, ShredFlags, |
| 21 | + self, |
| 22 | + merkle_tree::{ |
| 23 | + get_proof_size, make_merkle_proof, make_merkle_tree, SIZE_OF_MERKLE_PROOF_ENTRY, |
| 24 | + }, |
| 25 | + ErasureSetId, ProcessShredsStats, ReedSolomonCache, Shred, ShredData, ShredFlags, |
22 | 26 | ShredId, ShredType, Shredder, DATA_SHREDS_PER_FEC_BLOCK, |
23 | 27 | }, |
24 | 28 | slot_stats::{ShredSource, SlotsStats}, |
|
49 | 53 | solana_metrics::datapoint_error, |
50 | 54 | solana_pubkey::Pubkey, |
51 | 55 | solana_runtime::bank::Bank, |
| 56 | + solana_sha256_hasher::hashv, |
52 | 57 | solana_signature::Signature, |
53 | 58 | solana_signer::Signer, |
54 | 59 | solana_storage_proto::{StoredExtendedRewards, StoredTransactionStatusMeta}, |
@@ -869,6 +874,128 @@ impl Blockstore { |
869 | 874 | } |
870 | 875 | } |
871 | 876 |
|
| 877 | + /// Fetches (and populates if needed) the DoubleMerkleMeta for the given block. |
| 878 | + /// Returns the `double_merkle_root`.
| 879 | + /// |
| 880 | + /// Should only be used on full blocks. |
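| | + ///
| | + /// Illustrative usage: once the slot is full,
| | + /// `blockstore.get_or_compute_double_merkle_root(slot, BlockLocation::Original)`
| | + /// returns the cached root if present, otherwise computes, stores, and returns it.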
| 881 | + pub fn get_or_compute_double_merkle_root( |
| 882 | + &self, |
| 883 | + slot: Slot, |
| 884 | + block_location: BlockLocation, |
| 885 | + ) -> std::result::Result<Hash, BlockstoreProcessorError> { |
| 886 | + if let Some(double_merkle_meta) = self |
| 887 | + .double_merkle_meta_cf |
| 888 | + .get((slot, block_location)) |
| 889 | + .expect("Blockstore operations must succeed") |
| 890 | + { |
| 891 | + return Ok(double_merkle_meta.double_merkle_root); |
| 892 | + } |
| 893 | + |
| 894 | + // Compute the double merkle root - the slot must be full at this point
| 895 | + let Some(slot_meta) = self |
| 896 | + .meta_cf |
| 897 | + .get(slot) |
| 898 | + .expect("Blockstore operations must succeed") |
| 899 | + else { |
| 900 | + return Err(BlockstoreProcessorError::FailedToLoadMeta); |
| 901 | + }; |
| 902 | + |
| 903 | + if !slot_meta.is_full() { |
| 904 | + return Err(BlockstoreProcessorError::SlotNotFull(slot, block_location)); |
| 905 | + } |
| 906 | + |
| 907 | + let Some(last_index) = slot_meta.last_index else { |
| 908 | + return Err(BlockstoreProcessorError::SlotNotFull(slot, block_location)); |
| 909 | + }; |
| 910 | + |
| 911 | + // This function is only used post-Alpenglow, so it is implicitly gated by SIMD-0317 as that is a prerequisite
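| | + // The computation below assumes fixed-size FEC sets of DATA_SHREDS_PER_FEC_BLOCK data
| | + // shreds, so the set count follows directly from the index of the last data shred.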
| 912 | + let fec_set_count = (last_index / (DATA_SHREDS_PER_FEC_BLOCK as u64) + 1) as usize; |
| 913 | + |
| 914 | + let Some(parent_meta) = self |
| 915 | + .parent_meta_cf |
| 916 | + .get((slot, block_location)) |
| 917 | + .expect("Blockstore operations must succeed") |
| 918 | + else { |
| 919 | + return Err(BlockstoreProcessorError::MissingParent( |
| 920 | + slot, |
| 921 | + block_location, |
| 922 | + )); |
| 923 | + }; |
| 924 | + |
| 925 | + // Collect merkle roots for each FEC set |
| 926 | + let mut merkle_tree_leaves = Vec::with_capacity(fec_set_count + 1); |
| 927 | + |
| 928 | + for i in 0..fec_set_count { |
| 929 | + let fec_set_index = (i * DATA_SHREDS_PER_FEC_BLOCK) as u32; |
| 930 | + let erasure_set_id = ErasureSetId::new(slot, fec_set_index); |
| 931 | + |
| 932 | + let Some(merkle_root) = self |
| 933 | + .merkle_root_meta_from_location(erasure_set_id, block_location) |
| 934 | + .expect("Blockstore operations must succeed") |
| 935 | + .and_then(|mrm| mrm.merkle_root()) |
| 936 | + else { |
| 937 | + return Err(BlockstoreProcessorError::MissingMerkleRoot( |
| 938 | + slot, |
| 939 | + fec_set_index as u64, |
| 940 | + )); |
| 941 | + }; |
| 942 | + merkle_tree_leaves.push(Ok(merkle_root)); |
| 943 | + } |
| 944 | + |
| 945 | + // Add parent info as the last leaf |
| 946 | + let parent_info_hash = hashv(&[ |
| 947 | + &parent_meta.parent_slot.to_le_bytes(), |
| 948 | + parent_meta.parent_block_id.as_ref(), |
| 949 | + ]); |
| 950 | + merkle_tree_leaves.push(Ok(parent_info_hash)); |
| 951 | + |
| 952 | + // Build the merkle tree |
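| | + // make_merkle_tree returns the flattened tree with the root as its last node.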
| 953 | + let merkle_tree = make_merkle_tree(merkle_tree_leaves).map_err(|_| { |
| 954 | + BlockstoreProcessorError::FailedDoubleMerkleRootConstruction(slot, block_location) |
| 955 | + })?; |
| 956 | + let double_merkle_root = *merkle_tree |
| 957 | + .last() |
| 958 | + .expect("Merkle tree cannot be empty as fec_set_count is > 0"); |
| 959 | + |
| 960 | + // Build proofs |
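| | + // Each proof is the concatenation of the SIZE_OF_MERKLE_PROOF_ENTRY-byte sibling
| | + // hashes along the leaf-to-root path, as checked by the debug_assert below.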
| 961 | + let tree_size = fec_set_count + 1; |
| 962 | + let mut proofs = Vec::with_capacity(tree_size); |
| 963 | + |
| 964 | + for leaf_index in 0..tree_size { |
| 965 | + let proof_iter = make_merkle_proof(leaf_index, tree_size, &merkle_tree); |
| 966 | + let proof: Vec<u8> = proof_iter |
| 967 | + .map(|proof| proof.map(|p| p.as_slice())) |
| 968 | + .collect::<std::result::Result<Vec<_>, _>>() |
| 969 | + .map_err(|_| { |
| 970 | + BlockstoreProcessorError::FailedDoubleMerkleRootConstruction( |
| 971 | + slot, |
| 972 | + block_location, |
| 973 | + ) |
| 974 | + })? |
| 975 | + .into_iter() |
| 976 | + .flatten() |
| 977 | + .copied() |
| 978 | + .collect(); |
| 979 | + debug_assert!( |
| 980 | + proof.len() == get_proof_size(tree_size) as usize * SIZE_OF_MERKLE_PROOF_ENTRY |
| 981 | + ); |
| 982 | + proofs.push(proof); |
| 983 | + } |
| 984 | + |
| 985 | + // Create and store DoubleMerkleMeta |
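| | + // Subsequent calls for this (slot, block_location) will hit the cached entry above.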
| 986 | + let double_merkle_meta = DoubleMerkleMeta { |
| 987 | + double_merkle_root, |
| 988 | + fec_set_count, |
| 989 | + proofs, |
| 990 | + }; |
| 991 | + |
| 992 | + self.double_merkle_meta_cf |
| 993 | + .put((slot, block_location), &double_merkle_meta) |
| 994 | + .expect("Blockstore operations must succeed"); |
| 995 | + |
| 996 | + Ok(double_merkle_root) |
| 997 | + } |
| 998 | + |
872 | 999 | /// Check whether the specified slot is an orphan slot which does not |
873 | 1000 | /// have a parent slot. |
874 | 1001 | /// |
@@ -6109,7 +6236,11 @@ pub mod tests { |
6109 | 6236 | crate::{ |
6110 | 6237 | genesis_utils::{create_genesis_config, GenesisConfigInfo}, |
6111 | 6238 | leader_schedule::{FixedSchedule, IdentityKeyedLeaderSchedule}, |
6112 | | - shred::{max_ticks_per_n_shreds, MAX_DATA_SHREDS_PER_SLOT}, |
| 6239 | + shred::{ |
| 6240 | + max_ticks_per_n_shreds, |
| 6241 | + merkle_tree::{get_merkle_root, MerkleProofEntry}, |
| 6242 | + MAX_DATA_SHREDS_PER_SLOT, |
| 6243 | + }, |
6113 | 6244 | }, |
6114 | 6245 | assert_matches::assert_matches, |
6115 | 6246 | bincode::{serialize, Options}, |
@@ -12786,4 +12917,122 @@ pub mod tests { |
12786 | 12917 | Err(TransactionError::InsufficientFundsForFee) |
12787 | 12918 | ); |
12788 | 12919 | } |
| 12920 | + |
| 12921 | + #[test] |
| 12922 | + fn test_get_or_compute_double_merkle_root() { |
| 12923 | + let ledger_path = get_tmp_ledger_path_auto_delete!(); |
| 12924 | + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); |
| 12925 | + |
| 12926 | + let parent_slot = 990; |
| 12927 | + let slot = 1000; |
| 12928 | + let num_entries = 200; |
| 12929 | + |
| 12930 | + // Create a set of shreds for a complete block |
| 12931 | + let (data_shreds, coding_shreds, leader_schedule) = |
| 12932 | + setup_erasure_shreds(slot, parent_slot, num_entries); |
| 12933 | + |
| 12934 | + // Create ParentMeta |
| 12935 | + let parent_meta = ParentMeta { |
| 12936 | + parent_slot, |
| 12937 | + parent_block_id: Hash::default(), |
| 12938 | + replay_fec_set_index: 0, |
| 12939 | + }; |
| 12940 | + blockstore |
| 12941 | + .parent_meta_cf |
| 12942 | + .put((slot, BlockLocation::Original), &parent_meta) |
| 12943 | + .unwrap(); |
| 12944 | + |
| 12945 | + // Insert shreds into blockstore |
| 12946 | + let mut fec_set_roots = [Hash::default(); 3]; |
| 12947 | + for shred in data_shreds.iter().chain(coding_shreds.iter()) { |
| 12948 | + if shred.is_data() && shred.index() % (DATA_SHREDS_PER_FEC_BLOCK as u32) == 0 { |
| 12949 | + // Store each FEC set's merkle root for later verification
| 12950 | + fec_set_roots[(shred.index() as usize) / DATA_SHREDS_PER_FEC_BLOCK] = |
| 12951 | + shred.merkle_root().unwrap(); |
| 12952 | + } |
| 12953 | + let duplicates = |
| 12954 | + blockstore.insert_shred_return_duplicate(shred.clone(), &leader_schedule); |
| 12955 | + assert!(duplicates.is_empty()); |
| 12956 | + } |
| 12957 | + |
| 12958 | + let slot_meta = blockstore.meta(slot).unwrap().unwrap(); |
| 12959 | + assert!(slot_meta.is_full()); |
| 12960 | + |
| 12961 | + // Test getting the double merkle root |
| 12962 | + let block_location = BlockLocation::Original; |
| 12963 | + let double_merkle_root = blockstore |
| 12964 | + .get_or_compute_double_merkle_root(slot, block_location) |
| 12965 | + .unwrap(); |
| 12966 | + |
| 12967 | + let double_merkle_meta = blockstore |
| 12968 | + .double_merkle_meta_cf |
| 12969 | + .get((slot, block_location)) |
| 12970 | + .unwrap() |
| 12971 | + .unwrap(); |
| 12972 | + |
| 12973 | + // Verify meta |
| 12974 | + assert_eq!(double_merkle_meta.double_merkle_root, double_merkle_root); |
| 12975 | + assert_eq!(double_merkle_meta.fec_set_count, 3); // With 200 entries, we should have 3 FEC sets |
| 12976 | + assert_eq!(double_merkle_meta.proofs.len(), 4); // 3 FEC sets, 1 parent info
| 12977 | + |
| 12978 | + // Verify the proofs |
| 12979 | + let proof_size = get_proof_size(double_merkle_meta.fec_set_count + 1) as usize; |
| 12980 | + |
| 12981 | + // FEC sets
| 12982 | + for (fec_set, root) in fec_set_roots.iter().enumerate() { |
| 12983 | + let proof = &double_merkle_meta.proofs[fec_set]; |
| 12984 | + let proof = proof |
| 12985 | + .chunks(SIZE_OF_MERKLE_PROOF_ENTRY) |
| 12986 | + .map(<&MerkleProofEntry>::try_from) |
| 12987 | + .map(std::result::Result::unwrap); |
| 12988 | + assert_eq!(proof_size, proof.clone().count()); |
| 12989 | + |
| 12990 | + let double_merkle_root = get_merkle_root(fec_set, *root, proof).unwrap(); |
| 12991 | + assert_eq!(double_merkle_meta.double_merkle_root, double_merkle_root); |
| 12992 | + } |
| 12993 | + |
| 12994 | + // Parent info - final proof |
| 12995 | + let parent_info_hash = hashv(&[ |
| 12996 | + &parent_slot.to_le_bytes(), |
| 12997 | + parent_meta.parent_block_id.as_ref(), |
| 12998 | + ]); |
| 12999 | + let parent_info_proof = &double_merkle_meta.proofs[double_merkle_meta.fec_set_count]; |
| 13000 | + let proof = parent_info_proof |
| 13001 | + .chunks(SIZE_OF_MERKLE_PROOF_ENTRY) |
| 13002 | + .map(<&MerkleProofEntry>::try_from) |
| 13003 | + .map(std::result::Result::unwrap); |
| 13004 | + assert_eq!(proof_size, proof.clone().count()); |
| 13005 | + |
| 13006 | + let double_merkle_root = |
| 13007 | + get_merkle_root(double_merkle_meta.fec_set_count, parent_info_hash, proof).unwrap(); |
| 13008 | + assert_eq!(double_merkle_meta.double_merkle_root, double_merkle_root); |
| 13009 | + |
| 13010 | + // Slot not full should fail |
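| | + // Only a prefix of the shreds is inserted and the block is not marked last-in-slot,
| | + // so the slot never becomes full.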
| 13011 | + let incomplete_slot = 1001; // Make it a child of slot 1000 |
| 13012 | + let (partial_shreds, _, leader_schedule) = |
| 13013 | + setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot( |
| 13014 | + incomplete_slot, |
| 13015 | + slot, // parent is 1000 |
| 13016 | + 5, |
| 13017 | + 0, |
| 13018 | + Some(Hash::new_from_array(rand::thread_rng().gen())), |
| 13019 | + false, // not last in slot |
| 13020 | + ); |
| 13021 | + |
| 13022 | + for shred in partial_shreds.iter().take(3) { |
| 13023 | + let duplicates = |
| 13024 | + blockstore.insert_shred_return_duplicate(shred.clone(), &leader_schedule); |
| 13025 | + assert!(duplicates.is_empty()); |
| 13026 | + } |
| 13027 | + |
| 13028 | + let result = blockstore.get_or_compute_double_merkle_root(incomplete_slot, block_location); |
| 13029 | + match result { |
| 13030 | + Err(BlockstoreProcessorError::SlotNotFull(slot, loc)) => { |
| 13031 | + assert_eq!(slot, incomplete_slot); |
| 13032 | + assert_eq!(loc, block_location); |
| 13033 | + } // This is the expected error |
| 13034 | + Err(e) => panic!("Unexpected error: {e:?}"), |
| 13035 | + Ok(_) => panic!("Expected error but got Ok"), |
| 13036 | + } |
| 13037 | + } |
12789 | 13038 | } |