diff --git a/hash256-std-hasher/Cargo.toml b/hash256-std-hasher/Cargo.toml index addd32fb..bdeb08ef 100644 --- a/hash256-std-hasher/Cargo.toml +++ b/hash256-std-hasher/Cargo.toml @@ -16,7 +16,7 @@ harness = false crunchy = "0.2.1" [dev-dependencies] -criterion = "0.4.0" +criterion = "0.5.1" [features] default = ["std"] diff --git a/memory-db/Cargo.toml b/memory-db/Cargo.toml index adad836a..e800a2a2 100644 --- a/memory-db/Cargo.toml +++ b/memory-db/Cargo.toml @@ -12,7 +12,7 @@ hash-db = { version = "0.16.0", path = "../hash-db", default-features = false } [dev-dependencies] keccak-hasher = { path = "../test-support/keccak-hasher" } -criterion = "0.4.0" +criterion = "0.5.1" [features] default = ["std"] diff --git a/test-support/reference-trie/Cargo.toml b/test-support/reference-trie/Cargo.toml index 90fe577a..9f0ff79a 100644 --- a/test-support/reference-trie/Cargo.toml +++ b/test-support/reference-trie/Cargo.toml @@ -18,7 +18,7 @@ paste = "1.0.12" [dev-dependencies] trie-bench = { path = "../trie-bench" } -criterion = "0.4.0" +criterion = "0.5.1" [[bench]] name = "bench" diff --git a/test-support/reference-trie/src/lib.rs b/test-support/reference-trie/src/lib.rs index be626801..3f295d82 100644 --- a/test-support/reference-trie/src/lib.rs +++ b/test-support/reference-trie/src/lib.rs @@ -51,6 +51,8 @@ macro_rules! test_layouts { ($test:ident, $test_internal:ident) => { #[test] fn $test() { + eprintln!("Running with layout `SubstrateV1`"); + $test_internal::<$crate::SubstrateV1<$crate::RefHasher>>(); eprintln!("Running with layout `HashedValueNoExtThreshold`"); $test_internal::<$crate::HashedValueNoExtThreshold<1>>(); eprintln!("Running with layout `HashedValueNoExt`"); @@ -599,6 +601,8 @@ impl<'a> Input for ByteSliceInput<'a> { // `const HASHED_NULL_NODE: <KeccakHasher as Hasher>::Out = <KeccakHasher as Hasher>::Out( … … )`. // Perhaps one day soon?
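For context on the macro change above: `test_layouts!` generates a single `#[test]` that runs a generic test body once per layout, and with this diff `SubstrateV1` now runs first. A minimal sketch of a caller, the same pattern existing tests in `trie-db` use (the test name here is hypothetical):

```rust
use reference_trie::test_layouts;
use trie_db::{NodeCodec, TrieLayout};

// Expands to `#[test] fn null_node_nonempty()` that invokes the internal
// function once per layout, starting with `SubstrateV1<RefHasher>`.
test_layouts!(null_node_nonempty, null_node_nonempty_internal);
fn null_node_nonempty_internal<T: TrieLayout>() {
    // The body is written once, generic over the layout under test.
    let null = <T::Codec as NodeCodec>::hashed_null_node();
    assert!(!null.as_ref().is_empty());
}
```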
impl<H: Hasher> NodeCodec for ReferenceNodeCodec<H> { + const DELTA_COMPACT_OMITTED_NODE: usize = 32; + const DELTA_COMPACT_OMITTED_VALUE: usize = 30; type Error = CodecError; type HashOut = H::Out; @@ -753,6 +757,8 @@ impl<H: Hasher> NodeCodec for ReferenceNodeCodec<H> { } impl<H: Hasher> NodeCodec for ReferenceNodeCodecNoExt<H> { + const DELTA_COMPACT_OMITTED_NODE: usize = 32; + const DELTA_COMPACT_OMITTED_VALUE: usize = 30; type Error = CodecError; type HashOut = <H as Hasher>::Out; diff --git a/test-support/reference-trie/src/substrate.rs b/test-support/reference-trie/src/substrate.rs index 9b1573f1..5eff71d7 100644 --- a/test-support/reference-trie/src/substrate.rs +++ b/test-support/reference-trie/src/substrate.rs @@ -209,6 +209,8 @@ impl<H> NodeCodecT for NodeCodec<H> where H: Hasher, { + const DELTA_COMPACT_OMITTED_NODE: usize = 32; + const DELTA_COMPACT_OMITTED_VALUE: usize = 30; const ESCAPE_HEADER: Option<u8> = Some(trie_constants::ESCAPE_COMPACT_HEADER); type Error = Error; type HashOut = H::Out; diff --git a/test-support/reference-trie/src/substrate_like.rs b/test-support/reference-trie/src/substrate_like.rs index 37cccefc..a2cab9d5 100644 --- a/test-support/reference-trie/src/substrate_like.rs +++ b/test-support/reference-trie/src/substrate_like.rs @@ -149,6 +149,8 @@ impl<H> NodeCodecT for NodeCodec<H> where H: Hasher, { + const DELTA_COMPACT_OMITTED_NODE: usize = 32; + const DELTA_COMPACT_OMITTED_VALUE: usize = 30; const ESCAPE_HEADER: Option<u8> = Some(trie_constants::ESCAPE_COMPACT_HEADER); type Error = Error; type HashOut = H::Out; diff --git a/test-support/trie-bench/Cargo.toml b/test-support/trie-bench/Cargo.toml index f8de1f96..e0779c1e 100644 --- a/test-support/trie-bench/Cargo.toml +++ b/test-support/trie-bench/Cargo.toml @@ -14,5 +14,5 @@ hash-db = { path = "../../hash-db" , version = "0.16.0"} memory-db = { path = "../../memory-db", version = "0.32.0" } trie-root = { path = "../../trie-root", version = "0.18.0" } trie-db = { path = "../../trie-db", version = "0.27.0" } -criterion = "0.4.0" +criterion = "0.5.1" parity-scale-codec = "3.0.0" diff --git a/trie-db/fuzz/Cargo.toml b/trie-db/fuzz/Cargo.toml index 84a03257..3b4c9990 100644 --- a/trie-db/fuzz/Cargo.toml +++ b/trie-db/fuzz/Cargo.toml @@ -14,9 +14,12 @@ memory-db = { path = "../../memory-db", version = "0.32.0" } reference-trie = { path = "../../test-support/reference-trie", version = "0.29.0" } arbitrary = { version = "1.3.0", features = ["derive"] } array-bytes = "6.0.0" +lazy_static = "1.4.0" [dependencies.trie-db] path = ".."
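The two `DELTA_COMPACT_*` constants introduced in the hunks above feed the size-limit bookkeeping of the recorder (see `Limits::add_node` later in this diff): in a compact proof, an omitted child node saves roughly one 32-byte hash, and an omitted value slightly less once encoding overhead is accounted for. A simplified mirror of that accounting, with the node-count limit and `FullNodes` handling dropped for brevity:

```rust
/// Simplified sketch of the recorder's byte budget for compact proofs:
/// before charging a node, credit back the parent hash the compact encoding
/// omits (`delta`, e.g. 32 bytes); return `true` when the limit is reached.
fn charge_node(remaining_size: &mut usize, node_size: usize, delta: usize, is_root: bool) -> bool {
    if !is_root {
        *remaining_size += delta; // an omitted parent hash is "refunded"
    }
    if *remaining_size >= node_size {
        *remaining_size -= node_size;
        false // budget left, keep recording
    } else {
        *remaining_size = 0;
        true // halt: the proof so far stays valid and can be resumed
    }
}
```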
+[dependencies.trie-db-test] +path = "../test" [dependencies.libfuzzer-sys] version = "0.4.6" @@ -68,3 +71,19 @@ path = "fuzz_targets/trie_proof_invalid.rs" [[bin]] name = "prefix_seek_iter" path = "fuzz_targets/prefix_seek_iter.rs" + +[[bin]] +name = "query_plan_1" +path = "fuzz_targets/query_plan_1.rs" + +[[bin]] +name = "query_plan_2" +path = "fuzz_targets/query_plan_2.rs" + +[[bin]] +name = "query_plan_3" +path = "fuzz_targets/query_plan_3.rs" + +[[bin]] +name = "query_plan_4" +path = "fuzz_targets/query_plan_4.rs" diff --git a/trie-db/fuzz/fuzz_targets/no_ext_insert.rs b/trie-db/fuzz/fuzz_targets/no_ext_insert.rs index ce741e8a..de43db0c 100644 --- a/trie-db/fuzz/fuzz_targets/no_ext_insert.rs +++ b/trie-db/fuzz/fuzz_targets/no_ext_insert.rs @@ -1,7 +1,7 @@ #![no_main] use libfuzzer_sys::fuzz_target; -use trie_db_fuzz::fuzz_that_no_extension_insert; +use trie_db_test::fuzz::fuzz_that_no_extension_insert; fuzz_target!(|data: &[u8]| { // fuzzed code goes here diff --git a/trie-db/fuzz/fuzz_targets/no_ext_insert_rem.rs b/trie-db/fuzz/fuzz_targets/no_ext_insert_rem.rs index e6edb578..35d05f89 100644 --- a/trie-db/fuzz/fuzz_targets/no_ext_insert_rem.rs +++ b/trie-db/fuzz/fuzz_targets/no_ext_insert_rem.rs @@ -1,7 +1,7 @@ #![no_main] use libfuzzer_sys::fuzz_target; -use trie_db_fuzz::fuzz_that_no_extension_insert_remove; +use trie_db_test::fuzz::fuzz_that_no_extension_insert_remove; fuzz_target!(|data: &[u8]| { // fuzzed code goes here diff --git a/trie-db/fuzz/fuzz_targets/prefix_iter.rs b/trie-db/fuzz/fuzz_targets/prefix_iter.rs index 85a4add0..351be348 100644 --- a/trie-db/fuzz/fuzz_targets/prefix_iter.rs +++ b/trie-db/fuzz/fuzz_targets/prefix_iter.rs @@ -1,7 +1,7 @@ #![no_main] use libfuzzer_sys::fuzz_target; -use trie_db_fuzz::fuzz_prefix_iter; +use trie_db_test::fuzz::fuzz_prefix_iter; fuzz_target!(|data: &[u8]| { fuzz_prefix_iter::(data); diff --git a/trie-db/fuzz/fuzz_targets/prefix_seek_iter.rs b/trie-db/fuzz/fuzz_targets/prefix_seek_iter.rs index 23e1f734..9806bc1a 100644 --- a/trie-db/fuzz/fuzz_targets/prefix_seek_iter.rs +++ b/trie-db/fuzz/fuzz_targets/prefix_seek_iter.rs @@ -1,7 +1,7 @@ #![no_main] use libfuzzer_sys::fuzz_target; -use trie_db_fuzz::{fuzz_prefix_seek_iter, PrefixSeekTestInput}; +use trie_db_test::fuzz::{fuzz_prefix_seek_iter, PrefixSeekTestInput}; fuzz_target!(|data: PrefixSeekTestInput| { fuzz_prefix_seek_iter::>(data); diff --git a/trie-db/fuzz/fuzz_targets/query_plan_1.rs b/trie-db/fuzz/fuzz_targets/query_plan_1.rs new file mode 100644 index 00000000..93b42dd5 --- /dev/null +++ b/trie-db/fuzz/fuzz_targets/query_plan_1.rs @@ -0,0 +1,15 @@ +#![no_main] +use lazy_static::lazy_static; +use libfuzzer_sys::fuzz_target; +use reference_trie::{RefHasher, SubstrateV1}; +use trie_db_test::fuzz::query_plan::{ + build_state, fuzz_query_plan, ArbitraryQueryPlan, FuzzContext, CONF1, +}; + +lazy_static! { + static ref CONTEXT: FuzzContext<SubstrateV1<RefHasher>> = build_state(CONF1); +} + +fuzz_target!(|plan: ArbitraryQueryPlan| { + fuzz_query_plan::<SubstrateV1<RefHasher>>(&CONTEXT, plan); +}); diff --git a/trie-db/fuzz/fuzz_targets/query_plan_2.rs b/trie-db/fuzz/fuzz_targets/query_plan_2.rs new file mode 100644 index 00000000..d06fab41 --- /dev/null +++ b/trie-db/fuzz/fuzz_targets/query_plan_2.rs @@ -0,0 +1,36 @@ +#![no_main] +use lazy_static::lazy_static; +use libfuzzer_sys::fuzz_target; +use reference_trie::{RefHasher, SubstrateV1}; +use trie_db_test::fuzz::query_plan::{ + build_state, fuzz_query_plan_conf, ArbitraryQueryPlan, FuzzContext, CONF1, +}; +use arbitrary::Arbitrary; + +lazy_static!
{ + static ref CONTEXT: FuzzContext<SubstrateV1<RefHasher>> = build_state(CONF1); +} + +#[derive(Debug, Clone, Copy, Arbitrary)] +#[repr(usize)] +enum SplitSize { + One = 1, + Two = 2, + Three = 3, + More = 10, + MoreMore = 50, +} + +#[derive(Debug, Clone, Copy, Eq, PartialEq, Arbitrary)] +enum SplitKind { + Stateless, + Stateful, +} + +fuzz_target!(|input: (ArbitraryQueryPlan, SplitSize, SplitKind)| { + let (plan, split_size, split_kind) = input; + let mut conf = CONTEXT.conf.clone(); + conf.limit = split_size as usize; + conf.proof_spawn_with_persistence = split_kind == SplitKind::Stateful; + fuzz_query_plan_conf::<SubstrateV1<RefHasher>>(&CONTEXT, conf, plan); +}); diff --git a/trie-db/fuzz/fuzz_targets/query_plan_3.rs b/trie-db/fuzz/fuzz_targets/query_plan_3.rs new file mode 100644 index 00000000..2e9028bb --- /dev/null +++ b/trie-db/fuzz/fuzz_targets/query_plan_3.rs @@ -0,0 +1,15 @@ +#![no_main] +use lazy_static::lazy_static; +use libfuzzer_sys::fuzz_target; +use reference_trie::{RefHasher, SubstrateV1}; +use trie_db_test::fuzz::query_plan::{ + build_state, fuzz_query_plan, ArbitraryQueryPlan, FuzzContext, CONF2, +}; + +lazy_static! { + static ref CONTEXT: FuzzContext<SubstrateV1<RefHasher>> = build_state(CONF2); +} + +fuzz_target!(|plan: ArbitraryQueryPlan| { + fuzz_query_plan::<SubstrateV1<RefHasher>>(&CONTEXT, plan); +}); diff --git a/trie-db/fuzz/fuzz_targets/query_plan_4.rs b/trie-db/fuzz/fuzz_targets/query_plan_4.rs new file mode 100644 index 00000000..29c4a563 --- /dev/null +++ b/trie-db/fuzz/fuzz_targets/query_plan_4.rs @@ -0,0 +1,36 @@ +#![no_main] +use lazy_static::lazy_static; +use libfuzzer_sys::fuzz_target; +use reference_trie::{RefHasher, SubstrateV1}; +use trie_db_test::fuzz::query_plan::{ + build_state, fuzz_query_plan_conf, ArbitraryQueryPlan, FuzzContext, CONF2, +}; +use arbitrary::Arbitrary; + +lazy_static! { + static ref CONTEXT: FuzzContext<SubstrateV1<RefHasher>> = build_state(CONF2); +} + +#[derive(Debug, Clone, Copy, Arbitrary)] +#[repr(usize)] +enum SplitSize { + One = 1, + Two = 2, + Three = 3, + More = 10, + MoreMore = 50, +} + +#[derive(Debug, Clone, Copy, Eq, PartialEq, Arbitrary)] +enum SplitKind { + Stateless, + Stateful, +} + +fuzz_target!(|input: (ArbitraryQueryPlan, SplitSize, SplitKind)| { + let (plan, split_size, split_kind) = input; + let mut conf = CONTEXT.conf.clone(); + conf.limit = split_size as usize; + conf.proof_spawn_with_persistence = split_kind == SplitKind::Stateful; + fuzz_query_plan_conf::<SubstrateV1<RefHasher>>(&CONTEXT, conf, plan); +}); diff --git a/trie-db/fuzz/fuzz_targets/seek_iter.rs b/trie-db/fuzz/fuzz_targets/seek_iter.rs index a41b6d60..bcf36d76 100644 --- a/trie-db/fuzz/fuzz_targets/seek_iter.rs +++ b/trie-db/fuzz/fuzz_targets/seek_iter.rs @@ -1,7 +1,7 @@ #![no_main] use libfuzzer_sys::fuzz_target; -use trie_db_fuzz::fuzz_seek_iter; +use trie_db_test::fuzz::fuzz_seek_iter; fuzz_target!(|data: &[u8]| { fuzz_seek_iter::(data); diff --git a/trie-db/fuzz/fuzz_targets/trie_codec_proof.rs b/trie-db/fuzz/fuzz_targets/trie_codec_proof.rs index ba7e92b6..6b593e48 100644 --- a/trie-db/fuzz/fuzz_targets/trie_codec_proof.rs +++ b/trie-db/fuzz/fuzz_targets/trie_codec_proof.rs @@ -1,7 +1,7 @@ #![no_main] use libfuzzer_sys::fuzz_target; -use trie_db_fuzz::fuzz_that_trie_codec_proofs; +use trie_db_test::fuzz::fuzz_that_trie_codec_proofs; fuzz_target!(|data: &[u8]| { fuzz_that_trie_codec_proofs::(data); diff --git a/trie-db/fuzz/fuzz_targets/trie_proof_invalid.rs b/trie-db/fuzz/fuzz_targets/trie_proof_invalid.rs index 263f8573..aa0a5f24 100644 --- a/trie-db/fuzz/fuzz_targets/trie_proof_invalid.rs +++ b/trie-db/fuzz/fuzz_targets/trie_proof_invalid.rs @@ -1,7 +1,7 @@
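The `SplitSize`/`SplitKind` pair in `query_plan_2` and `query_plan_4` is a small trick worth spelling out: the `#[repr(usize)]` discriminants double as the proof `limit`, so the fuzzer explores a handful of meaningful limits rather than the whole `usize` range. A standalone illustration (the `Arbitrary` derive is dropped to keep it dependency-free):

```rust
// `SplitSize as usize` yields the explicit discriminant, which the fuzz
// targets assign straight to `conf.limit`.
#[derive(Debug, Clone, Copy)]
#[repr(usize)]
enum SplitSize {
    One = 1,
    Two = 2,
    Three = 3,
    More = 10,
    MoreMore = 50,
}

fn main() {
    assert_eq!(SplitSize::More as usize, 10);
    assert_eq!(SplitSize::MoreMore as usize, 50);
}
```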
#![no_main] use libfuzzer_sys::fuzz_target; -use trie_db_fuzz::fuzz_that_verify_rejects_invalid_proofs; +use trie_db_test::fuzz::fuzz_that_verify_rejects_invalid_proofs; fuzz_target!(|data: &[u8]| { fuzz_that_verify_rejects_invalid_proofs::(data); diff --git a/trie-db/fuzz/fuzz_targets/trie_proof_valid.rs b/trie-db/fuzz/fuzz_targets/trie_proof_valid.rs index c28b1ae1..be9b3907 100644 --- a/trie-db/fuzz/fuzz_targets/trie_proof_valid.rs +++ b/trie-db/fuzz/fuzz_targets/trie_proof_valid.rs @@ -1,7 +1,7 @@ #![no_main] use libfuzzer_sys::fuzz_target; -use trie_db_fuzz::fuzz_that_verify_accepts_valid_proofs; +use trie_db_test::fuzz::fuzz_that_verify_accepts_valid_proofs; fuzz_target!(|data: &[u8]| { fuzz_that_verify_accepts_valid_proofs::(data); diff --git a/trie-db/fuzz/fuzz_targets/trie_root.rs b/trie-db/fuzz/fuzz_targets/trie_root.rs index 49d5a0b0..b69fbc5a 100644 --- a/trie-db/fuzz/fuzz_targets/trie_root.rs +++ b/trie-db/fuzz/fuzz_targets/trie_root.rs @@ -1,7 +1,7 @@ #![no_main] use libfuzzer_sys::fuzz_target; -use trie_db_fuzz::fuzz_that_reference_trie_root; +use trie_db_test::fuzz::fuzz_that_reference_trie_root; fuzz_target!(|data: &[u8]| { fuzz_that_reference_trie_root::(data); diff --git a/trie-db/fuzz/fuzz_targets/trie_root_fix_len.rs b/trie-db/fuzz/fuzz_targets/trie_root_fix_len.rs index 04591bae..68651c3f 100644 --- a/trie-db/fuzz/fuzz_targets/trie_root_fix_len.rs +++ b/trie-db/fuzz/fuzz_targets/trie_root_fix_len.rs @@ -1,7 +1,7 @@ #![no_main] use libfuzzer_sys::fuzz_target; -use trie_db_fuzz::fuzz_that_reference_trie_root_fix_length; +use trie_db_test::fuzz::fuzz_that_reference_trie_root_fix_length; fuzz_target!(|data: &[u8]| { fuzz_that_reference_trie_root_fix_length::(data); diff --git a/trie-db/fuzz/fuzz_targets/trie_root_new.rs b/trie-db/fuzz/fuzz_targets/trie_root_new.rs index be597711..2e0e8615 100644 --- a/trie-db/fuzz/fuzz_targets/trie_root_new.rs +++ b/trie-db/fuzz/fuzz_targets/trie_root_new.rs @@ -1,7 +1,7 @@ #![no_main] use libfuzzer_sys::fuzz_target; -use trie_db_fuzz::fuzz_that_compare_implementations; +use trie_db_test::fuzz::fuzz_that_compare_implementations; fuzz_target!(|data: &[u8]| { // fuzzed code goes here diff --git a/trie-db/src/iterator.rs b/trie-db/src/iterator.rs index ca382b70..dca3b8fb 100644 --- a/trie-db/src/iterator.rs +++ b/trie-db/src/iterator.rs @@ -34,12 +34,18 @@ enum Status { #[cfg_attr(feature = "std", derive(Debug))] #[derive(Eq, PartialEq)] -struct Crumb<H: Hasher> { +pub struct Crumb<H: Hasher> { hash: Option<H::Out>, node: Arc<OwnedNode<DBValue>>, status: Status, } +impl<H: Hasher> Clone for Crumb<H> { + fn clone(&self) -> Self { + Self { hash: self.hash.clone(), node: self.node.clone(), status: self.status.clone() } + } +} + impl<H: Hasher> Crumb<H> { /// Move on to next status in the node's sequence. fn increment(&mut self) { @@ -64,6 +70,12 @@ pub struct TrieDBRawIterator<L: TrieLayout> { key_nibbles: NibbleVec, } +impl<L: TrieLayout> Clone for TrieDBRawIterator<L> { + fn clone(&self) -> Self { + Self { trail: self.trail.clone(), key_nibbles: self.key_nibbles.clone() } + } +} + impl<L: TrieLayout> TrieDBRawIterator<L> { /// Create a new empty iterator.
pub fn empty() -> Self { diff --git a/trie-db/src/lib.rs b/trie-db/src/lib.rs index b09372b2..590ad4fa 100644 --- a/trie-db/src/lib.rs +++ b/trie-db/src/lib.rs @@ -43,6 +43,7 @@ use node::NodeOwned; pub mod node; pub mod proof; +pub mod query_plan; pub mod recorder; pub mod sectriedb; pub mod sectriedbmut; diff --git a/trie-db/src/lookup.rs b/trie-db/src/lookup.rs index 5e34055f..90bdcb5e 100644 --- a/trie-db/src/lookup.rs +++ b/trie-db/src/lookup.rs @@ -367,7 +367,6 @@ where NodeOwned::Leaf(slice, value) => return if partial == *slice { let value = (*value).clone(); - drop(node); load_value_owned( value, nibble_key.original_data_as_prefix(), @@ -395,7 +394,6 @@ where NodeOwned::Branch(children, value) => if partial.is_empty() { return if let Some(value) = value.clone() { - drop(node); load_value_owned( value, nibble_key.original_data_as_prefix(), @@ -433,7 +431,6 @@ where if partial.len() == slice.len() { return if let Some(value) = value.clone() { - drop(node); load_value_owned( value, nibble_key.original_data_as_prefix(), diff --git a/trie-db/src/nibble/leftnibbleslice.rs b/trie-db/src/nibble/leftnibbleslice.rs index f3ba2527..931b06c2 100644 --- a/trie-db/src/nibble/leftnibbleslice.rs +++ b/trie-db/src/nibble/leftnibbleslice.rs @@ -16,16 +16,13 @@ use crate::rstd::cmp::{self, Ordering}; use crate::nibble::{ nibble_ops::{self, NIBBLE_PER_BYTE}, - NibbleSlice, + LeftNibbleSlice, NibbleSlice, NibbleVec, }; -/// A representation of a nibble slice which is left-aligned. The regular `NibbleSlice` is -/// right-aligned, meaning it does not support efficient truncation from the right side. -/// -/// This is an immutable struct. No operations actually change it. -pub struct LeftNibbleSlice<'a> { - bytes: &'a [u8], - len: usize, +impl<'a> From<&'a NibbleVec> for LeftNibbleSlice<'a> { + fn from(v: &'a NibbleVec) -> Self { + LeftNibbleSlice { bytes: v.inner.as_slice(), len: v.len } + } } impl<'a> LeftNibbleSlice<'a> { diff --git a/trie-db/src/nibble/mod.rs b/trie-db/src/nibble/mod.rs index e1d758e9..76f1642f 100644 --- a/trie-db/src/nibble/mod.rs +++ b/trie-db/src/nibble/mod.rs @@ -16,8 +16,6 @@ use crate::{node::NodeKey, rstd::cmp}; -pub use self::leftnibbleslice::LeftNibbleSlice; - mod leftnibbleslice; mod nibbleslice; mod nibblevec; @@ -101,6 +99,19 @@ pub mod nibble_ops { upper_bound * NIBBLE_PER_BYTE } + /// Count the biggest common depth between two left aligned packed nibble slice and return + /// ordering. + pub fn biggest_depth_and_order(v1: &[u8], v2: &[u8]) -> (usize, cmp::Ordering) { + let upper_bound = cmp::min(v1.len(), v2.len()); + for a in 0..upper_bound { + if v1[a] != v2[a] { + let (common, order) = left_common_and_order(v1[a], v2[a]); + return (a * NIBBLE_PER_BYTE + common, order) + } + } + (upper_bound * NIBBLE_PER_BYTE, v1.len().cmp(&v2.len())) + } + /// Calculate the number of common nibble between two left aligned bytes. #[inline(always)] pub fn left_common(a: u8, b: u8) -> usize { @@ -113,6 +124,19 @@ pub mod nibble_ops { } } + /// Calculate the number of common nibble between two left aligned bytes. + #[inline(always)] + pub fn left_common_and_order(a: u8, b: u8) -> (usize, cmp::Ordering) { + let byte_order = a.cmp(&b); + if byte_order == cmp::Ordering::Equal { + (2, byte_order) + } else if pad_left(a) == pad_left(b) { + (1, byte_order) + } else { + (0, byte_order) + } + } + /// Shifts right aligned key to add a given left offset. /// Resulting in possibly padding at both left and right /// (example usage when combining two keys). 
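The ordering helpers added to `nibble_ops` above are small enough to illustrate standalone. This runnable mirror of `left_common_and_order` (with `pad_left` assumed to keep the high nibble, as in `nibble_ops`) shows the three possible outcomes for a pair of left-aligned bytes:

```rust
use core::cmp::Ordering;

fn pad_left(b: u8) -> u8 { b & 0xF0 }

// Returns how many nibbles agree (0, 1 or 2) plus the byte ordering;
// `biggest_depth_and_order` applies this byte-by-byte over two slices.
fn left_common_and_order(a: u8, b: u8) -> (usize, Ordering) {
    let byte_order = a.cmp(&b);
    if byte_order == Ordering::Equal {
        (2, byte_order)
    } else if pad_left(a) == pad_left(b) {
        (1, byte_order)
    } else {
        (0, byte_order)
    }
}

fn main() {
    assert_eq!(left_common_and_order(0xAB, 0xAB), (2, Ordering::Equal));
    assert_eq!(left_common_and_order(0xAB, 0xAC), (1, Ordering::Less)); // high nibble shared
    assert_eq!(left_common_and_order(0xAB, 0xCB), (0, Ordering::Less)); // diverge immediately
}
```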
@@ -182,6 +206,15 @@ pub struct NibbleSlice<'a> { offset: usize, } +/// A representation of a nibble slice which is left-aligned. The regular `NibbleSlice` is +/// right-aligned, meaning it does not support efficient truncation from the right side. +/// +/// This is an immutable struct. No operations actually change it. +pub struct LeftNibbleSlice<'a> { + bytes: &'a [u8], + len: usize, +} + /// Iterator type for a nibble slice. pub struct NibbleSliceIterator<'a> { p: &'a NibbleSlice<'a>, diff --git a/trie-db/src/nibble/nibbleslice.rs b/trie-db/src/nibble/nibbleslice.rs index f91fad7f..8e0c0e86 100644 --- a/trie-db/src/nibble/nibbleslice.rs +++ b/trie-db/src/nibble/nibbleslice.rs @@ -42,6 +42,10 @@ impl<'a> NibbleSlice<'a> { Self::new_slice(data, offset) } + pub(crate) fn offset(&self) -> usize { + self.offset + } + fn new_slice(data: &'a [u8], offset: usize) -> Self { NibbleSlice { data, offset } } diff --git a/trie-db/src/nibble/nibblevec.rs b/trie-db/src/nibble/nibblevec.rs index f612585a..31bb5816 100644 --- a/trie-db/src/nibble/nibblevec.rs +++ b/trie-db/src/nibble/nibblevec.rs @@ -16,7 +16,7 @@ use super::NibbleVec; use crate::{ - nibble::{nibble_ops, BackingByteVec, NibbleSlice}, + nibble::{nibble_ops, BackingByteVec, LeftNibbleSlice, NibbleSlice}, node::NodeKey, node_codec::Partial, }; @@ -119,27 +119,32 @@ impl NibbleVec { /// Append another `NibbleVec`. Can be slow (alignement of second vec). pub fn append(&mut self, v: &NibbleVec) { - if v.len == 0 { + self.append_slice(v.into()); + } + + /// Append a `LeftNibbleSlice`. Can be slow (alignement of second vec). + pub fn append_slice(&mut self, v: crate::nibble::LeftNibbleSlice) { + if v.len() == 0 { return } - let final_len = self.len + v.len; + let final_len = self.len + v.len(); let offset = self.len % nibble_ops::NIBBLE_PER_BYTE; let final_offset = final_len % nibble_ops::NIBBLE_PER_BYTE; let last_index = self.len / nibble_ops::NIBBLE_PER_BYTE; if offset > 0 { let (s1, s2) = nibble_ops::SPLIT_SHIFTS; self.inner[last_index] = - nibble_ops::pad_left(self.inner[last_index]) | (v.inner[0] >> s2); - (0..v.inner.len() - 1) - .for_each(|i| self.inner.push(v.inner[i] << s1 | v.inner[i + 1] >> s2)); + nibble_ops::pad_left(self.inner[last_index]) | (v.bytes[0] >> s2); + (0..v.bytes.len() - 1) + .for_each(|i| self.inner.push(v.bytes[i] << s1 | v.bytes[i + 1] >> s2)); if final_offset > 0 { - self.inner.push(v.inner[v.inner.len() - 1] << s1); + self.inner.push(v.bytes[v.bytes.len() - 1] << s1); } } else { - (0..v.inner.len()).for_each(|i| self.inner.push(v.inner[i])); + (0..v.bytes.len()).for_each(|i| self.inner.push(v.bytes[i])); } - self.len += v.len; + self.len += v.len(); } /// Append a `Partial`. Can be slow (alignement of partial). @@ -216,6 +221,11 @@ impl NibbleVec { } } + /// `NibbleVec` as a `LeftNibbleSlice`. + pub fn as_leftnibbleslice(&self) -> LeftNibbleSlice { + LeftNibbleSlice::new(&self.inner).truncate(self.len) + } + /// Do we start with the same nibbles as the whole of `them`? pub fn starts_with(&self, other: &Self) -> bool { if self.len() < other.len() { diff --git a/trie-db/src/node.rs b/trie-db/src/node.rs index 19ed9162..25dbe9d3 100644 --- a/trie-db/src/node.rs +++ b/trie-db/src/node.rs @@ -583,11 +583,17 @@ impl NodePlan { /// the `OwnedNode`. This is useful for trie iterators. 
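The `NibbleVec` changes above route `append` through a borrowed, left-aligned view instead of a second alignment pass. A hedged sketch of the new conversion path (the types are crate-visible inside `trie-db`; `NibbleVec: Clone` is assumed):

```rust
// Concatenate two nibble vectors via the new borrowed view:
// `From<&NibbleVec> for LeftNibbleSlice` avoids copying `b`'s backing bytes.
fn concat(a: &NibbleVec, b: &NibbleVec) -> NibbleVec {
    let mut out = a.clone();
    out.append_slice(b.into());
    out
}
```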
#[cfg_attr(feature = "std", derive(Debug))] #[derive(PartialEq, Eq)] -pub struct OwnedNode> { +pub struct OwnedNode { data: D, plan: NodePlan, } +impl Clone for OwnedNode { + fn clone(&self) -> Self { + OwnedNode { data: self.data.clone(), plan: self.plan.clone() } + } +} + impl> OwnedNode { /// Construct an `OwnedNode` by decoding an owned data source according to some codec. pub fn new(data: D) -> core::result::Result { diff --git a/trie-db/src/node_codec.rs b/trie-db/src/node_codec.rs index eb9b1f67..032f05e8 100644 --- a/trie-db/src/node_codec.rs +++ b/trie-db/src/node_codec.rs @@ -36,6 +36,12 @@ pub trait NodeCodec: Sized { /// branch or leaf with hash of value, followed by the value node. const ESCAPE_HEADER: Option = None; + /// Size delta for compact encoding of omitted nodes. + const DELTA_COMPACT_OMITTED_NODE: usize; + + /// Size delta for compact encoding of omitted value nodes. + const DELTA_COMPACT_OMITTED_VALUE: usize; + /// Codec error type. type Error: Error; diff --git a/trie-db/src/query_plan/mod.rs b/trie-db/src/query_plan/mod.rs new file mode 100644 index 00000000..b9223773 --- /dev/null +++ b/trie-db/src/query_plan/mod.rs @@ -0,0 +1,564 @@ +// Copyright 2023, 2023 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Iterate on multiple values following a specific query plan. +//! Can be use on a trie db to register a proof, or +//! be use on a proof directly. +//! When use on a proof, process can be interrupted, checked and +//! restore (at the cost of additional hashes in proof). +//! +//! Because nodes are guaranted to be accessed only once and recorded +//! in proof, we do not use a cache (or only the node caching part). + +use core::marker::PhantomData; + +use crate::{ + nibble::{nibble_ops, nibble_ops::NIBBLE_LENGTH, LeftNibbleSlice, NibbleSlice}, + node::{NodeHandle, NodePlan, OwnedNode, Value}, + node_codec::NodeCodec, + rstd::{ + borrow::{Borrow, Cow}, + boxed::Box, + cmp::*, + convert::{TryFrom, TryInto}, + result::Result, + vec, + vec::Vec, + }, + CError, ChildReference, DBValue, NibbleVec, Trie, TrieDB, TrieHash, TrieLayout, +}; +use hash_db::Hasher; +pub use record::{record_query_plan, HaltedStateRecord, Recorder}; +pub use verify::{verify_query_plan_iter, HaltedStateCheck}; + +mod record; +mod verify; + +/// Errors that may occur during proof verification. Most of the errors types simply indicate that +/// the proof is invalid with respect to the statement being verified, and the exact error type can +/// be used for debugging. +#[derive(PartialEq, Eq)] +#[cfg_attr(feature = "std", derive(Debug))] +pub enum Error { + /// The statement being verified contains multiple key-value pairs with the same key. The + /// parameter is the duplicated key. + DuplicateKey(Vec), + /// The statement being verified contains key not ordered properly. + UnorderedKey(Vec), + /// The proof contains at least one extraneous node. 
+ ExtraneousNode, + /// The proof contains at least one extraneous value which should have been omitted from the + /// proof. + ExtraneousValue(Vec<u8>), + /// The proof contains at least one extraneous hash reference that should have been omitted. + ExtraneousHashReference(HO), + /// The proof contains an invalid child reference that exceeds the hash length. + /// TODO extension only? + InvalidChildReference(Vec<u8>), + /// The proof is missing trie nodes required to verify. + IncompleteProof, + /// Item missing in backend when recording. + IncompleteDB(HO), + /// The root hash computed from the proof is incorrect. + RootMismatch(HO), + /// The hash computed from a node is incorrect. + HashMismatch(HO), + /// One of the proof nodes could not be decoded. + DecodeError(CE), + /// Node does not match existing handle. + /// This should not happen. + InvalidNodeHandle(Vec<u8>), + /// Node type in proof inconsistent with node ordering. + UnexpectedNodeType, +} + +#[cfg(feature = "std")] +impl<HO: std::fmt::Debug, CE: std::error::Error> std::fmt::Display for Error<HO, CE> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + match self { + Error::DuplicateKey(key) => + write!(f, "Duplicate key in input statement: key={:?}", key), + Error::UnorderedKey(key) => + write!(f, "Unordered key in input statement: key={:?}", key), + Error::ExtraneousNode => write!(f, "Extraneous node found in proof"), + Error::ExtraneousValue(key) => + write!(f, "Extraneous value found in proof should have been omitted: key={:?}", key), + Error::ExtraneousHashReference(hash) => write!( + f, + "Extraneous hash reference found in proof should have been omitted: hash={:?}", + hash + ), + Error::InvalidChildReference(data) => + write!(f, "Invalid child reference exceeds hash length: {:?}", data), + Error::IncompleteProof => write!(f, "Proof is incomplete -- expected more nodes"), + Error::IncompleteDB(hash) => write!(f, "Missing node in db: {:?}", hash), + Error::RootMismatch(hash) => write!(f, "Computed incorrect root {:?} from proof", hash), + Error::HashMismatch(hash) => write!(f, "Computed incorrect hash {:?} from node", hash), + Error::DecodeError(err) => write!(f, "Unable to decode proof node: {}", err), + Error::InvalidNodeHandle(node) => write!(f, "Invalid node handle: {:?}", node), + Error::UnexpectedNodeType => + write!(f, "Node type in proof inconsistent with node ordering."), + } + } +} + +#[cfg(feature = "std")] +impl<HO: std::fmt::Debug, CE: std::error::Error + 'static> std::error::Error for Error<HO, CE> { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::DecodeError(err) => Some(err), + _ => None, + } + } +} + +/// Item to query, in memory. +#[derive(Default, Clone, Debug)] +pub struct QueryPlanItem { + key: Vec<u8>, + hash_only: bool, + as_prefix: bool, +} + +impl QueryPlanItem { + /// Create new item. + pub fn new(key: Vec<u8>, hash_only: bool, as_prefix: bool) -> Self { + Self { key, hash_only, as_prefix } + } + /// Get ref. + pub fn as_ref(&self) -> QueryPlanItemRef { + QueryPlanItemRef { key: &self.key, hash_only: self.hash_only, as_prefix: self.as_prefix } + } +} + +/// Item to query.
+#[derive(Clone, Debug, Eq, PartialEq)] +pub struct QueryPlanItemRef<'a> { + pub key: &'a [u8], + pub hash_only: bool, + pub as_prefix: bool, +} + +impl<'a> QueryPlanItemRef<'a> { + fn before(&self, other: &Self) -> (bool, usize) { + let (common_depth, ordering) = nibble_ops::biggest_depth_and_order(&self.key, &other.key); + + ( + match ordering { + Ordering::Less => { + if self.as_prefix { + // do not allow querying content inside a prefix + !other.key.starts_with(self.key) + } else { + true + } + }, + Ordering::Greater | Ordering::Equal => false, + }, + common_depth, + ) + } + + fn to_owned(&self) -> QueryPlanItem { + QueryPlanItem { + key: self.key.to_vec(), + hash_only: self.hash_only, + as_prefix: self.as_prefix, + } + } +} + +/// Query plan in memory. +#[derive(Clone, Debug)] +pub struct InMemQueryPlan { + pub items: Vec<QueryPlanItem>, + pub kind: ProofKind, +} + +/// Iterator over the plan's items (a named type, as the mapped slice iterator type is very noisy). +pub struct QueryPlanItemIter<'a>(&'a Vec<QueryPlanItem>, usize); + +impl<'a> Iterator for QueryPlanItemIter<'a> { + type Item = QueryPlanItemRef<'a>; + + fn next(&mut self) -> Option<Self::Item> { + if self.1 >= self.0.len() { + return None + } + self.1 += 1; + Some(self.0[self.1 - 1].as_ref()) + } +} + +impl InMemQueryPlan { + /// Get ref. + pub fn as_ref(&self) -> QueryPlan<QueryPlanItemIter> { + QueryPlan { items: QueryPlanItemIter(&self.items, 0), kind: self.kind, _ph: PhantomData } + } +} + +/// Query plan. +pub struct QueryPlan<'a, I> { + pub items: I, + pub kind: ProofKind, + pub _ph: PhantomData<&'a ()>, +} + +/// The supported kinds of proof. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum ProofKind { + /// Proof is a sequence of fully encoded nodes. This is not + /// size efficient but allows streaming a proof better, since + /// the consumer can halt at the first invalid node. + FullNodes, + + /// The proof has its accessed hashes and values removed (same scheme + /// as in trie_codec.rs; ordering is the same as for full nodes: from the + /// root and in lexicographic order). + /// + /// Checking this proof requires reading it fully. When stopping + /// recording, the partial proof stays valid, but it will + /// contain hashes that would not be needed if creating the + /// proof at once. + CompactNodes, +} + +impl ProofKind { + /// Check if compact variant of proof. + pub fn is_compact(self) -> bool { + matches!(self, ProofKind::CompactNodes) + } +} + +#[derive(Default, Clone, Copy)] +struct Bitmap(u16); + +pub(crate) trait BitmapAccess: Copy { + fn at(&self, i: usize) -> bool; +} + +impl BitmapAccess for Bitmap { + fn at(&self, i: usize) -> bool { + self.0 & (1u16 << i) != 0 + } +} + +impl<'a> BitmapAccess for &'a [bool] { + fn at(&self, i: usize) -> bool { + self[i] + } +} + +impl Bitmap { + fn set(&mut self, i: usize, v: bool) { + if v { + self.0 |= 1u16 << i + } else { + self.0 &= !(1u16 << i) + } + } +} + +#[derive(Clone)] +struct StackedNodeRecord { + /// Node in memory content. + node: OwnedNode<DBValue>, + /// Flags indicating whether each child is omitted (accessed) in the encoded node. + /// For some encodings, it also records whether the child has already been written. + accessed_children_node: Bitmap, + /// Skip value if value node is after. + accessed_value_node: bool, + /// Depth of node in nibbles (actual depth of an attached value (post partial)). + depth: usize, + /// Next descended child; can also be used to get the node position in its parent + /// (this minus one). + next_descended_child: u8, + /// Is the node inline. + is_inline: bool, +}
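The `Bitmap` helper above is worth a standalone illustration: one bit per possible child (16 nibbles), packed into a `u16`, used by the recorder to track which children were accessed and omitted. A runnable mirror:

```rust
// Mirror of the module's Bitmap: bit i set means child at nibble i was accessed.
#[derive(Default, Clone, Copy)]
struct Bitmap(u16);

impl Bitmap {
    fn at(&self, i: usize) -> bool { self.0 & (1u16 << i) != 0 }
    fn set(&mut self, i: usize, v: bool) {
        if v { self.0 |= 1u16 << i } else { self.0 &= !(1u16 << i) }
    }
}

fn main() {
    let mut accessed = Bitmap::default();
    accessed.set(0xA, true); // child at nibble 0xA was recorded
    assert!(accessed.at(0xA) && !accessed.at(0xB));
}
```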
+/// Limits on the size of the proof to record. +struct Limits { + remaining_node: Option<usize>, + remaining_size: Option<usize>, + kind: ProofKind, +} + +impl Limits { + #[must_use] + fn add_node(&mut self, size: usize, hash_size: usize, is_root: bool) -> bool { + let mut res = false; + match self.kind { + ProofKind::CompactNodes | ProofKind::FullNodes => { + if let Some(rem_size) = self.remaining_size.as_mut() { + if let ProofKind::CompactNodes = self.kind { + if !is_root { + // remove a parent hash + *rem_size += hash_size; + } + } + if *rem_size >= size { + *rem_size -= size; + } else { + *rem_size = 0; + res = true; + } + } + if let Some(rem_node) = self.remaining_node.as_mut() { + if *rem_node > 1 { + *rem_node -= 1; + } else { + *rem_node = 0; + res = true; + } + } + }, + } + res + } + + #[must_use] + fn add_value(&mut self, size: usize, hash_size: usize) -> bool { + let mut res = false; + match self.kind { + ProofKind::CompactNodes | ProofKind::FullNodes => { + if let Some(rem_size) = self.remaining_size.as_mut() { + if let ProofKind::CompactNodes = self.kind { + // remove a parent value hash + *rem_size += hash_size; + } + if *rem_size >= size { + *rem_size -= size; + } else { + *rem_size = 0; + res = true; + } + } + if let Some(rem_node) = self.remaining_node.as_mut() { + if *rem_node > 1 { + *rem_node -= 1; + } else { + *rem_node = 0; + res = true; + } + } + }, + } + res + } +} + +#[derive(Eq, PartialEq)] +enum TryStackChildResult { + /// There is no child to stack. + NotStacked, + /// Same as `NotStacked`, but the parent is a branch, so during iteration + /// we will attempt the next child. + NotStackedBranch, + /// Nothing stacked; this next-child attempt allows suspending proof + /// recording or proof-check iteration. + Halted, + /// Child stacked and the full partial key matched. + StackedFull, + /// Child stacked, but the key ends inside the child's partial key. + /// For a prefix query plan item, this is part of the prefix. + StackedInto, + /// Child stacked, but the key diverges before the end of the child's partial key. + /// Indicates that the query plan item needs to be switched. + /// The next query plan item could still be using this stacked node (as with any stacked variant). + StackedAfter, +} + +#[derive(Eq, PartialEq)] +enum ReadProofState { + /// Iteration not started. + NotStarted, + /// Iterating. + Running, + /// Switch next item. + SwitchQueryPlan, + /// Query plan fully consumed. + PlanConsumed, + /// Proof reading halted. + Halted, + /// Iteration finished. + Finished, +} + +struct StackedNodeCheck<L: TrieLayout, D: SplitFirst> { + node: ItemStackNode<D>, + children: Vec<Option<ChildReference<TrieHash<L>>>>, + attached_value_hash: Option<TrieHash<L>>, + depth: usize, + next_descended_child: u8, +} + +impl<L: TrieLayout, D: SplitFirst> Clone for StackedNodeCheck<L, D> { + fn clone(&self) -> Self { + StackedNodeCheck { + node: self.node.clone(), + children: self.children.clone(), + attached_value_hash: self.attached_value_hash, + depth: self.depth, + next_descended_child: self.next_descended_child, + } + } +} + +#[derive(Clone)] +enum ItemStackNode<D: SplitFirst> { + Inline(OwnedNode<Vec<u8>>), + Node(OwnedNode<D>), +} + +impl<L: TrieLayout, D: SplitFirst> TryFrom<(ItemStackNode<D>, ProofKind)> + for StackedNodeCheck<L, D> +{ + type Error = Error<TrieHash<L>, CError<L>>; + + fn try_from( + (node, kind): (ItemStackNode<D>, ProofKind), + ) -> crate::rstd::result::Result<Self, Self::Error> { + let children = if !kind.is_compact() { + Vec::new() + } else { + match &node { + ItemStackNode::Inline(_) => Vec::new(), + ItemStackNode::Node(node) => match node.node_plan() { + NodePlan::Empty | NodePlan::Leaf { .. } => Vec::new(), + NodePlan::Extension { child, ..
} => { + let mut result: Vec<Option<ChildReference<TrieHash<L>>>> = vec![None; 1]; + let node_data = node.data(); + match child.build(node_data) { + NodeHandle::Inline(data) if data.is_empty() => (), + child => { + let child_ref = + child.try_into().map_err(|d| Error::InvalidNodeHandle(d))?; + result[0] = Some(child_ref); + }, + } + result + }, + NodePlan::Branch { children, .. } | + NodePlan::NibbledBranch { children, .. } => { + let mut i = 0; + let mut result: Vec<Option<ChildReference<TrieHash<L>>>> = + vec![None; NIBBLE_LENGTH]; + let node_data = node.data(); + while i < NIBBLE_LENGTH { + match children[i].as_ref().map(|c| c.build(node_data)) { + Some(NodeHandle::Inline(data)) if data.is_empty() => (), + Some(child) => { + let child_ref = child + .try_into() + .map_err(|d| Error::InvalidNodeHandle(d))?; + + result[i] = Some(child_ref); + }, + None => {}, + } + i += 1; + } + result + }, + }, + } + }; + + Ok(StackedNodeCheck { + node, + depth: 0, + next_descended_child: 0, + children, + attached_value_hash: None, + }) + } +} + +impl<L: TrieLayout, D: SplitFirst> StackedNodeCheck<L, D> { + fn data(&self) -> &[u8] { + match &self.node { + ItemStackNode::Inline(n) => n.data(), + ItemStackNode::Node(n) => n.data(), + } + } + + fn node_plan(&self) -> &NodePlan { + match &self.node { + ItemStackNode::Inline(n) => n.node_plan(), + ItemStackNode::Node(n) => n.node_plan(), + } + } +} + +fn verify_hash<L: TrieLayout>( + data: &[u8], + expected: &[u8], +) -> Result<(), Error<TrieHash<L>, CError<L>>> { + let checked_hash = L::Hash::hash(data); + if checked_hash.as_ref() != expected { + let mut error_hash = TrieHash::<L>::default(); + error_hash.as_mut().copy_from_slice(expected); + Err(Error::HashMismatch(error_hash)) + } else { + Ok(()) + } +} + +/// Byte array where we can remove the first item. +/// This is only needed for the ESCAPE_HEADER of COMPACT, which +/// itself is not strictly needed (we can know if escaped from the +/// query plan). +pub trait SplitFirst: Borrow<[u8]> + Clone { + fn split_first(&mut self); +} + +impl SplitFirst for Vec<u8> { + fn split_first(&mut self) { + *self = self.split_off(1); + } +} + +impl<'a> SplitFirst for &'a [u8] { + fn split_first(&mut self) { + *self = &self[1..]; + } +} + +/// Content returned on success when reading the proof. +pub enum ReadProofItem<'a, L: TrieLayout, C, D: SplitFirst> { + /// Successful read of proof, not all content read. + Halted(Box<HaltedStateCheck<'a, L, C, D>>), + /// Seen value and key in proof. + /// We only return content matching the query plan. + Value(Cow<'a, [u8]>, Vec<u8>), + /// Seen hash of value and key in proof. + /// We only return content matching the query plan. + Hash(Cow<'a, [u8]>, TrieHash<L>), + /// No value seen for a key in the input query plan. + NoValue(&'a [u8]), + /// Seen fully covered prefix in proof. This is only + /// returned when we read the proof with the query input (otherwise + /// we would need to indicate every child without a hash as a prefix). + StartPrefix(Vec<u8>), + /// End of a previously started prefix. + EndPrefix, +} + +#[derive(Clone)] +struct InPrefix { + start: usize, + send_value: bool, + hash_only: bool, +} diff --git a/trie-db/src/query_plan/record.rs b/trie-db/src/query_plan/record.rs new file mode 100644 index 00000000..e4566717 --- /dev/null +++ b/trie-db/src/query_plan/record.rs @@ -0,0 +1,716 @@ +// Copyright 2023 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Record query plan proof. + +use super::*; + +/// Simplified recorder. +pub struct Recorder<L: TrieLayout> { + output: RecorderStateInner, + limits: Limits, + // on restore only record content AFTER this position. + start_at: Option<usize>, + _ph: PhantomData<L>, +} + +impl<L: TrieLayout> Recorder<L> { + /// Check and update the start-at record. + /// When this returns true, do record. + /// Otherwise the content was already recorded. + fn check_start_at(&mut self, depth: usize) -> bool { + if self.start_at.map(|s| s >= depth).unwrap_or(false) { + false + } else { + self.start_at = None; + true + } + } + + /// Get back output handle from a recorder. + pub fn output(self) -> Vec<DBValue> { + match self.output { + RecorderStateInner::Stream(output) | RecorderStateInner::Compact { output, .. } => + output, + } + } + + /// Instantiate a new recorder. + pub fn new( + kind: ProofKind, + output: Vec<DBValue>, + limit_node: Option<usize>, + limit_size: Option<usize>, + ) -> Self { + let output = match kind { + ProofKind::FullNodes => RecorderStateInner::Stream(output), + ProofKind::CompactNodes => + RecorderStateInner::Compact { output, proof: Vec::new(), stacked_pos: Vec::new() }, + }; + let limits = Limits { remaining_node: limit_node, remaining_size: limit_size, kind }; + Self { output, limits, start_at: None, _ph: PhantomData } + } + + #[must_use] + fn record_stacked_node(&mut self, item: &StackedNodeRecord, is_root: bool) -> bool { + if !self.check_start_at(item.depth) { + return false + } + let mut res = false; + match &mut self.output { + RecorderStateInner::Stream(output) => + if !item.is_inline { + res |= self.limits.add_node( + item.node.data().len(), + L::Codec::DELTA_COMPACT_OMITTED_NODE, + is_root, + ); + output.push(item.node.data().into()); + }, + RecorderStateInner::Compact { output: _, proof, stacked_pos } => + if !item.is_inline { + res |= self.limits.add_node( + item.node.data().len(), + L::Codec::DELTA_COMPACT_OMITTED_NODE, + is_root, + ); + stacked_pos.push(proof.len()); + proof.push(Vec::new()); + }, + } + res + } + + #[must_use] + fn record_value_node(&mut self, value: Vec<u8>, depth: usize) -> bool { + if !self.check_start_at(depth) { + return false + } + + let mut res = false; + match &mut self.output { + RecorderStateInner::Stream(output) => { + res |= self.limits.add_value(value.len(), L::Codec::DELTA_COMPACT_OMITTED_VALUE); + output.push(value.into()); + }, + RecorderStateInner::Compact { output: _, proof, stacked_pos: _ } => { + res |= self.limits.add_value(value.len(), L::Codec::DELTA_COMPACT_OMITTED_VALUE); + proof.push(value.into()); + }, + } + res + } +} + +enum RecorderStateInner { + /// For FullNodes proofs, just send nodes to this stream. + Stream(Vec<DBValue>), + /// For CompactNodes proofs, requires keeping the whole proof before sending it. + Compact { + output: Vec<DBValue>, + proof: Vec<Vec<u8>>, + /// Stacked position in proof to modify proof as needed + /// when information got accessed. + stacked_pos: Vec<usize>, + }, +} + +/// When the process is halted, keep the execution state +/// to restore later. +pub struct HaltedStateRecord<L: TrieLayout> { + currently_query_item: Option<QueryPlanItem>, + stack: RecordStack<L>, + // This indicates a restore point; it takes precedence over + // stack and currently_query_item.
+ from: Option<(Vec<u8>, bool)>, +} + +impl<L: TrieLayout> HaltedStateRecord<L> { + /// Indicate that we reuse the query plan iterator + /// and stack. + pub fn statefull(&mut self, recorder: Recorder<L>) -> Vec<DBValue> { + let result = core::mem::replace(&mut self.stack.recorder, recorder); + result.output() + } + + /// Indicate that we restart stateless (on a fresh proof + /// and a fresh query plan iterator). + pub fn stateless(&mut self, recorder: Recorder<L>) -> Vec<DBValue> { + let new_start = Self::from_start(recorder); + let old = core::mem::replace(self, new_start); + self.from = old.from; + self.currently_query_item = None; + old.stack.recorder.output() + } + + /// Init from start. + pub fn from_start(recorder: Recorder<L>) -> Self { + Self::from_at(recorder, None) + } + + /// Init from position or start. + pub fn from_at(recorder: Recorder<L>, at: Option<(Vec<u8>, bool)>) -> Self { + HaltedStateRecord { + currently_query_item: None, + stack: RecordStack { + recorder, + items: Vec::new(), + prefix: NibbleVec::new(), + iter_prefix: None, + halt: false, + seek: None, + }, + from: at, + } + } + + /// If halted, the position where it was halted. + pub fn stopped_at(&self) -> Option<(Vec<u8>, bool)> { + self.from.clone() + } + + /// Check if the state is halted. + pub fn is_halted(&self) -> bool { + self.from.is_some() + } + + /// Finalize state, and return the proof output. + pub fn finish(self) -> Vec<DBValue> { + self.stack.recorder.output() + } + + fn finalize(&mut self) -> Result<(), Error<TrieHash<L>, CError<L>>> { + let stack = &mut self.stack; + let items = &stack.items; + match &mut stack.recorder.output { + RecorderStateInner::Compact { output, proof, stacked_pos } => { + if stacked_pos.len() > 0 { + // halted: complete up to 0 and write all nodes keeping stack. + let mut items = items.iter().rev(); + while let Some(pos) = stacked_pos.pop() { + loop { + let item = items.next().expect("pos stacked with an item"); + if !item.is_inline { + proof[pos] = crate::trie_codec::encode_node_internal::<L::Codec>( + &item.node, + item.accessed_value_node, + item.accessed_children_node, + ) + .map_err(|e| { + if let Some(data) = e { + // invalid node handle conversion for data + Error::InvalidNodeHandle(data) + } else { + // unexpected node in proof + Error::ExtraneousNode + } + })?; + break + } + } + } + } + for entry in core::mem::take(proof) { + output.push(entry.into()); + } + }, + RecorderStateInner::Stream(_output) => { + // all written on access + }, + } + Ok(()) + } + + /// Callback on a node before popping it from the stack. + /// `at` is the position in the stack (in some cases we keep + /// the stack and will not pop the node). + fn record_popped_node(&mut self, at: usize) -> Result<(), Error<TrieHash<L>, CError<L>>> { + let item = self.stack.items.get(at).expect("bounded check call"); + if !self.stack.recorder.check_start_at(item.depth) { + return Ok(()) + } + + match &mut self.stack.recorder.output { + RecorderStateInner::Stream(_) => (), + RecorderStateInner::Compact { proof, stacked_pos, ..
} => + if !item.is_inline { + if let Some(at) = stacked_pos.pop() { + proof[at] = crate::trie_codec::encode_node_internal::<L::Codec>( + &item.node, + item.accessed_value_node, + item.accessed_children_node, + ) + .map_err(|e| { + if let Some(data) = e { + // invalid node handle conversion for data + Error::InvalidNodeHandle(data) + } else { + // unexpected node in proof + Error::ExtraneousNode + } + })?; + } // else when restarting record, this is not to be recorded + }, + } + Ok(()) + } + + fn pop(&mut self) -> Result<bool, Error<TrieHash<L>, CError<L>>> { + if self + .stack + .iter_prefix + .map(|(l, _)| l == self.stack.items.len()) + .unwrap_or(false) + { + return Ok(false) + } + let at = self.stack.items.len(); + if at > 0 { + self.record_popped_node(at - 1)?; + } + Ok(if let Some(item) = self.stack.items.pop() { + let depth = self.stack.items.last().map(|i| i.depth).unwrap_or(0); + self.stack.prefix.drop_lasts(self.stack.prefix.len() - depth); + if depth == item.depth { + // Two consecutive identical depths is an extension + self.pop()?; + } + true + } else { + false + }) + } + + fn iter_prefix( + &mut self, + prev_query: Option<&QueryPlanItemRef>, + db: &TrieDB<L>, + hash_only: bool, + first_iter: bool, + ) -> Result<bool, Error<TrieHash<L>, CError<L>>> { + let dummy_parent_hash = TrieHash::<L>::default(); + if first_iter { + self.stack.enter_prefix_iter(hash_only); + } + + // run prefix iteration + let mut stacked = first_iter; + loop { + // descend + loop { + if stacked { + // try access value in next node + self.stack.access_value(db, hash_only)?; + stacked = false; + } + + let child_index = if let Some(item) = self.stack.items.last_mut() { + if item.next_descended_child as usize >= NIBBLE_LENGTH { + break + } + item.next_descended_child += 1; + item.next_descended_child - 1 + } else { + break + }; + + match self.stack.try_stack_child(child_index, db, dummy_parent_hash, None)? { + TryStackChildResult::StackedFull => { + stacked = true; + }, + TryStackChildResult::StackedInto => unreachable!("Not following plan"), + TryStackChildResult::StackedAfter => unreachable!("Not following plan"), + TryStackChildResult::NotStackedBranch => (), + TryStackChildResult::NotStacked => break, + TryStackChildResult::Halted => { + if let Some(item) = self.stack.items.last_mut() { + item.next_descended_child -= 1; + } + self.stack.halt = false; + self.stack.prefix.push(child_index); + let dest_from = Some(( + self.stack.prefix.inner().to_vec(), + (self.stack.prefix.len() % nibble_ops::NIBBLE_PER_BYTE) != 0, + )); + self.stack.prefix.pop(); + self.finalize()?; + self.stack.halt = false; + self.from = dest_from; + self.currently_query_item = prev_query.map(|q| q.to_owned()); + return Ok(true) + }, + } + } + + // pop + if !self.pop()? { + break + } + } + self.stack.exit_prefix_iter(); + Ok(false) + } +} + +struct RecordStack<L: TrieLayout> { + recorder: Recorder<L>, + items: Vec<StackedNodeRecord>, + prefix: NibbleVec, + iter_prefix: Option<(usize, bool)>, + seek: Option<NibbleVec>, + halt: bool, +} + +/// Run query plan on a full db and record it. +pub fn record_query_plan<'a, L: TrieLayout, I: Iterator<Item = QueryPlanItemRef<'a>>>( + db: &TrieDB<L>, + query_plan: &mut QueryPlan<'a, I>, + from: &mut HaltedStateRecord<L>, +) -> Result<(), Error<TrieHash<L>, CError<L>>> { + let dummy_parent_hash = TrieHash::<L>::default(); + let mut stateless = false; + let mut statefull = None; + // `from` indicates we restart.
+ if let Some(lower_bound) = from.from.take() { + if from.currently_query_item.is_none() { + stateless = true; + let mut bound = NibbleVec::new(); + bound.append_optional_slice_and_nibble(Some(&NibbleSlice::new(&lower_bound.0)), None); + if lower_bound.1 { + bound.pop(); + } + from.stack.recorder.start_at = Some(bound.len() - 1); + from.stack.seek = Some(bound); + } else { + // statefull case + let bound_len = lower_bound.0.len() * nibble_ops::NIBBLE_PER_BYTE - + if lower_bound.1 { 2 } else { 1 }; + from.stack.recorder.start_at = Some(bound_len); + statefull = Some(bound_len); + } + } + + let mut prev_query: Option<QueryPlanItemRef> = None; + let from_query = from.currently_query_item.take(); + let mut from_query_ref = from_query.as_ref().map(|f| f.as_ref()); + while let Some(query) = from_query_ref.clone().or_else(|| query_plan.items.next()) { + if stateless { + // advance query plan + let bound = from.stack.seek.as_ref().expect("Initiated for stateless"); + let bound = bound.as_leftnibbleslice(); + let query_slice = LeftNibbleSlice::new(&query.key); + if query_slice.starts_with(&bound) { + } else if query.as_prefix { + if bound.starts_with(&query_slice) { + } else { + continue + } + } else { + continue + } + stateless = false; + if !query.as_prefix { + from.stack.seek = None; + } + } + if statefull.take().is_none() { + let (ordered, common_nibbles) = + prev_query.as_ref().map(|p| p.before(&query)).unwrap_or((true, 0)); + if !ordered { + return Err(Error::UnorderedKey(query.key.to_vec())) + } + let skip_query = loop { + match from.stack.prefix.len().cmp(&common_nibbles) { + Ordering::Equal => break false, + Ordering::Less => break true, + Ordering::Greater => + if !from.pop()? { + from.finalize()?; + return Ok(()) + }, + } + }; + if skip_query { + // will go down in same branch, skip query_plan + from_query_ref = None; + prev_query = Some(query); + continue + } + }; + if let Some((_, hash_only)) = from.stack.iter_prefix.clone() { + // statefull halted during iteration. + let halt = from.iter_prefix(Some(&query), db, hash_only, false)?; + if halt { + return Ok(()) + } + from_query_ref = None; + prev_query = Some(query); + continue + } + // descend + let mut slice_query = NibbleSlice::new_offset(&query.key, from.stack.prefix.len()); + + let touched = loop { + if !from.stack.items.is_empty() { + if slice_query.is_empty() { + if query.as_prefix { + let halt = from.iter_prefix(Some(&query), db, query.hash_only, true)?; + if halt { + return Ok(()) + } + break false + } else { + break true + } + } + } + + let child_index = if from.stack.items.is_empty() { 0 } else { slice_query.at(0) }; + from.stack.items.last_mut().map(|i| { + i.next_descended_child = child_index + 1; + }); + match from.stack.try_stack_child( + child_index, + db, + dummy_parent_hash, + Some(&mut slice_query), + )?
{ + TryStackChildResult::StackedFull => {}, + TryStackChildResult::NotStackedBranch | TryStackChildResult::NotStacked => + break false, + TryStackChildResult::StackedAfter => break false, + TryStackChildResult::StackedInto => { + if query.as_prefix { + let halt = from.iter_prefix(Some(&query), db, query.hash_only, true)?; + if halt { + return Ok(()) + } + } + break false + }, + TryStackChildResult::Halted => { + from.stack.halt = false; + from.stack.prefix.push(child_index); + from.from = Some(( + from.stack.prefix.inner().to_vec(), + (from.stack.prefix.len() % nibble_ops::NIBBLE_PER_BYTE) != 0, + )); + from.stack.prefix.pop(); + from.currently_query_item = Some(query.to_owned()); + from.finalize()?; + return Ok(()) + }, + } + }; + + if touched { + // try access value + from.stack.access_value(db, query.hash_only)?; + } + from_query_ref = None; + prev_query = Some(query); + } + from.finalize()?; + Ok(()) +} + +impl<L: TrieLayout> RecordStack<L> { + fn try_stack_child<'a>( + &mut self, + child_index: u8, + db: &TrieDB<L>, + parent_hash: TrieHash<L>, + mut slice_query: Option<&mut NibbleSlice>, + ) -> Result<TryStackChildResult, Error<TrieHash<L>, CError<L>>> { + let mut is_inline = false; + let prefix = &mut self.prefix; + let mut stack_extension = false; + let mut from_branch = None; + let child_handle = if let Some(item) = self.items.last_mut() { + debug_assert!(!item.accessed_children_node.at(child_index as usize)); + if item.accessed_children_node.at(child_index as usize) { + // No reason to go twice into the same branch + return Ok(TryStackChildResult::NotStackedBranch) + } + + let node_data = item.node.data(); + + match item.node.node_plan() { + NodePlan::Empty | NodePlan::Leaf { .. } => + return Ok(TryStackChildResult::NotStacked), + NodePlan::Extension { child, .. } => + if child_index == 0 { + let child_handle = child.build(node_data); + if let &NodeHandle::Hash(_) = &child_handle { + item.accessed_children_node.set(child_index as usize, true); + } + child_handle + } else { + return Ok(TryStackChildResult::NotStacked) + }, + NodePlan::NibbledBranch { children, .. } | NodePlan::Branch { children, .. } => + if let Some(child) = &children[child_index as usize] { + from_branch = Some(&mut item.accessed_children_node); + child.build(node_data) + } else { + return Ok(TryStackChildResult::NotStackedBranch) + }, + } + } else { + NodeHandle::Hash(db.root().as_ref()) + }; + match &child_handle { + NodeHandle::Inline(_) => { + is_inline = true; + }, + NodeHandle::Hash(_) => { + if self.halt && from_branch.is_some() { + // halt condition + return Ok(TryStackChildResult::Halted) + } + }, + } + if let Some(accessed_children_node) = from_branch { + if !is_inline { + accessed_children_node.set(child_index as usize, true); + } + + slice_query.as_mut().map(|s| s.advance(1)); + prefix.push(child_index); + } + let child_node = db + .get_raw_or_lookup_with_cache( + parent_hash, + child_handle, + prefix.as_prefix(), + false, + true, + ) + .map_err(|_| { + let mut hash = TrieHash::<L>::default(); + if let NodeHandle::Hash(h) = &child_handle { + let bound = crate::rstd::cmp::min(h.as_ref().len(), h.len()); + hash.as_mut()[..bound].copy_from_slice(&h[..bound]); + } + Error::IncompleteDB(hash) + })? + .0; + + let node_data = child_node.data(); + + let result = match child_node.node_plan() { + NodePlan::Branch { .. } | NodePlan::Empty => TryStackChildResult::StackedFull, + NodePlan::Leaf { partial, .. } | + NodePlan::NibbledBranch { partial, ..
} | + NodePlan::Extension { partial, .. } => { + let partial = partial.build(node_data); + prefix.append_partial(partial.right()); + if let Some(s) = slice_query.as_mut() { + let common = partial.common_prefix(s); + // s starts with partial + let r = if common == partial.len() { + TryStackChildResult::StackedFull + } else if common == s.len() { + // partial starts with s + TryStackChildResult::StackedInto + } else { + TryStackChildResult::StackedAfter + }; + s.advance(common); + r + } else { + TryStackChildResult::StackedFull + } + }, + }; + if let NodePlan::Extension { .. } = child_node.node_plan() { + stack_extension = true; + } + let next_descended_child = if let Some(seek) = self.seek.as_ref() { + if result != TryStackChildResult::StackedAfter && prefix.len() < seek.len() { + seek.at(prefix.len()) + } else { + self.seek = None; + 0 + } + } else { + 0 + }; + let infos = StackedNodeRecord { + node: child_node, + accessed_children_node: Default::default(), + accessed_value_node: false, + depth: prefix.len(), + next_descended_child, + is_inline, + }; + self.halt |= self.recorder.record_stacked_node(&infos, self.items.is_empty()); + self.items.push(infos); + if stack_extension { + let sbranch = self.try_stack_child(0, db, parent_hash, slice_query)?; + let TryStackChildResult::StackedFull = sbranch else { + return Err(Error::InvalidChildReference( + b"branch in db should follow extension".to_vec(), + )) + }; + } + + Ok(result) + } + + fn access_value<'a>( + &mut self, + db: &TrieDB<L>, + hash_only: bool, + ) -> Result<bool, Error<TrieHash<L>, CError<L>>> { + let Some(item) = self.items.last_mut() else { return Ok(false) }; + let node_data = item.node.data(); + + let value = match item.node.node_plan() { + NodePlan::Leaf { value, .. } => value.build(node_data), + NodePlan::Branch { value, .. } | NodePlan::NibbledBranch { value, .. } => { + if let Some(value) = value { + value.build(node_data) + } else { + return Ok(false) + } + }, + _ => return Ok(false), + }; + match value { + Value::Node(hash_slice) => + if !hash_only { + item.accessed_value_node = true; + let mut hash = TrieHash::<L>::default(); + hash.as_mut().copy_from_slice(hash_slice); + let Some(value) = db.db().get(&hash, self.prefix.as_prefix()) else { + return Err(Error::IncompleteProof) + }; + self.halt |= self.recorder.record_value_node(value, self.prefix.len()); + }, + Value::Inline(_) => (), + } + Ok(true) + } + + fn enter_prefix_iter(&mut self, hash_only: bool) { + self.iter_prefix = Some((self.items.len(), hash_only)); + } + + fn exit_prefix_iter(&mut self) { + self.iter_prefix = None + } +} diff --git a/trie-db/src/query_plan/verify.rs b/trie-db/src/query_plan/verify.rs new file mode 100644 index 00000000..8641178d --- /dev/null +++ b/trie-db/src/query_plan/verify.rs @@ -0,0 +1,890 @@ +// Copyright 2023 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Verify query plan proof.
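Before moving to the verification half, here is a hedged usage sketch of the recording half, built only from APIs visible in this diff (`InMemQueryPlan`, `Recorder::new`, `HaltedStateRecord::from_start`, `record_query_plan`, `finish`); constructing the `TrieDB` itself is elided, and error details are dropped:

```rust
use trie_db::query_plan::{
    record_query_plan, HaltedStateRecord, InMemQueryPlan, ProofKind, QueryPlanItem, Recorder,
};
use trie_db::{TrieDB, TrieLayout};

fn prove<L: TrieLayout>(db: &TrieDB<L>) -> Result<Vec<Vec<u8>>, ()> {
    // Items must be in key order, per the `UnorderedKey` check above.
    let plan = InMemQueryPlan {
        items: vec![
            QueryPlanItem::new(b"alpha".to_vec(), false, false), // exact key
            QueryPlanItem::new(b"bet".to_vec(), false, true),    // whole prefix
        ],
        kind: ProofKind::FullNodes,
    };
    // `None, None`: no node-count or byte-size limit, so recording never halts.
    let recorder = Recorder::new(ProofKind::FullNodes, Vec::new(), None, None);
    let mut state = HaltedStateRecord::from_start(recorder);
    record_query_plan::<L, _>(db, &mut plan.as_ref(), &mut state).map_err(|_| ())?;
    Ok(state.finish())
}
```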
+ +use super::*; +use core::marker::PhantomData; + +use crate::{ + nibble::{nibble_ops, nibble_ops::NIBBLE_LENGTH, NibbleSlice}, + rstd::{boxed::Box, convert::TryInto, result::Result}, + CError, TrieHash, TrieLayout, +}; +pub use record::{record_query_plan, HaltedStateRecord, Recorder}; + +/// Result of the verify iterator. +type VerifyIteratorResult<'a, L, C, D> = + Result, Error, CError>>; + +/// Proof reading iterator. +pub struct ReadProofIterator<'a, L, C, D, P> +where + L: TrieLayout, + C: Iterator>, + P: Iterator, + D: SplitFirst, +{ + // Always needed; this is an `Option` only + // to avoid unsafe code when halting. + query_plan: Option>, + proof: P, + kind: ProofKind, + expected_root: Option>, + current: Option>, + current_offset: usize, + state: ReadProofState, + stack: Stack, + send_enter_prefix: Option>, + send_exit_prefix: bool, + buffed_result: Option>>, +} + +struct Stack { + items: Vec>, + prefix: NibbleVec, + // Prefix iteration state: start item, whether the value was sent, + // and whether the iteration is hash-only. + iter_prefix: Option, + start_items: usize, + kind: ProofKind, + expect_value: bool, + accessed_root: bool, + _ph: PhantomData, +} + +impl From for Stack { + fn from(kind: ProofKind) -> Self { + Self { + items: Default::default(), + start_items: 0, + prefix: Default::default(), + kind, + expect_value: false, + iter_prefix: None, + accessed_root: false, + _ph: PhantomData, + } + } +} + +impl Clone for Stack { + fn clone(&self) -> Self { + Self { + items: self.items.clone(), + prefix: self.prefix.clone(), + start_items: self.start_items.clone(), + iter_prefix: self.iter_prefix.clone(), + kind: self.kind, + expect_value: self.expect_value, + accessed_root: self.accessed_root, + _ph: PhantomData, + } + } +} + +/// Read the proof. +/// +/// If the expected root is `None`, then hashes are not checked at all.
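+/// The `state` is built with `HaltedStateCheck::from(query_plan)` for a fresh
+/// run, or taken from a previously returned `ReadProofItem::Halted` to resume
+/// with the next proof chunk, e.g. (sketch, names illustrative):
+///
+/// ```ignore
+/// // Fresh run:
+/// let state: HaltedStateCheck<_, _, _> = query_plan.as_ref().into();
+/// // Resumed run, `halted` boxed by `ReadProofItem::Halted`:
+/// // let state = *halted;
+/// let iter = verify_query_plan_iter::<L, _, _, _>(state, chunk.into_iter(), Some(root))?;
+/// ```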
+pub fn verify_query_plan_iter<'a, L, C, D, P>( + state: HaltedStateCheck<'a, L, C, D>, + proof: P, + expected_root: Option>, +) -> Result, Error, CError>> +where + L: TrieLayout, + C: Iterator>, + P: Iterator, + D: SplitFirst, +{ + let HaltedStateCheck { query_plan, current, stack, state, restore_offset } = state; + + Ok(ReadProofIterator { + query_plan: Some(query_plan), + proof, + kind: stack.kind, + expected_root, + current, + state, + stack, + current_offset: restore_offset, + send_enter_prefix: None, + send_exit_prefix: false, + buffed_result: None, + }) +} + +impl<'a, L, C, D, P> ReadProofIterator<'a, L, C, D, P> +where + L: TrieLayout, + C: Iterator>, + P: Iterator, + D: SplitFirst, +{ + fn halt(&mut self) -> VerifyIteratorResult<'a, L, C, D> { + if self.kind.is_compact() { + let r = self.stack.pop_until(None, &self.expected_root, true); + if let Err(e) = r { + self.state = ReadProofState::Finished; + return Err(e) + } + } + self.state = ReadProofState::Finished; + let query_plan = crate::rstd::mem::replace(&mut self.query_plan, None); + let query_plan = query_plan.expect("Init with state"); + let current = crate::rstd::mem::take(&mut self.current); + let mut stack = crate::rstd::mem::replace(&mut self.stack, self.kind.into()); + stack.start_items = stack.items.len(); + Ok(ReadProofItem::Halted(Box::new(HaltedStateCheck { + query_plan, + current, + restore_offset: self.current_offset, + stack, + state: ReadProofState::Halted, + }))) + } + + fn enter_prefix_iter(&mut self, hash_only: bool, key: &[u8]) { + self.send_enter_prefix = Some(key.to_vec()); + self.stack.iter_prefix = + Some(InPrefix { start: self.stack.items.len(), send_value: false, hash_only }); + } + + fn exit_prefix_iter(&mut self) { + self.send_exit_prefix = true; + self.stack.iter_prefix = None + } +} + +impl<'a, L, C, D, P> Iterator for ReadProofIterator<'a, L, C, D, P> +where + L: TrieLayout, + C: Iterator>, + P: Iterator, + D: SplitFirst, +{ + type Item = VerifyIteratorResult<'a, L, C, D>; + + fn next(&mut self) -> Option { + debug_assert!(self.send_enter_prefix.is_none()); + debug_assert!(!self.send_exit_prefix); + if let Some(r) = self.buffed_result.take() { + return r + } + let r = self.next_inner(); + if let Some(k) = self.send_enter_prefix.take() { + self.buffed_result = Some(r); + return Some(Ok(ReadProofItem::StartPrefix(k))) + } + if self.send_exit_prefix { + self.buffed_result = Some(r); + self.send_exit_prefix = false; + return Some(Ok(ReadProofItem::EndPrefix)) + } else { + r + } + } +} + +impl<'a, L, C, D, P> ReadProofIterator<'a, L, C, D, P> +where + L: TrieLayout, + C: Iterator>, + P: Iterator, + D: SplitFirst, +{ + fn next_inner(&mut self) -> Option> { + if self.state == ReadProofState::Finished { + return None + } + let check_hash = self.expected_root.is_some(); + if self.state == ReadProofState::Halted { + self.state = ReadProofState::Running; + } + let mut to_check_slice = self + .current + .as_ref() + .map(|n| NibbleSlice::new_offset(n.key, self.current_offset)); + + // read proof + loop { + if self.send_exit_prefix { + debug_assert!(self.send_enter_prefix.is_none()); + debug_assert!(self.buffed_result.is_none()); + self.send_exit_prefix = false; + return Some(Ok(ReadProofItem::EndPrefix)) + } + if self.state == ReadProofState::SwitchQueryPlan || + self.state == ReadProofState::NotStarted + { + let query_plan = self.query_plan.as_mut().expect("Removed with state"); + if let Some(next) = query_plan.items.next() { + let (ordered, common_nibbles) = if let Some(old) = self.current.as_ref() { + 
old.before(&next) + } else { + (true, 0) + }; + if !ordered { + self.state = ReadProofState::Finished; + return Some(Err(Error::UnorderedKey(next.key.to_vec()))) + } + + match self.stack.pop_until(Some(common_nibbles), &self.expected_root, false) { + Ok(true) => { + self.current = Some(next); + let current = self.current.as_ref().expect("current is set"); + return self.missing_switch_next(current.as_prefix, current.key) + }, + Err(e) => { + self.state = ReadProofState::Finished; + return Some(Err(e)) + }, + Ok(false) => (), + } + + self.state = ReadProofState::Running; + self.current = Some(next); + to_check_slice = self + .current + .as_ref() + .map(|n| NibbleSlice::new_offset(n.key, common_nibbles)); + } else { + self.state = ReadProofState::PlanConsumed; + self.current = None; + break + } + }; + let did_prefix = self.stack.iter_prefix.is_some(); + + while let Some(InPrefix { send_value, hash_only, .. }) = self.stack.iter_prefix.clone() + { + // prefix iteration + if !send_value { + self.stack.iter_prefix.as_mut().map(|s| { + s.send_value = true; + }); + match self.stack.access_value(&mut self.proof, check_hash, hash_only) { + Ok((Some(value), None)) => + return Some(Ok(ReadProofItem::Value( + self.stack.prefix.inner().to_vec().into(), + value, + ))), + Ok((None, Some(hash))) => + return Some(Ok(ReadProofItem::Hash( + self.stack.prefix.inner().to_vec().into(), + hash, + ))), + Ok((None, None)) => (), + Ok(_) => unreachable!(), + Err(e) => { + self.state = ReadProofState::Finished; + return Some(Err(e)) + }, + }; + } + while let Some(child_index) = self.stack.items.last_mut().and_then(|last| { + if last.next_descended_child as usize >= NIBBLE_LENGTH { + None + } else { + let child_index = last.next_descended_child; + last.next_descended_child += 1; + Some(child_index) + } + }) { + let r = match self.stack.try_stack_child( + child_index, + &mut self.proof, + &self.expected_root, + None, + ) { + Ok(r) => r, + Err(e) => { + self.state = ReadProofState::Finished; + return Some(Err(e)) + }, + }; + self.current_offset = to_check_slice.map(|s| s.offset()).unwrap_or(0); + match r { + TryStackChildResult::StackedFull => { + self.stack.iter_prefix.as_mut().map(|p| { + p.send_value = false; + }); + break + }, + TryStackChildResult::StackedAfter | TryStackChildResult::StackedInto => { + unreachable!("slice query none"); + }, + TryStackChildResult::NotStacked => break, + TryStackChildResult::NotStackedBranch => (), + TryStackChildResult::Halted => { + if let Some(last) = self.stack.items.last_mut() { + last.next_descended_child -= 1; + } + return Some(self.halt()) + }, + } + } + if self.stack.iter_prefix.as_ref().map(|p| p.send_value).unwrap_or_default() { + if !match self.stack.pop(&self.expected_root) { + Ok(r) => r, + Err(e) => { + self.state = ReadProofState::Finished; + return Some(Err(e)) + }, + } { + // end iter + self.exit_prefix_iter(); + } + } + } + if did_prefix { + // exit a prefix iter, next content looping + self.state = ReadProofState::SwitchQueryPlan; + continue + } + + let to_check = self.current.as_ref().expect("Init above"); + let to_check_len = to_check.key.len() * nibble_ops::NIBBLE_PER_BYTE; + let mut to_check_slice = to_check_slice.as_mut().expect("Init above"); + let as_prefix = to_check.as_prefix; + let hash_only = to_check.hash_only; + let mut at_value = false; + match self.stack.prefix.len().cmp(&to_check_len) { + Ordering::Equal => + if !self.stack.items.is_empty() { + at_value = true; + }, + Ordering::Less => (), + Ordering::Greater => { + // Two consecutive queries in a node that hides them (two misses in the same + // proof node). + return self.missing_switch_next(as_prefix, to_check.key) + }, + } + + if at_value { + if as_prefix { + self.enter_prefix_iter( + hash_only, + &self.current.as_ref().expect("enter prefix").key, + ); + continue + } + self.state = ReadProofState::SwitchQueryPlan; + match self.stack.access_value(&mut self.proof, check_hash, hash_only) { + Ok((Some(value), None)) => + return Some(Ok(ReadProofItem::Value(to_check.key.into(), value))), + Ok((None, Some(hash))) => + return Some(Ok(ReadProofItem::Hash(to_check.key.into(), hash))), + Ok((None, None)) => return Some(Ok(ReadProofItem::NoValue(to_check.key))), + Ok(_) => unreachable!(), + Err(e) => { + self.state = ReadProofState::Finished; + return Some(Err(e)) + }, + } + } + + let child_index = if self.stack.items.len() == 0 { + // dummy + 0 + } else { + to_check_slice.at(0) + }; + let r = match self.stack.try_stack_child( + child_index, + &mut self.proof, + &self.expected_root, + Some(&mut to_check_slice), + ) { + Ok(r) => r, + Err(e) => { + self.state = ReadProofState::Finished; + return Some(Err(e)) + }, + }; + self.current_offset = to_check_slice.offset(); + match r { + TryStackChildResult::StackedFull => (), + TryStackChildResult::StackedInto => { + if as_prefix { + self.enter_prefix_iter( + hash_only, + &self.current.as_ref().expect("enter prefix").key, + ); + continue + } + self.state = ReadProofState::SwitchQueryPlan; + return Some(Ok(ReadProofItem::NoValue(to_check.key))) + }, + TryStackChildResult::NotStackedBranch | TryStackChildResult::NotStacked => + return self.missing_switch_next(as_prefix, to_check.key), + TryStackChildResult::StackedAfter => + return self.missing_switch_next(as_prefix, to_check.key), + TryStackChildResult::Halted => return Some(self.halt()), + } + } + + debug_assert!(self.state == ReadProofState::PlanConsumed); + if self.kind.is_compact() { + let r = self.stack.pop_until(None, &self.expected_root, false); + if let Err(e) = r { + self.state = ReadProofState::Finished; + return Some(Err(e)) + } + } else { + if self.proof.next().is_some() { + self.state = ReadProofState::Finished; + return Some(Err(Error::ExtraneousNode)) + } + } + self.state = ReadProofState::Finished; + return None + } + + fn missing_switch_next( + &mut self, + as_prefix: bool, + key: &'a [u8], + ) -> Option> { + self.state = ReadProofState::SwitchQueryPlan; + if as_prefix { + self.send_enter_prefix = Some(key.to_vec()); + return Some(Ok(ReadProofItem::EndPrefix)) + } else { + return Some(Ok(ReadProofItem::NoValue(key))) + } + } +} + +/// When the process is halted, keeps the execution state +/// so it can be restored later.
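+///
+/// Sketch of a resume loop over multiple proof chunks (assuming `proofs` is a
+/// `Vec` of chunks produced by a halted recording session):
+///
+/// ```ignore
+/// let mut state: HaltedStateCheck<_, _, _> = query_plan.as_ref().into();
+/// for chunk in proofs {
+///     let mut iter = verify_query_plan_iter::<L, _, _, _>(state, chunk.into_iter(), Some(root))?;
+///     state = loop {
+///         match iter.next() {
+///             Some(Ok(ReadProofItem::Halted(resume))) => break *resume,
+///             Some(item) => drop(item?), // handle proven items here
+///             None => return Ok(()),     // plan fully consumed
+///         }
+///     };
+/// }
+/// ```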
+pub struct HaltedStateCheck<'a, L: TrieLayout, C, D: SplitFirst> { + query_plan: QueryPlan<'a, C>, + current: Option>, + stack: Stack, + state: ReadProofState, + restore_offset: usize, +} + +impl<'a, L: TrieLayout, C, D: SplitFirst> From> for HaltedStateCheck<'a, L, C, D> { + fn from(query_plan: QueryPlan<'a, C>) -> Self { + HaltedStateCheck { + stack: Stack { + items: Default::default(), + start_items: 0, + prefix: Default::default(), + kind: query_plan.kind, + expect_value: false, + iter_prefix: None, + accessed_root: false, + _ph: PhantomData, + }, + state: ReadProofState::NotStarted, + current: None, + restore_offset: 0, + query_plan, + } + } +} + +impl Stack { + fn try_stack_child( + &mut self, + child_index: u8, + proof: &mut impl Iterator, + expected_root: &Option>, + mut slice_query: Option<&mut NibbleSlice>, + ) -> Result, CError>> { + let check_hash = expected_root.is_some(); + let items_len = self.items.len(); + let child_handle = if let Some(node) = self.items.last_mut() { + let node_data = node.data(); + + match node.node_plan() { + NodePlan::Empty | NodePlan::Leaf { .. } => + return Ok(TryStackChildResult::NotStacked), + NodePlan::Extension { .. } => { + unreachable!("Extension never stacked") + }, + NodePlan::NibbledBranch { children, .. } | NodePlan::Branch { children, .. } => + if let Some(child) = &children[child_index as usize] { + child.build(node_data) + } else { + return Ok(TryStackChildResult::NotStackedBranch) + }, + } + } else { + if self.accessed_root { + return Ok(TryStackChildResult::NotStacked) + } + if self.kind.is_compact() { + NodeHandle::Inline(&[]) + } else { + NodeHandle::Hash(expected_root.as_ref().map(AsRef::as_ref).unwrap_or(&[])) + } + }; + let mut node: StackedNodeCheck<_, _> = match child_handle { + NodeHandle::Inline(data) => + if self.kind.is_compact() && data.len() == 0 { + // omitted hash + let Some(mut encoded_node) = proof.next() else { + // A halt happens on a hash child; this is not one. + return Err(Error::IncompleteProof) + }; + if self.kind.is_compact() && + encoded_node.borrow().len() > 0 && + Some(encoded_node.borrow()[0]) == + ::ESCAPE_HEADER + { + self.expect_value = true; + encoded_node.split_first(); + } + let node = match OwnedNode::new::(encoded_node) { + Ok(node) => node, + Err(e) => return Err(Error::DecodeError(e)), + }; + (ItemStackNode::Node(node), self.kind).try_into()? + } else { + // try to access the inline node, then return + ( + ItemStackNode::Inline(match OwnedNode::new::(data.to_vec()) { + Ok(node) => node, + Err(e) => return Err(Error::DecodeError(e)), + }), + self.kind, + ) + .try_into()? + }, + NodeHandle::Hash(hash) => { + let Some(mut encoded_node) = proof.next() else { + return Ok(TryStackChildResult::Halted) + }; + if self.kind.is_compact() && items_len > self.start_items { + let mut error_hash = TrieHash::::default(); + error_hash.as_mut().copy_from_slice(hash); + return Err(Error::ExtraneousHashReference(error_hash)) + } + if self.kind.is_compact() && + encoded_node.borrow().len() > 0 && + Some(encoded_node.borrow()[0]) == + ::ESCAPE_HEADER + { + self.expect_value = true; + encoded_node.split_first(); + } + let node = match OwnedNode::new::(encoded_node) { + Ok(node) => node, + Err(e) => return Err(Error::DecodeError(e)), + }; + if !self.kind.is_compact() && check_hash { + verify_hash::(node.data(), hash)?; + } + (ItemStackNode::Node(node), self.kind).try_into()? + }, + }; + let node_data = node.data(); + let mut result = TryStackChildResult::StackedFull; + match node.node_plan() { + NodePlan::Branch { .. } | NodePlan::Empty => (), + NodePlan::Leaf { partial, .. } | + NodePlan::NibbledBranch { partial, .. } | + NodePlan::Extension { partial, .. } => { + let partial = partial.build(node_data); + if self.items.len() > 0 { + if let Some(slice) = slice_query.as_mut() { + slice.advance(1); + } + self.prefix.push(child_index); + } + result = if let Some(slice) = slice_query.as_mut() { + if slice.starts_with(&partial) { + TryStackChildResult::StackedFull + } else if partial.starts_with(slice) { + TryStackChildResult::StackedInto + } else { + TryStackChildResult::StackedAfter + } + } else { + TryStackChildResult::StackedFull + }; + if result != TryStackChildResult::StackedFull { + // end of query + } else if let Some(slice) = slice_query.as_mut() { + slice.advance(partial.len()); + } + self.prefix.append_partial(partial.right()); + }, + } + if let NodePlan::Extension { child, .. } = node.node_plan() { + let node_data = node.data(); + let child = child.build(node_data); + match child { + NodeHandle::Hash(hash) => { + let Some(encoded_branch) = proof.next() else { + // No halt on extension node (restart over a child index). + return Err(Error::IncompleteProof) + }; + if self.kind.is_compact() { + let mut error_hash = TrieHash::::default(); + error_hash.as_mut().copy_from_slice(hash); + return Err(Error::ExtraneousHashReference(error_hash)) + } + if check_hash { + verify_hash::(encoded_branch.borrow(), hash)?; + } + node = match OwnedNode::new::(encoded_branch) { + Ok(node) => (ItemStackNode::Node(node), self.kind).try_into()?, + Err(e) => return Err(Error::DecodeError(e)), + }; + }, + NodeHandle::Inline(data) => + if self.kind.is_compact() && data.len() == 0 { + unimplemented!("This would require putting the extension on the stack"); + } else { + node = match OwnedNode::new::(data.to_vec()) { + Ok(node) => (ItemStackNode::Inline(node), self.kind).try_into()?, + Err(e) => return Err(Error::DecodeError(e)), + }; + }, + } + let NodePlan::Branch { .. } = node.node_plan() else { + return Err(Error::UnexpectedNodeType) + }; + } + node.depth = self.prefix.len(); + // needed for compact + self.items.last_mut().map(|parent| { + parent.next_descended_child = child_index + 1; + }); + self.items.push(node); + Ok(result) + } + + fn access_value( + &mut self, + proof: &mut impl Iterator, + check_hash: bool, + hash_only: bool, + ) -> Result<(Option>, Option>), Error, CError>> { + if let Some(node) = self.items.last() { + let node_data = node.data(); + + let value = match node.node_plan() { + NodePlan::Leaf { value, .. } => Some(value.build(node_data)), + NodePlan::Branch { value, .. } | NodePlan::NibbledBranch { value, ..
} => + value.as_ref().map(|v| v.build(node_data)), + _ => return Ok((None, None)), + }; + if let Some(value) = value { + match value { + Value::Inline(value) => + if self.expect_value { + assert!(self.kind.is_compact()); + self.expect_value = false; + if hash_only { + return Err(Error::ExtraneousValue(Default::default())) + } + + let Some(value) = proof.next() else { + return Err(Error::IncompleteProof) + }; + if check_hash { + let hash = L::Hash::hash(value.borrow()); + self.items.last_mut().map(|i| i.attached_value_hash = Some(hash)); + } + return Ok((Some(value.borrow().to_vec()), None)) + } else { + if hash_only { + let hash = L::Hash::hash(value); + return Ok((None, Some(hash))) + } + return Ok((Some(value.to_vec()), None)) + }, + Value::Node(hash) => { + if self.expect_value { + if hash_only { + return Err(Error::ExtraneousValue(Default::default())) + } + self.expect_value = false; + let mut error_hash = TrieHash::::default(); + error_hash.as_mut().copy_from_slice(hash); + return Err(Error::ExtraneousHashReference(error_hash)) + } + if hash_only { + let mut result_hash = TrieHash::::default(); + result_hash.as_mut().copy_from_slice(hash); + return Ok((None, Some(result_hash))) + } + let Some(value) = proof.next() else { return Err(Error::IncompleteProof) }; + if check_hash { + verify_hash::(value.borrow(), hash)?; + } + return Ok((Some(value.borrow().to_vec()), None)) + }, + } + } + } else { + return Err(Error::IncompleteProof) + } + + Ok((None, None)) + } + + fn pop( + &mut self, + expected_root: &Option>, + ) -> Result, CError>> { + if self.iter_prefix.as_ref().map(|p| p.start == self.items.len()).unwrap_or(false) { + return Ok(false) + } + if let Some(last) = self.items.pop() { + let depth = self.items.last().map(|i| i.depth).unwrap_or(0); + self.prefix.drop_lasts(self.prefix.len() - depth); + if self.kind.is_compact() && expected_root.is_some() { + match last.node { + ItemStackNode::Inline(_) => (), + ItemStackNode::Node(node) => { + let origin = self.start_items; + let node_data = node.data(); + let node = node.node_plan().build(node_data); + let encoded_node = crate::trie_codec::encode_read_node_internal::( + node, + &last.children, + last.attached_value_hash.as_ref().map(|h| h.as_ref()), + ); + + //println!("{:?}", encoded_node); + if self.items.len() == origin { + if let Some(parent) = self.items.last() { + let at = parent.next_descended_child - 1; + if let Some(Some(ChildReference::Hash(expected))) = + parent.children.get(at as usize) + { + verify_hash::(&encoded_node, expected.as_ref())?; + } else { + return Err(Error::RootMismatch(Default::default())) + } + } else { + let expected = expected_root.as_ref().expect("checked above"); + verify_hash::(&encoded_node, expected.as_ref())?; + } + } else if self.items.len() < origin { + // popped origin, need to check against new origin + self.start_items = self.items.len(); + } else { + let hash = L::Hash::hash(&encoded_node); + if let Some(parent) = self.items.last_mut() { + let at = parent.next_descended_child - 1; + match parent.children[at as usize] { + Some(ChildReference::Hash(expected)) => { + // can append if chunks are concatenated (not progressively + // checked) + verify_hash::(&encoded_node, expected.as_ref())?; + }, + None => { + // Complete + parent.children[at as usize] = + Some(ChildReference::Hash(hash)); + }, + Some(ChildReference::Inline(_h, size)) if size == 0 => { + // Remove hash from compact, complete it. 
+ parent.children[at as usize] = + Some(ChildReference::Hash(hash)); + }, + // non null inline, only non inline are stacked. + _ => return Err(Error::RootMismatch(Default::default())), + } + } else { + if &Some(hash) != expected_root { + return Err(Error::RootMismatch(hash)) + } + } + } + }, + } + } + Ok(true) + } else { + Ok(false) + } + } + + fn pop_until( + &mut self, + target: Option, + expected_root: &Option>, + check_only: bool, + ) -> Result, CError>> { + if self.kind.is_compact() && expected_root.is_some() { + let mut restore = None; + if check_only { + restore = Some(self.clone()); + self.iter_prefix = None; + } + // one by one + while let Some(last) = self.items.last() { + if let Some(target) = target.as_ref() { + match last.depth.cmp(&target) { + Ordering::Greater => (), + // depth should match. + Ordering::Less => { + // skip query plan + return Ok(true) + }, + Ordering::Equal => return Ok(false), + } + } + // one by one + let _ = self.pop(expected_root)?; + if self.items.len() == self.start_items { + break + } + } + + if let Some(old) = restore.take() { + *self = old; + return Ok(false) + } + } + loop { + if let Some(last) = self.items.last() { + if let Some(target) = target.as_ref() { + match last.depth.cmp(&target) { + Ordering::Greater => (), + // depth should match. + Ordering::Less => { + // skip + return Ok(true) + }, + Ordering::Equal => { + self.prefix.drop_lasts(self.prefix.len() - last.depth); + return Ok(false) + }, + } + } + } else { + if target.unwrap_or(0) == 0 { + return Ok(false) + } else { + return Ok(true) + } + } + let _ = self.items.pop(); + if self.items.len() < self.start_items { + self.start_items = self.items.len(); + } + } + } +} diff --git a/trie-db/src/trie_codec.rs b/trie-db/src/trie_codec.rs index 9a1f51b3..6fc5f199 100644 --- a/trie-db/src/trie_codec.rs +++ b/trie-db/src/trie_codec.rs @@ -28,6 +28,7 @@ use crate::{ nibble_ops::NIBBLE_LENGTH, node::{Node, NodeHandle, NodeHandlePlan, NodePlan, OwnedNode, ValuePlan}, + query_plan::BitmapAccess, rstd::{boxed::Box, convert::TryInto, marker::PhantomData, result, sync::Arc, vec, vec::Vec}, CError, ChildReference, DBValue, NibbleVec, NodeCodec, Result, TrieDB, TrieDBRawIterator, TrieError, TrieHash, TrieLayout, @@ -100,88 +101,94 @@ impl EncoderStackEntry { /// Generates the encoding of the subtrie rooted at this entry. 
fn encode_node(&mut self) -> Result, C::HashOut, C::Error> { - let node_data = self.node.data(); - let node_plan = self.node.node_plan(); - let mut encoded = match node_plan { - NodePlan::Empty => node_data.to_vec(), - NodePlan::Leaf { partial, value: _ } => - if self.omit_value { - let partial = partial.build(node_data); - C::leaf_node(partial.right_iter(), partial.len(), OMIT_VALUE_HASH) - } else { - node_data.to_vec() - }, - NodePlan::Extension { partial, child: _ } => - if !self.omit_children[0] { - node_data.to_vec() - } else { - let partial = partial.build(node_data); - let empty_child = ChildReference::Inline(C::HashOut::default(), 0); - C::extension_node(partial.right_iter(), partial.len(), empty_child) - }, - NodePlan::Branch { value, children } => { - let value = if self.omit_value { - value.is_some().then_some(OMIT_VALUE_HASH) - } else { - value.as_ref().map(|v| v.build(node_data)) - }; - C::branch_node( - Self::branch_children(node_data, &children, &self.omit_children)?.iter(), - value, - ) + encode_node_internal::(&*self.node, self.omit_value, self.omit_children.as_slice()) + .map_err(|err| { + Box::new(TrieError::InvalidHash(C::HashOut::default(), err.unwrap_or_default())) + }) + } +} + +pub(crate) fn encode_node_internal( + node: &OwnedNode, + omit_value: bool, + omit_children: impl BitmapAccess, +) -> crate::rstd::result::Result, Option>> { + let node_data = node.data(); + let node_plan = node.node_plan(); + let mut encoded = match node_plan { + NodePlan::Empty => node_data.to_vec(), + NodePlan::Leaf { partial, value: _ } => + if omit_value { + let partial = partial.build(node_data); + C::leaf_node(partial.right_iter(), partial.len(), OMIT_VALUE_HASH) + } else { + node_data.to_vec() }, - NodePlan::NibbledBranch { partial, value, children } => { + NodePlan::Extension { partial, child: _ } => + if !omit_children.at(0) { + node_data.to_vec() + } else { let partial = partial.build(node_data); - let value = if self.omit_value { - value.is_some().then_some(OMIT_VALUE_HASH) - } else { - value.as_ref().map(|v| v.build(node_data)) - }; - C::branch_node_nibbled( - partial.right_iter(), - partial.len(), - Self::branch_children(node_data, &children, &self.omit_children)?.iter(), - value, - ) + let empty_child = ChildReference::Inline(C::HashOut::default(), 0); + C::extension_node(partial.right_iter(), partial.len(), empty_child) }, - }; - - if self.omit_value { - if let Some(header) = C::ESCAPE_HEADER { - encoded.insert(0, header); + NodePlan::Branch { value, children } => { + let value = if omit_value { + value.is_some().then_some(OMIT_VALUE_HASH) } else { - return Err(Box::new(TrieError::InvalidStateRoot(Default::default()))) - } - } - Ok(encoded) - } - - /// Generate the list of child references for a branch node with certain children omitted. - /// - /// Preconditions: - /// - omit_children has size NIBBLE_LENGTH. 
- /// - omit_children[i] is only true if child_handles[i] is Some - fn branch_children( - node_data: &[u8], - child_handles: &[Option; NIBBLE_LENGTH], - omit_children: &[bool], - ) -> Result<[Option>; NIBBLE_LENGTH], C::HashOut, C::Error> { - let empty_child = ChildReference::Inline(C::HashOut::default(), 0); - let mut children = [None; NIBBLE_LENGTH]; - for i in 0..NIBBLE_LENGTH { - children[i] = if omit_children[i] { - Some(empty_child) - } else if let Some(child_plan) = &child_handles[i] { - let child_ref = child_plan.build(node_data).try_into().map_err(|hash| { - Box::new(TrieError::InvalidHash(C::HashOut::default(), hash)) - })?; - Some(child_ref) + value.as_ref().map(|v| v.build(node_data)) + }; + C::branch_node(branch_children::(node_data, &children, omit_children)?.iter(), value) + }, + NodePlan::NibbledBranch { partial, value, children } => { + let partial = partial.build(node_data); + let value = if omit_value { + value.is_some().then_some(OMIT_VALUE_HASH) } else { - None + value.as_ref().map(|v| v.build(node_data)) }; + C::branch_node_nibbled( + partial.right_iter(), + partial.len(), + branch_children::(node_data, &children, omit_children)?.iter(), + value, + ) + }, + }; + + if omit_value { + if let Some(header) = C::ESCAPE_HEADER { + encoded.insert(0, header); + } else { + return Err(None) } - Ok(children) } + Ok(encoded) +} + +/// Generate the list of child references for a branch node with certain children omitted. +/// +/// Preconditions: +/// - omit_children has size NIBBLE_LENGTH. +/// - omit_children[i] is only true if child_handles[i] is Some +fn branch_children( + node_data: &[u8], + child_handles: &[Option; NIBBLE_LENGTH], + omit_children: impl BitmapAccess, +) -> crate::rstd::result::Result<[Option>; NIBBLE_LENGTH], Vec> { + let empty_child = ChildReference::Inline(C::HashOut::default(), 0); + let mut children = [None; NIBBLE_LENGTH]; + for i in 0..NIBBLE_LENGTH { + children[i] = if omit_children.at(i) { + Some(empty_child) + } else if let Some(child_plan) = &child_handles[i] { + let child_ref = child_plan.build(node_data).try_into()?; + Some(child_ref) + } else { + None + }; + } + Ok(children) } /// Detached value if included does write a reserved header, @@ -407,27 +414,35 @@ impl<'a, C: NodeCodec> DecoderStackEntry<'a, C> { /// Preconditions: /// - if node is an extension node, then `children[0]` is Some. 
fn encode_node(self, attached_hash: Option<&[u8]>) -> Vec { - let attached_hash = attached_hash.map(|h| crate::node::Value::Node(h)); - match self.node { - Node::Empty => C::empty_node().to_vec(), - Node::Leaf(partial, value) => - C::leaf_node(partial.right_iter(), partial.len(), attached_hash.unwrap_or(value)), - Node::Extension(partial, _) => C::extension_node( - partial.right_iter(), - partial.len(), - self.children[0].expect("required by method precondition; qed"), - ), - Node::Branch(_, value) => C::branch_node( - self.children.into_iter(), - if attached_hash.is_some() { attached_hash } else { value }, - ), - Node::NibbledBranch(partial, _, value) => C::branch_node_nibbled( - partial.right_iter(), - partial.len(), - self.children.iter(), - if attached_hash.is_some() { attached_hash } else { value }, - ), - } + encode_read_node_internal::(self.node, &self.children, attached_hash) + } +} + +pub(crate) fn encode_read_node_internal( + node: Node, + children: &Vec>>, + attached_hash: Option<&[u8]>, +) -> Vec { + let attached_hash = attached_hash.map(|h| crate::node::Value::Node(h)); + match node { + Node::Empty => C::empty_node().to_vec(), + Node::Leaf(partial, value) => + C::leaf_node(partial.right_iter(), partial.len(), attached_hash.unwrap_or(value)), + Node::Extension(partial, _) => C::extension_node( + partial.right_iter(), + partial.len(), + children[0].expect("required by method precondition; qed"), + ), + Node::Branch(_, value) => C::branch_node( + children.into_iter(), + if attached_hash.is_some() { attached_hash } else { value }, + ), + Node::NibbledBranch(partial, _, value) => C::branch_node_nibbled( + partial.right_iter(), + partial.len(), + children.iter(), + if attached_hash.is_some() { attached_hash } else { value }, + ), + } } diff --git a/trie-db/src/triedb.rs b/trie-db/src/triedb.rs index ad6166eb..55f723bc 100644 --- a/trie-db/src/triedb.rs +++ b/trie-db/src/triedb.rs @@ -153,20 +153,65 @@ where node_handle: NodeHandle, partial_key: Prefix, record_access: bool, + ) -> Result<(OwnedNode, Option>), TrieHash, CError> { + self.get_raw_or_lookup_with_cache( + parent_hash, + node_handle, + partial_key, + record_access, + false, + ) + } + + /// Same as `get_raw_or_lookup`, but can optionally use the node cache. + /// Warning: going through the cache encodes and decodes the node twice, so + /// only use it to avoid a costly db access; generally a db cache is better. + /// A small change to the cache API could lift this.
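+ /// Sketch of the intended internal call site (as used by the query plan
+ /// recorder; flags shown are `record_access = false`, `use_cache = true`):
+ ///
+ /// ```ignore
+ /// let (node, _hash) = self.get_raw_or_lookup_with_cache(
+ ///     parent_hash,
+ ///     child_handle,
+ ///     prefix.as_prefix(),
+ ///     false, // record_access
+ ///     true,  // use_cache
+ /// )?;
+ /// ```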
+ pub(crate) fn get_raw_or_lookup_with_cache( + &self, + parent_hash: TrieHash, + node_handle: NodeHandle, + partial_key: Prefix, + record_access: bool, + use_cache: bool, ) -> Result<(OwnedNode, Option>), TrieHash, CError> { let (node_hash, node_data) = match node_handle { NodeHandle::Hash(data) => { let node_hash = decode_hash::(data) .ok_or_else(|| Box::new(TrieError::InvalidHash(parent_hash, data.to_vec())))?; - let node_data = self.db.get(&node_hash, partial_key).ok_or_else(|| { - if partial_key == EMPTY_PREFIX { - Box::new(TrieError::InvalidStateRoot(node_hash)) - } else { - Box::new(TrieError::IncompleteDatabase(node_hash)) - } - })?; - - (Some(node_hash), node_data) + if use_cache && self.cache.as_ref().is_some() { + let c = self.cache.as_ref().expect("checked above"); + let mut cache = c.borrow_mut(); + let node = cache.get_or_insert_node(node_hash, &mut || { + let node_data = self.db.get(&node_hash, partial_key).ok_or_else(|| { + if partial_key == EMPTY_PREFIX { + Box::new(TrieError::InvalidStateRoot(node_hash)) + } else { + Box::new(TrieError::IncompleteDatabase(node_hash)) + } + })?; + use crate::node_codec::NodeCodec; + let decoded = match L::Codec::decode(&node_data[..]) { + Ok(node) => node, + Err(e) => return Err(Box::new(TrieError::DecoderError(node_hash, e))), + }; + + decoded.to_owned_node::() + })?; + let encoded = node.to_encoded::(); + + (Some(node_hash), encoded) + } else { + let node_data = self.db.get(&node_hash, partial_key).ok_or_else(|| { + if partial_key == EMPTY_PREFIX { + Box::new(TrieError::InvalidStateRoot(node_hash)) + } else { + Box::new(TrieError::IncompleteDatabase(node_hash)) + } + })?; + + (Some(node_hash), node_data) + } }, NodeHandle::Inline(data) => (None, data.to_vec()), }; diff --git a/trie-db/src/triedbmut.rs b/trie-db/src/triedbmut.rs index bf3edbcb..e6c19353 100644 --- a/trie-db/src/triedbmut.rs +++ b/trie-db/src/triedbmut.rs @@ -555,7 +555,7 @@ enum Stored { } /// Used to build a collection of child nodes from a collection of `NodeHandle`s -#[derive(Clone, Copy)] +#[derive(Clone, Copy, PartialEq, Eq)] #[cfg_attr(feature = "std", derive(Debug))] pub enum ChildReference { // `HO` is e.g. `H256`, i.e. 
the output of a `Hasher` @@ -1911,7 +1911,6 @@ where cache_child_values::(&node, &mut values_to_cache, full_key.clone()); } - drop(node); values_to_cache.into_iter().for_each(|(k, v)| cache.cache_value_for_key(&k, v)); } } diff --git a/trie-db/test/Cargo.toml b/trie-db/test/Cargo.toml index 547df945..cecfe751 100644 --- a/trie-db/test/Cargo.toml +++ b/trie-db/test/Cargo.toml @@ -15,13 +15,15 @@ harness = false trie-db = { path = "..", version = "0.27.0"} hash-db = { path = "../../hash-db", version = "0.16.0"} memory-db = { path = "../../memory-db", version = "0.32.0" } -rand = { version = "0.8", default-features = false, features = ["small_rng"] } trie-standardmap = { path = "../../test-support/trie-standardmap", version = "0.16.0" } reference-trie = { path = "../../test-support/reference-trie", version = "0.29.0" } -hex-literal = "0.3" -criterion = "0.4.0" +arbitrary = { version = "1.3.0", features = ["derive"] } +array-bytes = "6.0.0" +criterion = "0.5.1" env_logger = { version = "0.10", default-features = false } +hex-literal = "0.3" log = "0.4" +rand = { version = "0.8", default-features = false, features = ["small_rng"] } [dev-dependencies] array-bytes = "6.0.0" diff --git a/trie-db/fuzz/src/lib.rs b/trie-db/test/src/fuzz.rs similarity index 56% rename from trie-db/fuzz/src/lib.rs rename to trie-db/test/src/fuzz.rs index f9de8c07..4d5ccf4d 100644 --- a/trie-db/fuzz/src/lib.rs +++ b/trie-db/test/src/fuzz.rs @@ -471,3 +471,375 @@ fn test_trie_codec_proof(entries: Vec<(Vec, Vec)>, keys: assert_eq!(trie.get(key.as_slice()).unwrap(), expected_value); } } + +/// Query plan proof fuzzing. +pub mod query_plan { + use super::*; + use crate::{test_entries, MemoryDB}; + use arbitrary::Arbitrary; + use rand::{rngs::SmallRng, RngCore, SeedableRng}; + use reference_trie::TestTrieCache; + use std::collections::{BTreeMap, BTreeSet}; + use trie_db::{ + query_plan::{ + record_query_plan, HaltedStateRecord, InMemQueryPlan, ProofKind, QueryPlanItem, + Recorder, + }, + TrieHash, TrieLayout, + }; + + const KEY_SIZES: [usize; 7] = [1, 2, 3, 4, 5, 29, 300]; + + // deterministic generator. + type Rng = SmallRng; + + /// Config for fuzzing. + #[derive(Debug, Clone, Copy, PartialEq, Eq)] + pub struct Conf { + /// Seed. + pub seed: u64, + /// proof kind. + pub kind: ProofKind, + /// number of items in state. + pub nb_key_value: usize, + /// number of different small values. + pub nb_small_value_set: usize, + /// number of different big values. + pub nb_big_value_set: usize, + /// Test querying hash only. + pub hash_only: bool, + /// Limit the number of non-inline values per proof. + /// TODO could be arbitrary. + pub limit: usize, + /// Whether a halted proof is resumed statefully (keeping recorder state). + pub proof_spawn_with_persistence: bool, + /* + /// number of query existing. + pub nb_existing_value_query: usize, + /// number of query missing. + pub nb_missing_value_query: usize, + /// prefix query (can reduce `nb_existing_value_query`).
+ pub nb_prefix_query: usize, + */ + } + + #[derive(Clone)] + pub struct FuzzContext { + pub reference: BTreeMap, Vec>, + pub db: MemoryDB, + pub root: TrieHash, + pub conf: Conf, + pub small_values: BTreeSet>, + pub big_values: BTreeSet>, + pub values: Vec>, + } + + fn bytes_set( + rng: &mut Rng, + nb: usize, + sizes: &[usize], + max_byte_value: Option, + ) -> BTreeSet> { + if let Some(max_byte_value) = max_byte_value { + let max_nb_value = sizes.len() * max_byte_value; + if nb > (max_nb_value / 2) { + panic!("too many values {}, max is {}", nb, max_nb_value / 2); + } + } + let mut set = BTreeSet::new(); + let mut buff = vec![0u8; nb * 2]; + while set.len() < nb { + rng.fill_bytes(&mut buff); + for i in 0..buff.len() / 2 { + let size = buff[i * 2] as usize % sizes.len(); + let value = if let Some(max_byte_value) = max_byte_value { + let byte = buff[(i * 2) + 1] % max_byte_value as u8; + vec![byte; sizes[size]] + } else { + let mut value = vec![0u8; sizes[size]]; + rng.fill_bytes(&mut value); + value + }; + set.insert(value); + } + } + set + } + + fn small_value_set(rng: &mut Rng, nb: usize) -> BTreeSet> { + let sizes = [1, 2, 30, 31, 32]; + let max_byte_value = 4; // avoid too many different values. + bytes_set(rng, nb, &sizes, Some(max_byte_value)) + } + + fn big_value_set(rng: &mut Rng, nb: usize) -> BTreeSet> { + let sizes = [33, 34, 301, 302]; + let max_byte_value = 4; // avoid too many different values. + bytes_set(rng, nb, &sizes, Some(max_byte_value)) + } + + fn key_set(rng: &mut Rng, nb: usize) -> BTreeSet> { + bytes_set(rng, nb, &KEY_SIZES[..], None) + } + + /// State building (out of fuzzing loop). + pub fn build_state(conf: Conf) -> FuzzContext { + let mut rng = Rng::seed_from_u64(conf.seed); + let mut reference = BTreeMap::, Vec>::new(); + let small_values = small_value_set(&mut rng, conf.nb_small_value_set); + let big_values = big_value_set(&mut rng, conf.nb_big_value_set); + let mut values: Vec> = small_values.iter().cloned().collect(); + values.extend(big_values.iter().cloned()); + let values = values; + let keys = key_set(&mut rng, conf.nb_key_value); + for k in keys.into_iter() { + let value_index = rng.next_u32() as usize % values.len(); + reference.insert(k, values[value_index].clone()); + } + + // add the test entries + for (key, value) in test_entries() { + reference.insert(key.to_vec(), value.to_vec()); + } + + let (db, root) = { + let mut db = >::default(); + let mut root = Default::default(); + { + let mut trie = >::new(&mut db, &mut root).build(); + for (key, value) in reference.iter() { + trie.insert(key, value).unwrap(); + } + } + (db, root) + }; + FuzzContext { reference, db, root, conf, small_values, big_values, values } + } + + #[derive(Arbitrary, Clone, Debug)] + enum ArbitraryKey { + Indexed(usize), + Random(Vec), + } + + /// Base arbitrary for fuzzing.
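+ /// Each entry is `(as_prefix, key)`: `true` turns the entry into a prefix
+ /// query, `ArbitraryKey::Indexed` selects bytes from the context's value
+ /// set, and `ArbitraryKey::Random` uses raw key bytes. For example
+ /// (hand-written, as in the regression tests below):
+ ///
+ /// ```ignore
+ /// let plan = ArbitraryQueryPlan(vec![
+ ///     (true, ArbitraryKey::Indexed(3)),         // prefix query on an indexed value
+ ///     (false, ArbitraryKey::Random(vec![137])), // point query on raw key bytes
+ /// ]);
+ /// ```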
+ #[derive(Arbitrary, Clone, Debug)] + pub struct ArbitraryQueryPlan(Vec<(bool, ArbitraryKey)>); + + fn arbitrary_query_plan( + context: &FuzzContext, + plan: ArbitraryQueryPlan, + ) -> InMemQueryPlan { + let conf = &context.conf; + let mut set = BTreeSet::new(); + for (prefix, k) in plan.0.iter() { + // TODO Rc to avoid clone + match k { + ArbitraryKey::Indexed(at) => { + set.insert((context.values[at % context.values.len()].clone(), !prefix)); + }, + ArbitraryKey::Random(k) => { + set.insert((k.clone(), !prefix)); + }, + } + } + let mut prev_pref: Option> = None; + let mut query_plan = + InMemQueryPlan { items: Vec::with_capacity(set.len()), kind: conf.kind }; + for (key, not_prefix) in set.into_iter() { + if let Some(pref) = prev_pref.as_ref() { + if key.starts_with(pref) { + continue + } + prev_pref = None; + } + + if !not_prefix { + prev_pref = Some(key.clone()); + } + + query_plan.items.push(QueryPlanItem::new(key, conf.hash_only, !not_prefix)); + } + query_plan + } + + /// Main entry point for query plan fuzzing. + pub fn fuzz_query_plan(context: &FuzzContext, plan: ArbitraryQueryPlan) { + let conf = context.conf.clone(); + fuzz_query_plan_conf(context, conf, plan); + } + + /// Main entry point for query plan fuzzing. + pub fn fuzz_query_plan_conf( + context: &FuzzContext, + conf: Conf, + plan: ArbitraryQueryPlan, + ) { + let query_plan = arbitrary_query_plan(context, plan); + + let kind = conf.kind; + let limit = conf.limit; + let limit = (limit != 0).then(|| limit); + let recorder = Recorder::new(conf.kind, Default::default(), limit, None); + let mut from = HaltedStateRecord::from_start(recorder); + let mut proofs: Vec>> = Default::default(); + let mut query_plan_iter = query_plan.as_ref(); + let mut cache = TestTrieCache::::default(); + let db = >::new(&context.db, &context.root) + .with_cache(&mut cache) + .build(); + loop { + record_query_plan::(&db, &mut query_plan_iter, &mut from).unwrap(); + + if limit.is_none() { + assert!(!from.is_halted()); + } + if !from.is_halted() { + proofs.push(from.finish()); + break + } + let rec = if conf.proof_spawn_with_persistence { + from.statefull(Recorder::new(kind, Default::default(), limit, None)) + } else { + query_plan_iter = query_plan.as_ref(); + from.stateless(Recorder::new(kind, Default::default(), limit, None)) + }; + proofs.push(rec); + } + + crate::query_plan::check_proofs::( + proofs, + &query_plan, + conf.kind, + context.root, + &context.reference, + conf.hash_only, + ); + } + + /// Fuzzing conf 1. + pub const CONF1: Conf = Conf { + seed: 0u64, + kind: ProofKind::FullNodes, + nb_key_value: 300, + nb_small_value_set: 5, + nb_big_value_set: 5, + hash_only: false, + limit: 0, // no limit + proof_spawn_with_persistence: false, + }; + + /// Fuzzing conf 2. 
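+ /// Like `CONF1` but with compact proofs. Tests typically start from a base
+ /// conf and override fields per case, e.g.:
+ ///
+ /// ```ignore
+ /// let mut conf = CONF2.clone();
+ /// conf.limit = 2; // halt recording after two non-inline values
+ /// conf.proof_spawn_with_persistence = true;
+ /// ```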
+ pub const CONF2: Conf = Conf { + seed: 0u64, + kind: ProofKind::CompactNodes, + nb_key_value: 300, + nb_small_value_set: 5, + nb_big_value_set: 5, + hash_only: false, + limit: 0, // no limit + proof_spawn_with_persistence: false, + }; + + #[test] + fn fuzz_query_plan_1() { + use reference_trie::{RefHasher, SubstrateV1}; + let plans = [ + ArbitraryQueryPlan(vec![ + (false, ArbitraryKey::Indexed(9137484785696899328)), + (false, ArbitraryKey::Indexed(393082)), + ]), + ArbitraryQueryPlan(vec![ + (false, ArbitraryKey::Indexed(17942346408707227648)), + (false, ArbitraryKey::Indexed(37833)), + ]), + ArbitraryQueryPlan(vec![ + (true, ArbitraryKey::Random(vec![131, 1, 11, 234, 137, 233, 233, 233, 180])), + (false, ArbitraryKey::Random(vec![137])), + ]), + ArbitraryQueryPlan(vec![ + (true, ArbitraryKey::Random(vec![76])), + (true, ArbitraryKey::Random(vec![198, 198, 234, 35, 76, 76, 1])), + ]), + ArbitraryQueryPlan(vec![ + (false, ArbitraryKey::Random(vec![225])), + (true, ArbitraryKey::Random(vec![225, 225, 225, 142])), + ]), + ArbitraryQueryPlan(vec![ + (false, ArbitraryKey::Indexed(18446475631341993995)), + (true, ArbitraryKey::Indexed(254)), + ]), + ArbitraryQueryPlan(vec![( + true, + ArbitraryKey::Random(vec![252, 63, 149, 166, 164, 38]), + )]), + ArbitraryQueryPlan(vec![(false, ArbitraryKey::Indexed(459829968682))]), + ArbitraryQueryPlan(vec![(true, ArbitraryKey::Indexed(43218140957))]), + ArbitraryQueryPlan(vec![]), + ]; + let context: FuzzContext> = build_state(CONF1); + for plan in plans { + fuzz_query_plan::>(&context, plan.clone()); + } + } + + #[test] + fn fuzz_query_plan_2() { + use reference_trie::{RefHasher, SubstrateV1}; + let plans = [ + ArbitraryQueryPlan(vec![ + (false, ArbitraryKey::Indexed(18446475631341993995)), + (true, ArbitraryKey::Indexed(254)), + ]), + ArbitraryQueryPlan(vec![( + true, + ArbitraryKey::Random(vec![252, 63, 149, 166, 164, 38]), + )]), + ArbitraryQueryPlan(vec![(false, ArbitraryKey::Indexed(459829968682))]), + ArbitraryQueryPlan(vec![ + (false, ArbitraryKey::Indexed(17942346408707227648)), + (false, ArbitraryKey::Indexed(37833)), + ]), + ArbitraryQueryPlan(vec![(true, ArbitraryKey::Indexed(43218140957))]), + ArbitraryQueryPlan(vec![]), + ]; + let mut conf = CONF1.clone(); + let context: FuzzContext> = build_state(CONF1); + for plan in plans { + conf.limit = 2; + conf.proof_spawn_with_persistence = true; + fuzz_query_plan_conf::>(&context, conf, plan.clone()); + } + } + + #[test] + fn fuzz_query_plan_3() { + use reference_trie::{RefHasher, SubstrateV1}; + let plans = [ArbitraryQueryPlan(vec![])]; + let context: FuzzContext> = build_state(CONF2); + for plan in plans { + fuzz_query_plan::>(&context, plan.clone()); + } + } + + #[test] + fn fuzz_query_plan_4() { + use reference_trie::{RefHasher, SubstrateV1}; + let plans = [( + ArbitraryQueryPlan(vec![ + (true, ArbitraryKey::Random(vec![86])), + (false, ArbitraryKey::Random(vec![232])), + ]), + 3, + true, // TODO false + )]; + [(ArbitraryQueryPlan(vec![(false, ArbitraryKey::Random(vec![115]))]), 1, false)]; + let mut conf = CONF2.clone(); + let context: FuzzContext> = build_state(CONF2); + for (plan, nb, statefull) in plans { + conf.limit = nb; + conf.proof_spawn_with_persistence = statefull; + fuzz_query_plan_conf::>(&context, conf, plan.clone()); + } + } +} diff --git a/trie-db/test/src/lib.rs b/trie-db/test/src/lib.rs index 7802247e..370093cb 100644 --- a/trie-db/test/src/lib.rs +++ b/trie-db/test/src/lib.rs @@ -18,12 +18,14 @@ mod fatdb; #[cfg(test)] mod fatdbmut; +pub mod fuzz; #[cfg(test)] mod iter_build; 
#[cfg(test)] mod iterator; #[cfg(test)] mod proof; +mod query_plan; #[cfg(test)] mod recorder; #[cfg(test)] @@ -36,3 +38,31 @@ mod trie_codec; mod triedb; #[cfg(test)] mod triedbmut; + +use trie_db::{DBValue, TrieLayout}; + +/// Testing memory db type. +pub type MemoryDB = memory_db::MemoryDB< + ::Hash, + memory_db::HashKey<::Hash>, + DBValue, +>; + +/// Set of entries for base testing. +pub fn test_entries() -> Vec<(&'static [u8], &'static [u8])> { + vec![ + // "alfa" is at a hash-referenced leaf node. + (b"alfa", &[0; 32]), + // "bravo" is at an inline leaf node. + (b"bravo", b"bravo"), + // "do" is at a hash-referenced branch node. + (b"do", b"verb"), + // "dog" is at a hash-referenced branch node. + (b"dog", b"puppy"), + // "doge" is at a hash-referenced leaf node. + (b"doge", &[0; 32]), + // extension node "o" (plus nibble) to next branch. + (b"horse", b"stallion"), + (b"house", b"building"), + ] +} diff --git a/trie-db/test/src/proof.rs b/trie-db/test/src/proof.rs index cca2c70e..53faa814 100644 --- a/trie-db/test/src/proof.rs +++ b/trie-db/test/src/proof.rs @@ -12,38 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::{test_entries, MemoryDB}; use hash_db::Hasher; use reference_trie::{test_layouts, NoExtensionLayout}; - use trie_db::{ proof::{generate_proof, verify_proof, VerifyError}, DBValue, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieLayout, TrieMut, }; -type MemoryDB = memory_db::MemoryDB< - ::Hash, - memory_db::HashKey<::Hash>, - DBValue, ->; - -fn test_entries() -> Vec<(&'static [u8], &'static [u8])> { - vec![ - // "alfa" is at a hash-referenced leaf node. - (b"alfa", &[0; 32]), - // "bravo" is at an inline leaf node. - (b"bravo", b"bravo"), - // "do" is at a hash-referenced branch node. - (b"do", b"verb"), - // "dog" is at a hash-referenced branch node. - (b"dog", b"puppy"), - // "doge" is at a hash-referenced leaf node. - (b"doge", &[0; 32]), - // extension node "o" (plus nibble) to next branch. - (b"horse", b"stallion"), - (b"house", b"building"), - ] -} - fn test_generate_proof( entries: Vec<(&'static [u8], &'static [u8])>, keys: Vec<&'static [u8]>, diff --git a/trie-db/test/src/query_plan.rs b/trie-db/test/src/query_plan.rs new file mode 100644 index 00000000..e12aab71 --- /dev/null +++ b/trie-db/test/src/query_plan.rs @@ -0,0 +1,288 @@ +// Copyright 2019, 2020 Parity Technologies +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Query plan tests. 
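+//!
+//! Each test builds a trie from `crate::test_entries`, records a proof for an
+//! in-memory query plan, and feeds it to `check_proofs`. Sketch of one round
+//! trip without halting (`L` a test layout, other bindings illustrative):
+//!
+//! ```ignore
+//! let recorder = Recorder::new(kind, Default::default(), None, None);
+//! let mut from = HaltedStateRecord::from_start(recorder);
+//! let mut plan_iter = query_plan.as_ref();
+//! record_query_plan(&db, &mut plan_iter, &mut from)?;
+//! let proof = from.finish();
+//! check_proofs::<L>(vec![proof], &query_plan, kind, root, &content, hash_only);
+//! ```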
+ +use hash_db::Hasher; +use reference_trie::test_layouts; + +use std::collections::BTreeMap; +use trie_db::{ + query_plan::{ + verify_query_plan_iter, HaltedStateCheck, InMemQueryPlan, ProofKind, QueryPlan, + QueryPlanItemRef, ReadProofItem, + }, + TrieHash, TrieLayout, +}; + +test_layouts!(test_query_plan_full, test_query_plan_full_internal); +#[cfg(test)] +fn test_query_plan_full_internal() { + test_query_plan_internal::(ProofKind::FullNodes, false); + test_query_plan_internal::(ProofKind::FullNodes, true); +} + +test_layouts!(test_query_plan_compact, test_query_plan_compact_internal); +#[cfg(test)] +fn test_query_plan_compact_internal() { + test_query_plan_internal::(ProofKind::CompactNodes, false); + test_query_plan_internal::(ProofKind::CompactNodes, true); +} + +#[cfg(test)] +fn test_query_plan_internal(kind: ProofKind, hash_only: bool) { + use trie_db::{ + query_plan::{QueryPlanItem, Recorder}, + TrieDBBuilder, TrieDBMutBuilder, TrieMut, + }; + let set = crate::test_entries(); + + let mut cache = reference_trie::TestTrieCache::::default(); + + let (db, root) = { + let mut db = >::default(); + let mut root = Default::default(); + { + let mut trie = >::new(&mut db, &mut root).build(); + for (key, value) in set.iter() { + trie.insert(key, value).unwrap(); + } + } + (db, root) + }; + let db = >::new(&db, &root).with_cache(&mut cache).build(); + + if kind == ProofKind::CompactNodes && L::USE_EXTENSION { + // Compact proofs are not supported with extensions. + // Requires changing the way extension are handled + // when decoding (putting on stack). + // Not implemented for `CompactContent`, would need + // to not append 0 after pushing an extension node. + return + } + let query_plans = [ + InMemQueryPlan { items: vec![QueryPlanItem::new(b"".to_vec(), hash_only, true)], kind }, + InMemQueryPlan { + items: vec![ + QueryPlanItem::new(b"bravo".to_vec(), hash_only, false), + QueryPlanItem::new(b"do".to_vec(), hash_only, true), + ], + kind, + }, + InMemQueryPlan { + items: vec![ + QueryPlanItem::new(b"bravo".to_vec(), hash_only, false), + QueryPlanItem::new(b"doge".to_vec(), hash_only, false), + QueryPlanItem::new(b"horsey".to_vec(), hash_only, false), + ], + kind, + }, + ]; + for (_nb_plan, query_plan) in query_plans.iter().enumerate() { + for limit_conf in [ + (0, false), /* TODO uncomment (0, false), (1, false), (1, true), (2, false), (2, + * true), (3, true) */ + ] { + let limit = limit_conf.0; + let limit = (limit != 0).then(|| limit); + let recorder = Recorder::new(kind, Default::default(), limit, None); + let mut from = trie_db::query_plan::HaltedStateRecord::from_start(recorder); + // no limit + let mut proofs: Vec>> = Default::default(); + let mut query_plan_iter = query_plan.as_ref(); + loop { + trie_db::query_plan::record_query_plan::( + &db, + &mut query_plan_iter, + &mut from, + ) + .unwrap(); + + if limit.is_none() { + assert!(!from.is_halted()); + } + if !from.is_halted() { + proofs.push(from.finish()); + break + } + let rec = if limit_conf.1 { + query_plan_iter = query_plan.as_ref(); + from.stateless(Recorder::new(kind, Default::default(), limit, None)) + } else { + from.statefull(Recorder::new(kind, Default::default(), limit, None)) + }; + proofs.push(rec); + } + let content: BTreeMap<_, _> = + set.iter().map(|(k, v)| (k.to_vec(), v.to_vec())).collect(); + check_proofs::(proofs, query_plan, kind, root, &content, hash_only); + } + } +} + +/// Proof checking. 
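+/// `proofs` holds the chunk(s) from one recording session, in the order they
+/// were produced. For `ProofKind::FullNodes` the chunks are additionally
+/// re-checked concatenated as a single full proof. Hedged call sketch:
+///
+/// ```ignore
+/// check_proofs::<L>(proofs, &query_plan, ProofKind::FullNodes, root, &content, false);
+/// ```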
+pub fn check_proofs( + mut proofs: Vec>>, + query_plan_in_mem: &InMemQueryPlan, + kind: ProofKind, + root: TrieHash, + content: &BTreeMap, Vec>, + hash_only: bool, +) { + let mut full_proof: Vec> = Default::default(); + proofs.reverse(); + + let query_plan: QueryPlan<_> = query_plan_in_mem.as_ref(); + let mut run_state: Option> = Some(query_plan.into()); + let mut query_plan_iter: QueryPlan<_> = query_plan_in_mem.as_ref(); + let mut current_plan = query_plan_iter.items.next(); + let mut has_run_full = false; + let mut in_prefix = false; + while let Some(state) = run_state.take() { + let proof = if let Some(proof) = proofs.pop() { + full_proof.extend_from_slice(&proof); + proof + } else { + if full_proof.is_empty() { + break + } + proofs.clear(); + std::mem::take(&mut full_proof) + }; + let mut verify_iter = + verify_query_plan_iter::(state, proof.into_iter(), Some(root.clone())) + .unwrap(); + while let Some(item) = verify_iter.next() { + match item.unwrap() { + ReadProofItem::Hash(key, hash) => { + assert!(hash_only); + assert_eq!(content.get(&*key).map(|v| L::Hash::hash(&v.as_ref())), Some(hash)); + if in_prefix { + assert!(current_plan + .as_ref() + .map(|item| key.starts_with(item.key) && + item.hash_only && item.as_prefix) + .unwrap_or(false)); + } else { + assert_eq!( + current_plan.as_ref(), + Some(&QueryPlanItemRef { + key: &key, + hash_only: true, + as_prefix: false + }) + ); + current_plan = query_plan_iter.items.next(); + } + }, + ReadProofItem::Value(key, value) => { + assert_eq!(content.get(&*key), Some(value.as_ref())); + if in_prefix { + assert!(current_plan + .as_ref() + .map(|item| key.starts_with(item.key) && + !item.hash_only && item.as_prefix) + .unwrap_or(false)); + } else { + assert_eq!( + current_plan.as_ref(), + Some(&QueryPlanItemRef { + key: &key, + hash_only: false, + as_prefix: false + }) + ); + current_plan = query_plan_iter.items.next(); + } + }, + ReadProofItem::NoValue(key) => { + assert_eq!(content.get(key), None); + assert!(!in_prefix); + if hash_only { + assert_eq!( + current_plan.as_ref(), + Some(&QueryPlanItemRef { + key: &key, + hash_only: true, + as_prefix: false + }) + ); + } else { + assert_eq!( + current_plan.as_ref(), + Some(&QueryPlanItemRef { + key: &key, + hash_only: false, + as_prefix: false + }) + ); + } + current_plan = query_plan_iter.items.next(); + }, + ReadProofItem::StartPrefix(prefix) => { + assert!(!in_prefix); + in_prefix = true; + if hash_only { + assert_eq!( + current_plan.as_ref(), + Some(&QueryPlanItemRef { + key: &prefix, + hash_only: true, + as_prefix: true + }) + ); + } else { + assert_eq!( + current_plan.as_ref(), + Some(&QueryPlanItemRef { + key: &prefix, + hash_only: false, + as_prefix: true + }) + ); + } + }, + ReadProofItem::EndPrefix => { + assert!(in_prefix); + in_prefix = false; + assert!(current_plan.as_ref().map(|item| item.as_prefix).unwrap_or(false)); + current_plan = query_plan_iter.items.next(); + }, + ReadProofItem::Halted(resume) => { + run_state = Some(*resume); + break + }, + } + } + if run_state.is_none() { + assert_eq!(current_plan.as_ref(), None) + } + if kind == ProofKind::FullNodes { + if run_state.is_none() && !has_run_full { + has_run_full = true; + query_plan_iter = query_plan_in_mem.as_ref(); + current_plan = query_plan_iter.items.next(); + + let query_plan_iter_2 = query_plan_in_mem.as_ref(); + run_state = Some(query_plan_iter_2.into()); + } + } else { + has_run_full = true; + } + } + if !has_run_full { + panic!("did not run full proof") + } + assert!(!in_prefix); +} diff --git 
a/trie-db/test/src/triedbmut.rs b/trie-db/test/src/triedbmut.rs index 02316892..baa5c105 100644 --- a/trie-db/test/src/triedbmut.rs +++ b/trie-db/test/src/triedbmut.rs @@ -477,13 +477,15 @@ fn insert_empty_internal() { assert_eq!(*t.root(), reference_trie_root::(x.clone())); - for &(ref key, _) in &x { - t.insert(key, &[]).unwrap(); - } + if T::ALLOW_EMPTY == false { + for &(ref key, _) in &x { + t.insert(key, &[]).unwrap(); + } - assert!(t.is_empty()); - let hashed_null_node = reference_hashed_null_node::(); - assert_eq!(*t.root(), hashed_null_node); + assert!(t.is_empty()); + let hashed_null_node = reference_hashed_null_node::(); + assert_eq!(*t.root(), hashed_null_node); + } } test_layouts!(return_old_values, return_old_values_internal);