From e026ea68570d8bda613435fe9a414d86eb0cf029 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 17 May 2021 18:03:51 +0200 Subject: [PATCH 001/188] starting --- Cargo.lock | 69 ++++-- Cargo.toml | 6 + .../state-machine/src/changes_trie/mod.rs | 10 +- .../state-machine/src/changes_trie/storage.rs | 12 +- .../state-machine/src/proving_backend.rs | 10 +- .../state-machine/src/trie_backend_essence.rs | 106 ++++++-- primitives/trie/src/lib.rs | 233 +++++++++++++----- primitives/trie/src/node_codec.rs | 60 +++-- 8 files changed, 381 insertions(+), 125 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f11953213d01e..1ccdd8f9ea5c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2329,8 +2329,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" [[package]] name = "hash256-std-hasher" @@ -2341,6 +2340,14 @@ dependencies = [ "crunchy", ] +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" +dependencies = [ + "crunchy", +] + [[package]] name = "hashbrown" version = "0.9.1" @@ -2982,7 +2989,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711adba9940a039f4374fc5724c0a5eaca84a2d558cce62256bfe26f0dbef05e" dependencies = [ "hash-db", - "hash256-std-hasher", + "hash256-std-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak", +] + +[[package]] +name = "keccak-hasher" +version = "0.15.3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" +dependencies = [ + "hash-db", + "hash256-std-hasher 0.15.2 
(git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", "tiny-keccak", ] @@ -3771,8 +3788,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "814bbecfc0451fc314eeea34f05bbcd5b98a7ad7af37faee088b86a1e633f1d4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" dependencies = [ "hash-db", "hashbrown", @@ -4201,7 +4217,7 @@ dependencies = [ "sp-state-machine", "sp-trie", "substrate-test-client", - "trie-root", + "trie-root 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", "wat", ] @@ -8776,7 +8792,7 @@ dependencies = [ "ed25519-dalek", "futures 0.3.13", "hash-db", - "hash256-std-hasher", + "hash256-std-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "hex", "hex-literal", "impl-serde", @@ -9005,7 +9021,7 @@ name = "sp-runtime" version = "3.0.0" dependencies = [ "either", - "hash256-std-hasher", + "hash256-std-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", "impl-trait-for-tuples", "log", "parity-scale-codec", @@ -9161,7 +9177,7 @@ dependencies = [ "thiserror", "tracing", "trie-db", - "trie-root", + "trie-root 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -9267,8 +9283,8 @@ dependencies = [ "sp-std", "trie-bench", "trie-db", - "trie-root", - "trie-standardmap", + "trie-root 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "trie-standardmap 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -10316,24 +10332,22 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568257edb909a5c532b1f4ab38ee6b5dedfbf8775be6a55a29020513ebe3e072" +source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" dependencies = [ "criterion", "hash-db", - "keccak-hasher", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", "memory-db", "parity-scale-codec", "trie-db", - "trie-root", - "trie-standardmap", + "trie-root 0.16.0 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", + "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", ] [[package]] name = "trie-db" version = "0.22.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec051edf7f0fc9499a2cb0947652cab2148b9d7f61cee7605e312e9f970dacaf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" dependencies = [ "hash-db", "hashbrown", @@ -10351,6 +10365,14 @@ dependencies = [ "hash-db", ] +[[package]] +name = "trie-root" +version = "0.16.0" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" +dependencies = [ + "hash-db", +] + [[package]] name = "trie-standardmap" version = "0.15.2" @@ -10358,7 +10380,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3161ba520ab28cd8e6b68e1126f1009f6e335339d1a73b978139011703264c8" dependencies = [ "hash-db", - "keccak-hasher", + "keccak-hasher 0.15.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "trie-standardmap" +version = "0.15.2" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" +dependencies = [ + "hash-db", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 1b35c7181d17d..1bda1bc4f5ec2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -269,3 +269,9 @@ zeroize = { opt-level = 3 } 
[profile.release] # Substrate runtime requires unwinding. panic = "unwind" + +[patch.crates-io] +hash-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple2" } +memory-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple2" } +trie-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple2" } +trie-bench = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple2" } diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 105f3d7de6d39..89d869d38d999 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -167,9 +167,15 @@ pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { type Overlay = sp_trie::MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - self.0.get(key, prefix) + fn get(&self, key: &H::Out, prefix: Prefix, _parent: Option<&sp_trie::TrieMeta>) -> Result, String> { + match self.0.get(key, prefix) { + // change trie do not use meta. + Ok(Some(v)) => Ok(Some((v, Default::default()))), + Ok(None) => Ok(None), + Err(e) => Err(e), + } } + fn access_from(&self, _key: &H::Out) { } } /// Changes trie configuration. 
diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index e08fe36126c7b..b41b2e549e82a 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -190,7 +190,7 @@ impl Storage for InMemoryStorage Result, String> { - MemoryDB::::get(&self.data.read().mdb, key, prefix) + Ok( as hash_db::HashDBRef>::get(&self.data.read().mdb, key, prefix)) } } @@ -207,7 +207,13 @@ impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - self.storage.get(key, prefix) + fn get(&self, key: &H::Out, prefix: Prefix, _parent: Option<&sp_trie::TrieMeta>) -> Result, String> { + match self.storage.get(key, prefix) { + // change trie do not use meta. + Ok(Some(v)) => Ok(Some((v, Default::default()))), + Ok(None) => Ok(None), + Err(e) => Err(e), + } } + fn access_from(&self, _key: &H::Out) { } } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 28672659fa10c..fc01cb58d8bf5 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -24,7 +24,7 @@ use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, - record_all_keys, StorageProof, + record_all_keys, StorageProof, TrieMeta, }; pub use sp_trie::{Recorder, trie_types::{Layout, TrieError}}; use crate::trie_backend::TrieBackend; @@ -219,15 +219,19 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage { type Overlay = S::Overlay; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + fn get(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Result, String> { if let Some(v) = self.proof_recorder.get(key) { return 
Ok(v); } - let backend_value = self.backend.get(key, prefix)?; + let backend_value = self.backend.get(key, prefix, parent)?; self.proof_recorder.record(key.clone(), backend_value.clone()); Ok(backend_value) } + + fn access_from(&self, _key: &H::Out) { + // access_from is mainly for proof recorder, not forwarding it. + } } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> std::fmt::Debug diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index c085099da77d8..9d57ff1cafef8 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -25,7 +25,7 @@ use crate::{warn, debug}; use hash_db::{self, Hasher, Prefix}; use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, empty_child_trie_root, read_trie_value, read_child_trie_value, - for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; + for_keys_in_child_trie, KeySpacedDB, TrieDBIterator, TrieMeta}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; @@ -281,11 +281,11 @@ pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { overlay: &'a mut S::Overlay, } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB for Ephemeral<'a, S, H> { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } } impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { @@ -297,14 +297,18 @@ impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB +impl<'a, S: 
'a + TrieBackendStorage, H: Hasher> hash_db::HashDB for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { + Self::get_with_meta(self, key, prefix, None).map(|r| r.0) + } + + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Option<(DBValue, TrieMeta)> { + if let Some(val) = hash_db::HashDB::get_with_meta(self.overlay, key, prefix, parent) { Some(val) } else { - match self.storage.get(&key, prefix) { + match self.storage.get(&key, prefix, parent) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -314,6 +318,12 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB } } + fn access_from(&self, key: &H::Out, _at: Option<&H::Out>) -> Option { + // call back to storage even if the overlay was hit. + self.storage.access_from(key); + None + } + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { hash_db::HashDB::get(self, key, prefix).is_some() } @@ -322,6 +332,15 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB hash_db::HashDB::insert(self.overlay, prefix, value) } + fn insert_with_meta( + &mut self, + prefix: Prefix, + value: &[u8], + meta: TrieMeta, + ) -> H::Out { + hash_db::HashDB::insert_with_meta(self.overlay, prefix, value, meta) + } + fn emplace(&mut self, key: H::Out, prefix: Prefix, value: DBValue) { hash_db::HashDB::emplace(self.overlay, key, prefix, value) } @@ -331,13 +350,21 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { hash_db::HashDB::get(self, key, prefix) } + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Option<(DBValue, TrieMeta)> { + hash_db::HashDB::get_with_meta(self, key, 
prefix, parent) + } + + fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> Option { + hash_db::HashDB::access_from(self, key, at) + } + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { hash_db::HashDB::contains(self, key, prefix) } @@ -346,9 +373,11 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef: Send + Sync { /// Type of in-memory overlay. - type Overlay: hash_db::HashDB + Default + Consolidate; + type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; + fn get(&self, key: &H::Out, prefix: Prefix, parent_meta: Option<&TrieMeta>) -> Result>; + /// Call back when value get accessed in trie. + fn access_from(&self, key: &H::Out); } // This implementation is used by normal storage trie clients. @@ -356,41 +385,58 @@ pub trait TrieBackendStorage: Send + Sync { impl TrieBackendStorage for Arc> { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { + fn get(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Result> { + // TODO get impl from memorydb Storage::::get(self.deref(), key, prefix) } + + fn access_from(&self, key: &H::Out) { + Storage::::access_from(self.deref(), key) + } } // This implementation is used by test storage trie clients. 
impl TrieBackendStorage for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { - Ok(hash_db::HashDB::get(self, key, prefix)) + fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { + Ok(hash_db::HashDB::get_with_meta(self, key, prefix)) + } + + fn access_from(&self, key: &H::Out) { + hash_db::HashDB::access_from(self, key, None); } } impl TrieBackendStorage for MemoryDB { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { - Ok(hash_db::HashDB::get(self, key, prefix)) + fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { + Ok(hash_db::HashDB::get_with_meta(self, key, prefix)) + } + + fn access_from(&self, key: &H::Out) { + hash_db::HashDB::access_from(self, key, None); } } -impl, H: Hasher> hash_db::AsHashDB +impl, H: Hasher> hash_db::AsHashDB for TrieBackendEssence { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } } -impl, H: Hasher> hash_db::HashDB +impl, H: Hasher> hash_db::HashDB for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + self.get_with_meta(key, prefix).map(|r| r.0) + } + + fn get_with_meta(&self, key: &H::Out, prefix: Prefix) -> Option<(DBValue, TrieMeta)> { if *key == self.empty { - return Some([0u8].to_vec()) + return Some(([0u8].to_vec(), ::meta_for_empty())) } match self.storage.get(&key, prefix) { Ok(x) => x, @@ -401,10 +447,20 @@ impl, H: Hasher> hash_db::HashDB } } + fn access_from(&self, key: &H::Out, _at: Option<&H::Out>) -> Option { + // access storage since this is only to register access for proof. 
+ self.storage.access_from(key); + None + } + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { hash_db::HashDB::get(self, key, prefix).is_some() } + fn insert_with_meta(&mut self, _prefix: Prefix, _value: &[u8], _meta: TrieMeta) -> H::Out { + unimplemented!(); + } + fn insert(&mut self, _prefix: Prefix, _value: &[u8]) -> H::Out { unimplemented!(); } @@ -418,13 +474,21 @@ impl, H: Hasher> hash_db::HashDB } } -impl, H: Hasher> hash_db::HashDBRef +impl, H: Hasher> hash_db::HashDBRef for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { hash_db::HashDB::get(self, key, prefix) } + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Option<(DBValue, TrieMeta)> { + hash_db::HashDB::get_with_meta(self, key, prefix, parent) + } + + fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> Option { + hash_db::HashDB::access_from(self, key, at) + } + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { hash_db::HashDB::contains(self, key, prefix) } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 572283f1c027e..5892b26b91146 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -25,9 +25,9 @@ mod node_codec; mod storage_proof; mod trie_stream; -use sp_std::{boxed::Box, marker::PhantomData, vec::Vec, borrow::Borrow}; +use sp_std::{boxed::Box, marker::PhantomData, vec::Vec, borrow::Borrow, fmt}; use hash_db::{Hasher, Prefix}; -use trie_db::proof::{generate_proof, verify_proof}; +//use trie_db::proof::{generate_proof, verify_proof}; pub use trie_db::proof::VerifyError; /// Our `NodeCodec`-specific error. pub use error::Error; @@ -38,27 +38,90 @@ pub use node_codec::NodeCodec; pub use storage_proof::StorageProof; /// Various re-exports from the `trie-db` crate. 
pub use trie_db::{ - Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, nibble_ops, TrieDBIterator, + Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, + nibble_ops, TrieDBIterator, Meta, }; /// Various re-exports from the `memory-db` crate. pub use memory_db::KeyFunction; pub use memory_db::prefixed_key; /// Various re-exports from the `hash-db` crate. -pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; +pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX, MetaHasher}; +pub use hash_db::NoMeta; -#[derive(Default)] +/// Meta use by trie state. +pub type TrieMeta = (); /// substrate trie layout pub struct Layout(sp_std::marker::PhantomData); +impl fmt::Debug for Layout { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Layout").finish() + } +} + +impl Default for Layout { + fn default() -> Self { + Layout(sp_std::marker::PhantomData) + } +} + +impl Clone for Layout { + fn clone(&self) -> Self { + Layout(sp_std::marker::PhantomData) + } +} + impl TrieLayout for Layout { const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; type Hash = H; type Codec = NodeCodec; + type MetaHasher = StateHasher; + type Meta = TrieMeta; + + fn metainput_for_new_node(&self) -> ::MetaInput { + () + } + fn metainput_for_stored_inline_node(&self) -> ::MetaInput { + () + } +} + +/// Reimplement `NoMeta` `MetaHasher` with +/// additional constraint. 
+#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct StateHasher; + +impl MetaHasher for StateHasher + where + H: Hasher, + T: for<'a> From<&'a [u8]>, +{ + type Meta = TrieMeta; + + fn hash(value: &[u8], _meta: &Self::Meta) -> H::Out { + H::hash(value) + } + + fn stored_value(value: &[u8], _meta: Self::Meta) -> T { + value.into() + } + + fn stored_value_owned(value: T, _meta: Self::Meta) -> T { + value + } + + fn extract_value<'a>(stored: &'a [u8], _parent_meta: Option<&Self::Meta>) -> (&'a [u8], Self::Meta) { + (stored, ()) + } + + fn extract_value_owned(stored: T, _parent_meta: Option<&Self::Meta>) -> (T, Self::Meta) { + (stored, ()) + } } impl TrieConfiguration for Layout { - fn trie_root(input: I) -> ::Out where + fn trie_root(&self, input: I) -> ::Out where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, @@ -66,7 +129,7 @@ impl TrieConfiguration for Layout { trie_root::trie_root_no_extension::(input) } - fn trie_root_unhashed(input: I) -> Vec where + fn trie_root_unhashed(&self, input: I) -> Vec where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, @@ -87,25 +150,30 @@ type MemTracker = memory_db::MemCounter; /// TrieDB error over `TrieConfiguration` trait. pub type TrieError = trie_db::TrieError, CError>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub trait AsHashDB: hash_db::AsHashDB {} -impl> AsHashDB for T {} +pub trait AsHashDB: hash_db::AsHashDB {} +impl> AsHashDB for T {} /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub type HashDB<'a, H> = dyn hash_db::HashDB + 'a; +pub type HashDB<'a, H, M> = dyn hash_db::HashDB + 'a; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. /// This uses a `KeyFunction` for prefixing keys internally (avoiding /// key conflict for non random keys). 
pub type PrefixedMemoryDB = memory_db::MemoryDB< - H, memory_db::PrefixedKey, trie_db::DBValue, MemTracker + H, memory_db::PrefixedKey, trie_db::DBValue, StateHasher, MemTracker >; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. /// This uses a noops `KeyFunction` (key addressing must be hashed or using /// an encoding scheme that avoid key conflict). pub type MemoryDB = memory_db::MemoryDB< - H, memory_db::HashKey, trie_db::DBValue, MemTracker, + H, memory_db::HashKey, trie_db::DBValue, StateHasher, MemTracker, +>; +/// MemoryDB with specific meta hasher. +pub type MemoryDBMeta = memory_db::MemoryDB< + H, memory_db::HashKey, trie_db::DBValue, M, MemTracker, >; + /// Reexport from `hash_db`, with genericity set for `Hasher` trait. pub type GenericMemoryDB = memory_db::MemoryDB< - H, KF, trie_db::DBValue, MemTracker + H, KF, trie_db::DBValue, StateHasher, MemTracker >; /// Persistent trie database read-access interface for the a given hasher. @@ -116,7 +184,6 @@ pub type TrieDBMut<'a, L> = trie_db::TrieDBMut<'a, L>; pub type Lookup<'a, L, Q> = trie_db::Lookup<'a, L, Q>; /// Hash type for a trie layout. pub type TrieHash = <::Hash as Hasher>::Out; - /// This module is for non generic definition of trie type. /// Only the `Hasher` trait is generic in this case. pub mod trie_types { @@ -131,6 +198,7 @@ pub mod trie_types { pub type TrieError = trie_db::TrieError; } +/* /// Create a proof for a subset of keys in a trie. 
/// /// The `keys` may contain any set of keys regardless of each one of them is included @@ -146,7 +214,7 @@ pub fn generate_trie_proof<'a, L: TrieConfiguration, I, K, DB>( ) -> Result>, Box>> where I: IntoIterator, K: 'a + AsRef<[u8]>, - DB: hash_db::HashDBRef, + DB: hash_db::HashDBRef, { let trie = TrieDB::::new(db, &root)?; generate_proof(&trie, keys) @@ -171,6 +239,7 @@ pub fn verify_trie_proof<'a, L: TrieConfiguration, I, K, V>( { verify_proof::, _, _, _>(root, proof, items) } +*/ /// Determine a trie root given a hash DB and delta values. pub fn delta_trie_root( @@ -182,7 +251,7 @@ pub fn delta_trie_root( A: Borrow<[u8]>, B: Borrow>, V: Borrow<[u8]>, - DB: hash_db::HashDB, + DB: hash_db::HashDB, { { let mut trie = TrieDBMut::::from_existing(db, &mut root)?; @@ -202,7 +271,7 @@ pub fn delta_trie_root( } /// Read a value from the trie. -pub fn read_trie_value>( +pub fn read_trie_value>( db: &DB, root: &TrieHash, key: &[u8] @@ -214,7 +283,7 @@ pub fn read_trie_value, - DB: hash_db::HashDBRef + DB: hash_db::HashDBRef >( db: &DB, root: &TrieHash, @@ -226,17 +295,18 @@ pub fn read_trie_value_with< /// Determine the empty trie root. pub fn empty_trie_root() -> ::Out { - L::trie_root::<_, Vec, Vec>(core::iter::empty()) + L::default().trie_root::<_, Vec, Vec>(core::iter::empty()) } /// Determine the empty child trie root. pub fn empty_child_trie_root() -> ::Out { - L::trie_root::<_, Vec, Vec>(core::iter::empty()) + L::default().trie_root::<_, Vec, Vec>(core::iter::empty()) } /// Determine a child trie root given its ordered contents, closed form. H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. pub fn child_trie_root( + layout: &L, input: I, ) -> ::Out where @@ -244,7 +314,7 @@ pub fn child_trie_root( A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - L::trie_root(input) + layout.trie_root(input) } /// Determine a child trie root given a hash DB and delta values. 
H is the default hasher, @@ -261,7 +331,7 @@ pub fn child_delta_trie_root( B: Borrow>, V: Borrow<[u8]>, RD: AsRef<[u8]>, - DB: hash_db::HashDB + DB: hash_db::HashDB { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. @@ -284,7 +354,7 @@ pub fn for_keys_in_child_trie bool, DB> mut f: F ) -> Result<(), Box>> where - DB: hash_db::HashDBRef + DB: hash_db::HashDBRef { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. @@ -310,7 +380,7 @@ pub fn record_all_keys( root: &TrieHash, recorder: &mut Recorder> ) -> Result<(), Box>> where - DB: hash_db::HashDBRef + DB: hash_db::HashDBRef { let trie = TrieDB::::new(&*db, root)?; let iter = trie.iter()?; @@ -335,7 +405,7 @@ pub fn read_child_trie_value( key: &[u8] ) -> Result>, Box>> where - DB: hash_db::HashDBRef + DB: hash_db::HashDBRef { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. @@ -354,7 +424,7 @@ pub fn read_child_trie_value_with Result>, Box>> where - DB: hash_db::HashDBRef + DB: hash_db::HashDBRef { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. 
@@ -401,8 +471,8 @@ impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where } } -impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where - DB: hash_db::HashDBRef, +impl<'a, DB, H, T, M> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where + DB: hash_db::HashDBRef, H: Hasher, T: From<&'static [u8]>, { @@ -411,14 +481,23 @@ impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where self.0.get(key, (&derived_prefix.0, derived_prefix.1)) } + fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> Option { + self.0.access_from(key, at) + } + + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, parent: Option<&M>) -> Option<(T, M)> { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.get_with_meta(key, (&derived_prefix.0, derived_prefix.1), parent) + } + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) } } -impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, +impl<'a, DB, H, T, M> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where + DB: hash_db::HashDB, H: Hasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, { @@ -427,6 +506,15 @@ impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where self.0.get(key, (&derived_prefix.0, derived_prefix.1)) } + fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> Option { + self.0.access_from(key, at) + } + + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, parent: Option<&M>) -> Option<(T, M)> { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.get_with_meta(key, (&derived_prefix.0, derived_prefix.1), parent) + } + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) @@ -437,6 +525,16 @@ impl<'a, DB, H, T> 
hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where self.0.insert((&derived_prefix.0, derived_prefix.1), value) } + fn insert_with_meta( + &mut self, + prefix: Prefix, + value: &[u8], + meta: M, + ) -> H::Out { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.insert_with_meta((&derived_prefix.0, derived_prefix.1), value, meta) + } + fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); self.0.emplace(key, (&derived_prefix.0, derived_prefix.1), value) @@ -448,14 +546,14 @@ impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where } } -impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, +impl<'a, DB, H, T, M> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where + DB: hash_db::HashDB, H: Hasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, { - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } + fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { &mut *self } } @@ -482,18 +580,21 @@ mod tests { type Layout = super::Layout; fn hashed_null_node() -> TrieHash { - ::hashed_null_node() + >::hashed_null_node() } fn check_equivalent(input: &Vec<(&[u8], &[u8])>) { { - let closed_form = T::trie_root(input.clone()); - let d = T::trie_root_unhashed(input.clone()); + // TODO test flagged + let layout = T::default(); + let closed_form = layout.trie_root(input.clone()); + let d = layout.trie_root_unhashed(input.clone()); println!("Data: {:#x?}, {:#x?}", d, Blake2Hasher::hash(&d[..])); let persistent = { - let mut memdb = MemoryDB::default(); + let mut memdb = MemoryDBMeta::<_, T::MetaHasher>::default(); let mut root = Default::default(); - let mut t = TrieDBMut::::new(&mut memdb, &mut root); + // TODO test flagged + let mut t = 
TrieDBMut::::new(&mut memdb, &mut root, layout); for (x, y) in input.iter().rev() { t.insert(x, y).unwrap(); } @@ -504,16 +605,18 @@ mod tests { } fn check_iteration(input: &Vec<(&[u8], &[u8])>) { - let mut memdb = MemoryDB::default(); + // TODO test flagged + let layout = T::default(); + let mut memdb = MemoryDBMeta::<_, T::MetaHasher>::default(); let mut root = Default::default(); { - let mut t = TrieDBMut::::new(&mut memdb, &mut root); + let mut t = TrieDBMut::::new(&mut memdb, &mut root, layout.clone()); for (x, y) in input.clone() { t.insert(x, y).unwrap(); } } { - let t = TrieDB::::new(&mut memdb, &root).unwrap(); + let t = TrieDB::::new(&mut memdb, &root, layout).unwrap(); assert_eq!( input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), t.iter().unwrap() @@ -525,12 +628,14 @@ mod tests { #[test] fn default_trie_root() { + // TODO test flagged + let layout = Layout::default(); let mut db = MemoryDB::default(); let mut root = TrieHash::::default(); let mut empty = TrieDBMut::::new(&mut db, &mut root); empty.commit(); let root1 = empty.root().as_ref().to_vec(); - let root2: Vec = Layout::trie_root::<_, Vec, Vec>( + let root2: Vec = layout.trie_root::<_, Vec, Vec>( std::iter::empty(), ).as_ref().iter().cloned().collect(); @@ -633,11 +738,13 @@ mod tests { } fn populate_trie<'db, T: TrieConfiguration>( - db: &'db mut dyn HashDB, + db: &'db mut dyn HashDB, root: &'db mut TrieHash, + layout: T, v: &[(Vec, Vec)] ) -> TrieDBMut<'db, T> { - let mut t = TrieDBMut::::new(db, root); + // TODO test non default layout + let mut t = TrieDBMut::::new_with_layout(db, root, layout); for i in 0..v.len() { let key: &[u8]= &v[i].0; let val: &[u8] = &v[i].1; @@ -671,10 +778,12 @@ mod tests { count: 100, }.make_with(seed.as_fixed_bytes_mut()); - let real = Layout::trie_root(x.clone()); + // TODO test other layout states. 
+ let layout = Layout::default(); + let real = layout.trie_root(x.clone()); let mut memdb = MemoryDB::default(); let mut root = Default::default(); - let mut memtrie = populate_trie::(&mut memdb, &mut root, &x); + let mut memtrie = populate_trie::(&mut memdb, &mut root, layout, &x); memtrie.commit(); if *memtrie.root() != real { @@ -707,18 +816,22 @@ mod tests { #[test] fn codec_trie_empty() { + // TODO test other layout states. + let layout = Layout::default(); let input: Vec<(&[u8], &[u8])> = vec![]; - let trie = Layout::trie_root_unhashed::<_, _, _>(input); + let trie = layout.trie_root_unhashed(input); println!("trie: {:#x?}", trie); assert_eq!(trie, vec![0x0]); } #[test] fn codec_trie_single_tuple() { + // TODO switch to old layout + let layout = Layout::default(); let input = vec![ (vec![0xaa], vec![0xbb]) ]; - let trie = Layout::trie_root_unhashed::<_, _, _>(input); + let trie = layout.trie_root_unhashed(input); println!("trie: {:#x?}", trie); assert_eq!(trie, vec![ 0x42, // leaf 0x40 (2^6) with (+) key of 2 nibbles (0x02) @@ -730,8 +843,10 @@ mod tests { #[test] fn codec_trie_two_tuples_disjoint_keys() { + // TODO switch to old layout + let layout = Layout::default(); let input = vec![(&[0x48, 0x19], &[0xfe]), (&[0x13, 0x14], &[0xff])]; - let trie = Layout::trie_root_unhashed::<_, _, _>(input); + let trie = layout.trie_root_unhashed(input); println!("trie: {:#x?}", trie); let mut ex = Vec::::new(); ex.push(0x80); // branch, no value (0b_10..) 
no nibble @@ -760,11 +875,13 @@ mod tests { (hex!("0103000000000000000469").to_vec(), hex!("0401000000").to_vec()), ]; + // TODO test non default layout + let layout = Layout::default(); let mut mdb = MemoryDB::default(); let mut root = Default::default(); - let _ = populate_trie::(&mut mdb, &mut root, &pairs); + let _ = populate_trie::(&mut mdb, &mut root, layout.clone(), &pairs); - let trie = TrieDB::::new(&mdb, &root).unwrap(); + let trie = TrieDB::::new_with_layout(&mdb, &root, layout).unwrap(); let iter = trie.iter().unwrap(); let mut iter_pairs = Vec::new(); @@ -775,7 +892,7 @@ mod tests { assert_eq!(pairs, iter_pairs); } - +/* #[test] fn proof_non_inclusion_works() { let pairs = vec![ @@ -860,9 +977,11 @@ mod tests { ).is_err() ); } - +*/ #[test] fn generate_storage_root_with_proof_works_independently_from_the_delta_order() { + // TODO use old format. + let layout = Layout::default(); let proof = StorageProof::decode(&mut &include_bytes!("../test-res/proof")[..]).unwrap(); let storage_root = sp_core::H256::decode( &mut &include_bytes!("../test-res/storage_root")[..], @@ -877,12 +996,14 @@ mod tests { ).unwrap(); let proof_db = proof.into_memory_db::(); - let first_storage_root = delta_trie_root::( + let first_storage_root = delta_trie_root( + layout.clone(), &mut proof_db.clone(), storage_root, valid_delta, ).unwrap(); - let second_storage_root = delta_trie_root::( + let second_storage_root = delta_trie_root( + layout, &mut proof_db.clone(), storage_root, invalid_delta, diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 0c923ff024c55..11b379fce6f2d 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -23,8 +23,8 @@ use sp_std::vec::Vec; use sp_std::borrow::Borrow; use codec::{Encode, Decode, Input, Compact}; use hash_db::Hasher; -use trie_db::{self, node::{NibbleSlicePlan, NodePlan, NodeHandlePlan}, ChildReference, - nibble_ops, Partial, NodeCodec as NodeCodecT}; +use trie_db::{self, 
node::{NibbleSlicePlan, NodePlan, Value, ValuePlan, NodeHandlePlan}, + ChildReference, nibble_ops, Partial, NodeCodec as NodeCodecT, Meta}; use crate::error::Error; use crate::trie_constants; use super::{node_header::{NodeHeader, NodeKind}}; @@ -87,15 +87,15 @@ impl<'a> Input for ByteSliceInput<'a> { #[derive(Default, Clone)] pub struct NodeCodec(PhantomData); -impl NodeCodecT for NodeCodec { +impl NodeCodecT for NodeCodec { type Error = Error; type HashOut = H::Out; fn hashed_null_node() -> ::Out { - H::hash(::empty_node()) + H::hash(>::empty_node()) } - fn decode_plan(data: &[u8]) -> sp_std::result::Result { + fn decode_plan_inner(data: &[u8]) -> sp_std::result::Result { let mut input = ByteSliceInput::new(data); match NodeHeader::decode(&mut input)? { NodeHeader::Null => Ok(NodePlan::Empty), @@ -113,9 +113,9 @@ impl NodeCodecT for NodeCodec { let bitmap = Bitmap::decode(&data[bitmap_range])?; let value = if has_value { let count = >::decode(&mut input)?.0 as usize; - Some(input.take(count)?) + ValuePlan::Value(input.take(count)?) 
} else { - None + ValuePlan::NoValue }; let mut children = [ None, None, None, None, None, None, None, None, @@ -151,23 +151,31 @@ impl NodeCodecT for NodeCodec { let count = >::decode(&mut input)?.0 as usize; Ok(NodePlan::Leaf { partial: NibbleSlicePlan::new(partial, partial_padding), - value: input.take(count)?, + value: ValuePlan::Value(input.take(count)?), }) } } } fn is_empty_node(data: &[u8]) -> bool { - data == ::empty_node() + data == >::empty_node() } fn empty_node() -> &'static [u8] { &[trie_constants::EMPTY_TRIE] } - fn leaf_node(partial: Partial, value: &[u8]) -> Vec { + fn leaf_node(partial: Partial, value: Value, meta: &mut M) -> Vec { let mut output = partial_encode(partial, NodeKind::Leaf); - value.encode_to(&mut output); + if let Value::Value(value) = value { + Compact(value.len() as u32).encode_to(&mut output); + let start = output.len(); + output.extend_from_slice(value); + let end = output.len(); + meta.encoded_value_callback(ValuePlan::Value(start..end)); + } else { + unimplemented!("No support for incomplete nodes"); + } output } @@ -175,13 +183,15 @@ impl NodeCodecT for NodeCodec { _partial: impl Iterator, _nbnibble: usize, _child: ChildReference<::Out>, + _meta: &mut M, ) -> Vec { unreachable!() } fn branch_node( _children: impl Iterator::Out>>>>, - _maybe_value: Option<&[u8]>, + _maybe_value: Value, + _meta: &mut M, ) -> Vec { unreachable!() } @@ -190,19 +200,28 @@ impl NodeCodecT for NodeCodec { partial: impl Iterator, number_nibble: usize, children: impl Iterator::Out>>>>, - maybe_value: Option<&[u8]>, + maybe_value: Value, + meta: &mut M, ) -> Vec { - let mut output = if maybe_value.is_some() { - partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue) - } else { - partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) + let mut output = match maybe_value { + Value::Value(..) 
=> partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue), + Value::NoValue => partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue), + Value::HashedValue(..) => unimplemented!("No support for incomplete nodes"), }; let bitmap_index = output.len(); let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH]; (0..BITMAP_LENGTH).for_each(|_|output.push(0)); - if let Some(value) = maybe_value { - value.encode_to(&mut output); - }; + match maybe_value { + Value::Value(value) => { + Compact(value.len() as u32).encode_to(&mut output); + let start = output.len(); + output.extend_from_slice(value); + let end = output.len(); + meta.encoded_value_callback(ValuePlan::Value(start..end)); + }, + Value::NoValue => (), + Value::HashedValue(..) => unimplemented!("No support for incomplete nodes"), + } Bitmap::encode(children.map(|maybe_child| match maybe_child.borrow() { Some(ChildReference::Hash(h)) => { h.as_ref().encode_to(&mut output); @@ -218,7 +237,6 @@ impl NodeCodecT for NodeCodec { .copy_from_slice(&bitmap[..BITMAP_LENGTH]); output } - } // utils From 5b1c550c17b949714f0e3d7c33f097492d1f00e1 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 18 May 2021 10:53:29 +0200 Subject: [PATCH 002/188] Updated from other branch. 
--- bin/node/bench/src/simple_trie.rs | 22 ++++++++++--- bin/node/bench/src/trie.rs | 11 +++++-- bin/node/executor/tests/common.rs | 2 +- client/api/src/cht.rs | 2 +- client/db/src/bench.rs | 11 +++++-- client/db/src/lib.rs | 17 +++++++--- client/executor/src/integration_tests/mod.rs | 2 +- client/service/test/src/client/light.rs | 2 +- client/service/test/src/client/mod.rs | 2 +- primitives/io/src/lib.rs | 8 ++--- primitives/state-machine/src/basic.rs | 2 +- .../state-machine/src/proving_backend.rs | 17 +++++++--- .../state-machine/src/trie_backend_essence.rs | 23 ++++++------- primitives/trie/src/lib.rs | 33 +++++++------------ 14 files changed, 94 insertions(+), 60 deletions(-) diff --git a/bin/node/bench/src/simple_trie.rs b/bin/node/bench/src/simple_trie.rs index a29b51a38af58..6cc32e00e34d9 100644 --- a/bin/node/bench/src/simple_trie.rs +++ b/bin/node/bench/src/simple_trie.rs @@ -20,7 +20,7 @@ use std::{collections::HashMap, sync::Arc}; use kvdb::KeyValueDB; use node_primitives::Hash; -use sp_trie::DBValue; +use sp_trie::{DBValue, TrieMeta, StateHasher, MetaHasher}; use hash_db::{HashDB, AsHashDB, Prefix, Hasher as _}; pub type Hasher = sp_core::Blake2Hasher; @@ -31,15 +31,15 @@ pub struct SimpleTrie<'a> { pub overlay: &'a mut HashMap, Option>>, } -impl<'a> AsHashDB for SimpleTrie<'a> { - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } +impl<'a> AsHashDB for SimpleTrie<'a> { + fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { &mut *self } } -impl<'a> HashDB for SimpleTrie<'a> { +impl<'a> HashDB for SimpleTrie<'a> { fn get(&self, key: &Hash, prefix: Prefix) -> Option { let key = sp_trie::prefixed_key::(key, prefix); if let Some(value) = self.overlay.get(&key) { @@ -48,10 +48,22 @@ impl<'a> HashDB for SimpleTrie<'a> { self.db.get(0, &key).expect("Database backend error") } + fn get_with_meta(&self, key: &Hash, 
prefix: Prefix, parent: Option<&TrieMeta>) -> Option<(DBValue, TrieMeta)> { + let result = self.get(key, prefix); + result.map(|value| >::extract_value_owned(value, parent)) + } + fn contains(&self, hash: &Hash, prefix: Prefix) -> bool { self.get(hash, prefix).is_some() } + fn insert_with_meta(&mut self, prefix: Prefix, value: &[u8], meta: TrieMeta) -> Hash { + let key = >::hash(value, &meta); + let stored_value = >::stored_value(value, meta); + self.emplace(key, prefix, stored_value); + key + } + fn insert(&mut self, prefix: Prefix, value: &[u8]) -> Hash { let key = Hasher::hash(value); self.emplace(key, prefix, value.to_vec()); diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index a3e7620473d98..267d0cc16c8c4 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -24,7 +24,7 @@ use lazy_static::lazy_static; use rand::Rng; use hash_db::Prefix; use sp_state_machine::Backend as _; -use sp_trie::{trie_types::TrieDBMut, TrieMut as _}; +use sp_trie::{trie_types::TrieDBMut, TrieMut as _, TrieMeta, MetaHasher, StateHasher}; use node_primitives::Hash; @@ -117,6 +117,7 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { let mut rng = rand::thread_rng(); let warmup_prefix = KUSAMA_STATE_DISTRIBUTION.key(&mut rng); + // TODO flag trie for hash of value. 
let mut key_values = KeyValues::new(); let mut warmup_keys = KeyValues::new(); let mut query_keys = KeyValues::new(); @@ -169,9 +170,15 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { struct Storage(Arc); impl sp_state_machine::Storage for Storage { - fn get(&self, key: &Hash, prefix: Prefix) -> Result>, String> { + fn get(&self, key: &Hash, prefix: Prefix, parent: Option<&TrieMeta>) -> Result, TrieMeta)>, String> { let key = sp_trie::prefixed_key::(key, prefix); self.0.get(0, &key).map_err(|e| format!("Database backend error: {:?}", e)) + .map(|result| result + .map(|value| >::extract_value_owned(value, parent)) + ) + } + + fn access_from(&self, _key: &Hash) { } } diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index 635155b5d00b2..7ccb4b511ae8a 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -157,7 +157,7 @@ pub fn construct_block( // calculate the header fields that we can. let extrinsics_root = - Layout::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) + Layout::::default().ordered_trie_root(extrinsics.iter().map(Encode::encode)) .to_fixed_bytes() .into(); diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 96a5a272916e5..1fdeedcde976f 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -93,7 +93,7 @@ pub fn compute_root( I: IntoIterator>>, { use sp_trie::TrieConfiguration; - Ok(sp_trie::trie_types::Layout::::trie_root( + Ok(sp_trie::trie_types::Layout::::default().trie_root( build_pairs::(cht_size, cht_num, hashes)? 
)) } diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index a2501891b31e3..c046c5e64545b 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -23,7 +23,7 @@ use std::cell::{Cell, RefCell}; use std::collections::HashMap; use hash_db::{Prefix, Hasher}; -use sp_trie::{MemoryDB, prefixed_key}; +use sp_trie::{MemoryDB, prefixed_key, StateHasher, TrieMeta, MetaHasher}; use sp_core::{ storage::{ChildInfo, TrackedStorageKey}, hexdisplay::HexDisplay @@ -49,21 +49,28 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + fn get(&self, key: &Block::Hash, prefix: Prefix, parent: Option<&TrieMeta>) -> Result, String> { let prefixed_key = prefixed_key::>(key, prefix); if let Some(recorder) = &self.proof_recorder { if let Some(v) = recorder.get(&key) { return Ok(v.clone()); } let backend_value = self.db.get(0, &prefixed_key) + .map(|result| result.map(|value| , _>>::extract_value_owned(value, parent))) .map_err(|e| format!("Database backend error: {:?}", e))?; recorder.record(key.clone(), backend_value.clone()); Ok(backend_value) } else { self.db.get(0, &prefixed_key) + .map(|result| result.map(|value| , _>>::extract_value_owned(value, parent))) .map_err(|e| format!("Database backend error: {:?}", e)) } } + fn access_from(&self, key: &Block::Hash) { + if let Some(recorder) = &self.proof_recorder { + recorder.access_from(key); + } + } } /// Track whether a specific key has already been read or written to. 
diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 94535cf28aea5..245fc3b84f04c 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -65,7 +65,8 @@ use sp_blockchain::{ }; use codec::{Decode, Encode}; use hash_db::Prefix; -use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; +use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key, StateHasher, TrieMeta, + MetaHasher}; use sp_database::Transaction; use sp_core::{Hasher, ChangesTrieConfiguration}; use sp_core::offchain::OffchainOverlayedChange; @@ -73,7 +74,8 @@ use sp_core::storage::{well_known_keys, ChildInfo}; use sp_arithmetic::traits::Saturating; use sp_runtime::{generic::{DigestItem, BlockId}, Justification, Justifications, Storage}; use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, HashFor, + Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, + HashFor, }; use sp_state_machine::{ DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, @@ -858,15 +860,19 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + fn get(&self, key: &Block::Hash, prefix: Prefix, parent: Option<&TrieMeta>) -> Result, String> { if self.prefix_keys { let key = prefixed_key::>(key, prefix); self.state_db.get(&key, self) } else { self.state_db.get(key.as_ref(), self) } + .map(|result| result.map(|value| , _>>::extract_value_owned(value, parent))) .map_err(|e| format!("Database backend error: {:?}", e)) } + + fn access_from(&self, _key: &Block::Hash) { + } } impl sc_state_db::NodeDb for StorageDb { @@ -890,9 +896,11 @@ impl DbGenesisStorage { } impl sp_state_machine::Storage> for DbGenesisStorage { - fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { + fn get(&self, _key: &Block::Hash, _prefix: Prefix, _parent: Option<&TrieMeta>) -> Result, String> { Ok(None) } + fn access_from(&self, _key: 
&Block::Hash) { + } } /// Frozen `value` at time `at`. @@ -2110,6 +2118,7 @@ impl sc_client_api::backend::Backend for Backend { self.storage.as_ref(), &header.state_root, (&[], None), + None, ).unwrap_or(None).is_some() }, _ => false, diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index d08f830f40dae..ecb48da91490a 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -460,7 +460,7 @@ fn ordered_trie_root_should_work(wasm_method: WasmExecutionMethod) { wasm_method, &mut ext.ext(), ).unwrap(), - Layout::::ordered_trie_root(trie_input.iter()).as_bytes().encode(), + Layout::::default().ordered_trie_root(trie_input.iter()).as_bytes().encode(), ); } diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index a183cbce62bdb..33bcc96d1bb63 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -568,7 +568,7 @@ fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Heade fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { use sp_trie::{TrieConfiguration, trie_types::Layout}; let iter = extrinsics.iter().map(Encode::encode); - let extrinsics_root = Layout::::ordered_trie_root(iter); + let extrinsics_root = Layout::::default().ordered_trie_root(iter); // only care about `extrinsics_root` Header::new(0, extrinsics_root, H256::zero(), H256::zero(), Default::default()) diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 0234f43513d56..0b4a9ed76c56d 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -156,7 +156,7 @@ fn construct_block( let transactions = txs.into_iter().map(|tx| tx.into_signed_tx()).collect::>(); let iter = transactions.iter().map(Encode::encode); - let extrinsics_root = Layout::::ordered_trie_root(iter).into(); + let extrinsics_root = 
Layout::::default().ordered_trie_root(iter).into(); let mut header = Header { parent_hash, diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 35daaa3989907..af6f049f0c529 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -410,22 +410,22 @@ pub trait DefaultChildStorage { pub trait Trie { /// A trie root formed from the iterated items. fn blake2_256_root(input: Vec<(Vec, Vec)>) -> H256 { - Layout::::trie_root(input) + Layout::::default().trie_root(input) } /// A trie root formed from the enumerated items. fn blake2_256_ordered_root(input: Vec>) -> H256 { - Layout::::ordered_trie_root(input) + Layout::::default().ordered_trie_root(input) } /// A trie root formed from the iterated items. fn keccak_256_root(input: Vec<(Vec, Vec)>) -> H256 { - Layout::::trie_root(input) + Layout::::default().trie_root(input) } /// A trie root formed from the enumerated items. fn keccak_256_ordered_root(input: Vec>) -> H256 { - Layout::::ordered_trie_root(input) + Layout::::default().ordered_trie_root(input) } } diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index dda8f523b77f9..e4806c7878ed2 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -281,7 +281,7 @@ impl Externalities for BasicExternalities { } } - Layout::::trie_root(self.inner.top.clone()).as_ref().into() + Layout::::default().trie_root(self.inner.top.clone()).as_ref().into() } fn child_storage_root( diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index fc01cb58d8bf5..66d5b8b2acb2e 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -112,7 +112,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> #[derive(Default)] struct ProofRecorderInner { /// All the records that we have stored so far. 
- records: HashMap>, + records: HashMap>, /// The encoded size of all recorded values. encoded_size: usize, } @@ -125,11 +125,13 @@ pub struct ProofRecorder { impl ProofRecorder { /// Record the given `key` => `val` combination. - pub fn record(&self, key: Hash, val: Option) { + pub fn record(&self, key: Hash, val: Option<(DBValue, TrieMeta)>) { let mut inner = self.inner.write(); let encoded_size = if let Entry::Vacant(entry) = inner.records.entry(key) { let encoded_size = val.as_ref().map(Encode::encoded_size).unwrap_or(0); +// TODO with new meta +// val.as_mut().map(|val| val.1.set_accessed_value(false)); entry.insert(val); encoded_size } else { @@ -139,8 +141,15 @@ impl ProofRecorder { inner.encoded_size += encoded_size; } + /// Record actual trie level value access. + pub fn access_from(&self, _key: &Hash) { +// TODO with new meta +// self.inner.write().entry(key[..].to_vec()) +// .and_modify(|entry| entry.1.set_accessed_value(true)); + } + /// Returns the value at the given `key`. - pub fn get(&self, key: &Hash) -> Option> { + pub fn get(&self, key: &Hash) -> Option> { self.inner.read().records.get(key).cloned() } @@ -159,7 +168,7 @@ impl ProofRecorder { let trie_nodes = self.inner.read() .records .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) + .filter_map(|(_k, v)| v.as_ref().map(|v| v.0.to_vec())) .collect(); StorageProof::new(trie_nodes) diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 9d57ff1cafef8..3b7d47d9b0c62 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -43,7 +43,9 @@ type Result = sp_std::result::Result; /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { /// Get a trie node. 
- fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; + fn get(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Result>; + /// Call back when value get accessed in trie. + fn access_from(&self, key: &H::Out); } /// Patricia trie-based pairs storage essence. @@ -129,7 +131,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: Option<&ChildInfo>, key: &[u8], ) -> Result> { - let dyn_eph: &dyn hash_db::HashDBRef<_, _>; + let dyn_eph: &dyn hash_db::HashDBRef<_, _, _>; let keyspace_eph; if let Some(child_info) = child_info.as_ref() { keyspace_eph = KeySpacedDB::new(self, child_info.keyspace()); @@ -386,8 +388,7 @@ impl TrieBackendStorage for Arc> { type Overlay = PrefixedMemoryDB; fn get(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Result> { - // TODO get impl from memorydb - Storage::::get(self.deref(), key, prefix) + Storage::::get(self.deref(), key, prefix, parent) } fn access_from(&self, key: &H::Out) { @@ -399,8 +400,8 @@ impl TrieBackendStorage for Arc> { impl TrieBackendStorage for PrefixedMemoryDB { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { - Ok(hash_db::HashDB::get_with_meta(self, key, prefix)) + fn get(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Result> { + Ok(hash_db::HashDB::get_with_meta(self, key, prefix, parent)) } fn access_from(&self, key: &H::Out) { @@ -411,8 +412,8 @@ impl TrieBackendStorage for PrefixedMemoryDB { impl TrieBackendStorage for MemoryDB { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { - Ok(hash_db::HashDB::get_with_meta(self, key, prefix)) + fn get(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Result> { + Ok(hash_db::HashDB::get_with_meta(self, key, prefix, parent)) } fn access_from(&self, key: &H::Out) { @@ -431,14 +432,14 @@ impl, H: Hasher> hash_db::HashDB for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - 
self.get_with_meta(key, prefix).map(|r| r.0) + self.get_with_meta(key, prefix, None).map(|r| r.0) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix) -> Option<(DBValue, TrieMeta)> { + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Option<(DBValue, TrieMeta)> { if *key == self.empty { return Some(([0u8].to_vec(), ::meta_for_empty())) } - match self.storage.get(&key, prefix) { + match self.storage.get(&key, prefix, parent) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 5892b26b91146..c1615b2fa5acc 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -594,7 +594,7 @@ mod tests { let mut memdb = MemoryDBMeta::<_, T::MetaHasher>::default(); let mut root = Default::default(); // TODO test flagged - let mut t = TrieDBMut::::new(&mut memdb, &mut root, layout); + let mut t = TrieDBMut::::new(&mut memdb, &mut root); for (x, y) in input.iter().rev() { t.insert(x, y).unwrap(); } @@ -605,18 +605,16 @@ mod tests { } fn check_iteration(input: &Vec<(&[u8], &[u8])>) { - // TODO test flagged - let layout = T::default(); let mut memdb = MemoryDBMeta::<_, T::MetaHasher>::default(); let mut root = Default::default(); { - let mut t = TrieDBMut::::new(&mut memdb, &mut root, layout.clone()); + let mut t = TrieDBMut::::new(&mut memdb, &mut root); for (x, y) in input.clone() { t.insert(x, y).unwrap(); } } { - let t = TrieDB::::new(&mut memdb, &root, layout).unwrap(); + let t = TrieDB::::new(&mut memdb, &root).unwrap(); assert_eq!( input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), t.iter().unwrap() @@ -628,14 +626,12 @@ mod tests { #[test] fn default_trie_root() { - // TODO test flagged - let layout = Layout::default(); let mut db = MemoryDB::default(); let mut root = TrieHash::::default(); let mut empty = TrieDBMut::::new(&mut db, &mut root); empty.commit(); let root1 = empty.root().as_ref().to_vec(); 
- let root2: Vec = layout.trie_root::<_, Vec, Vec>( + let root2: Vec = Layout::default().trie_root::<_, Vec, Vec>( std::iter::empty(), ).as_ref().iter().cloned().collect(); @@ -737,14 +733,13 @@ mod tests { check_iteration::(&input); } + // TODO add flag fn populate_trie<'db, T: TrieConfiguration>( db: &'db mut dyn HashDB, root: &'db mut TrieHash, - layout: T, v: &[(Vec, Vec)] ) -> TrieDBMut<'db, T> { - // TODO test non default layout - let mut t = TrieDBMut::::new_with_layout(db, root, layout); + let mut t = TrieDBMut::::new(db, root); for i in 0..v.len() { let key: &[u8]= &v[i].0; let val: &[u8] = &v[i].1; @@ -783,7 +778,7 @@ mod tests { let real = layout.trie_root(x.clone()); let mut memdb = MemoryDB::default(); let mut root = Default::default(); - let mut memtrie = populate_trie::(&mut memdb, &mut root, layout, &x); + let mut memtrie = populate_trie::(&mut memdb, &mut root, &x); memtrie.commit(); if *memtrie.root() != real { @@ -875,13 +870,11 @@ mod tests { (hex!("0103000000000000000469").to_vec(), hex!("0401000000").to_vec()), ]; - // TODO test non default layout - let layout = Layout::default(); let mut mdb = MemoryDB::default(); let mut root = Default::default(); - let _ = populate_trie::(&mut mdb, &mut root, layout.clone(), &pairs); + let _ = populate_trie::(&mut mdb, &mut root, &pairs); - let trie = TrieDB::::new_with_layout(&mdb, &root, layout).unwrap(); + let trie = TrieDB::::new(&mdb, &root).unwrap(); let iter = trie.iter().unwrap(); let mut iter_pairs = Vec::new(); @@ -980,8 +973,6 @@ mod tests { */ #[test] fn generate_storage_root_with_proof_works_independently_from_the_delta_order() { - // TODO use old format. 
- let layout = Layout::default(); let proof = StorageProof::decode(&mut &include_bytes!("../test-res/proof")[..]).unwrap(); let storage_root = sp_core::H256::decode( &mut &include_bytes!("../test-res/storage_root")[..], @@ -996,14 +987,12 @@ mod tests { ).unwrap(); let proof_db = proof.into_memory_db::(); - let first_storage_root = delta_trie_root( - layout.clone(), + let first_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, valid_delta, ).unwrap(); - let second_storage_root = delta_trie_root( - layout, + let second_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, invalid_delta, From 7e982917628e9d253a9541adaadee0b2dbe1641b Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 18 May 2021 16:07:17 +0200 Subject: [PATCH 003/188] setting flag --- client/api/src/cht.rs | 2 +- client/api/src/in_mem.rs | 1 + client/db/src/bench.rs | 9 +- client/db/src/lib.rs | 33 +- client/db/src/storage_cache.rs | 9 +- client/light/src/backend.rs | 6 +- client/service/test/src/client/light.rs | 6 +- primitives/externalities/src/lib.rs | 3 + primitives/io/src/lib.rs | 6 + primitives/state-machine/src/backend.rs | 9 +- primitives/state-machine/src/basic.rs | 6 + primitives/state-machine/src/ext.rs | 4 + .../state-machine/src/in_memory_backend.rs | 15 +- primitives/state-machine/src/lib.rs | 10 +- .../src/overlayed_changes/mod.rs | 14 +- .../state-machine/src/proving_backend.rs | 76 +++-- primitives/state-machine/src/read_only.rs | 4 + primitives/state-machine/src/testing.rs | 4 +- primitives/state-machine/src/trie_backend.rs | 28 +- primitives/tasks/src/async_externalities.rs | 4 + primitives/trie/src/lib.rs | 312 +++++++++++++++++- primitives/trie/src/node_codec.rs | 117 +++++-- 22 files changed, 567 insertions(+), 111 deletions(-) diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 1fdeedcde976f..235976af09018 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -116,7 +116,7 @@ pub fn build_proof( .into_iter() .map(|(k, 
v)| (k, Some(v))) .collect::>(); - let mut storage = InMemoryBackend::::default().update(vec![(None, transaction)]); + let mut storage = InMemoryBackend::::default().update(vec![(None, transaction)], false); let trie_storage = storage.as_trie_backend() .expect("InMemoryState::as_trie_backend always returns Some; qed"); prove_read_on_trie_backend( diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index d756e1cc0bbc4..3f2a26a2c55c8 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -576,6 +576,7 @@ impl backend::BlockImportOperation for BlockImportOperatio let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, + false, // TODO push flag in storage ); self.new_state = Some(transaction); diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index c046c5e64545b..2b74dd139cf0a 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -58,7 +58,7 @@ impl sp_state_machine::Storage> for StorageDb, _>>::extract_value_owned(value, parent))) .map_err(|e| format!("Database backend error: {:?}", e))?; - recorder.record(key.clone(), backend_value.clone()); + recorder.record(key.clone(), backend_value.clone(), HashFor::::LENGTH); Ok(backend_value) } else { self.db.get(0, &prefixed_key) @@ -68,7 +68,7 @@ impl sp_state_machine::Storage> for StorageDb::LENGTH); } } } @@ -151,6 +151,7 @@ impl BenchmarkingState { state.add_whitelist_to_tracker(); state.reopen()?; + let flagged = false; // TODO from genesis Storage let child_delta = genesis.children_default.iter().map(|(_storage_key, child_content)| ( &child_content.child_info, child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), @@ -158,6 +159,7 @@ impl BenchmarkingState { let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, + flagged, ); state.genesis = 
transaction.clone().drain(); state.genesis_root = root.clone(); @@ -399,8 +401,9 @@ impl StateBackend> for BenchmarkingState { fn storage_root<'a>( &self, delta: impl Iterator)>, + flag_hash_value: bool, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.storage_root(delta)) + self.state.borrow().as_ref().map_or(Default::default(), |s| s.storage_root(delta, flag_hash_value)) } fn child_storage_root<'a>( diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 245fc3b84f04c..ffeb9be4cd35c 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -227,8 +227,9 @@ impl StateBackend> for RefTrackingState { fn storage_root<'a>( &self, delta: impl Iterator)>, + flag_hash_value: bool, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.storage_root(delta) + self.state.storage_root(delta, flag_hash_value) } fn child_storage_root<'a>( @@ -779,6 +780,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc )); let mut changes_trie_config: Option = None; + let flag = false; // TODO flag from storage!! let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| { if &k[..] 
== well_known_keys::CHANGES_TRIE_CONFIG { @@ -789,7 +791,8 @@ impl sc_client_api::backend::BlockImportOperation for Bloc } (&k[..], Some(&v[..])) }), - child_delta + child_delta, + flag, ); self.db_updates = transaction; @@ -2283,6 +2286,7 @@ pub(crate) mod tests { #[test] fn set_state_data() { + let flagged = false; // TODO test with flagged let db = Backend::::new_test(2, 0); let hash = { let mut op = db.begin_operation().unwrap(); @@ -2302,7 +2306,8 @@ pub(crate) mod tests { header.state_root = op.old_state.storage_root(storage .iter() - .map(|(x, y)| (&x[..], Some(&y[..]))) + .map(|(x, y)| (&x[..], Some(&y[..]))), + flagged, ).0.into(); let hash = header.hash(); @@ -2346,7 +2351,8 @@ pub(crate) mod tests { let (root, overlay) = op.old_state.storage_root( storage.iter() - .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) + .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))), + flagged, ); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); @@ -2373,6 +2379,7 @@ pub(crate) mod tests { fn delete_only_when_negative_rc() { sp_tracing::try_init_simple(); let key; + let flagged = false; let backend = Backend::::new_test(1, 0); let hash = { @@ -2386,7 +2393,7 @@ pub(crate) mod tests { extrinsics_root: Default::default(), }; - header.state_root = op.old_state.storage_root(std::iter::empty()).0.into(); + header.state_root = op.old_state.storage_root(std::iter::empty(), flagged).0.into(); let hash = header.hash(); op.reset_storage(Storage { @@ -2426,7 +2433,8 @@ pub(crate) mod tests { header.state_root = op.old_state.storage_root(storage .iter() .cloned() - .map(|(x, y)| (x, Some(y))) + .map(|(x, y)| (x, Some(y))), + flagged, ).0.into(); let hash = header.hash(); @@ -2463,7 +2471,8 @@ pub(crate) mod tests { header.state_root = op.old_state.storage_root(storage .iter() .cloned() - .map(|(x, y)| (x, Some(y))) + .map(|(x, y)| (x, Some(y))), + flagged, ).0.into(); let hash = header.hash(); @@ -2500,7 +2509,8 @@ pub(crate) mod tests { header.state_root = 
op.old_state.storage_root(storage .iter() .cloned() - .map(|(x, y)| (x, Some(y))) + .map(|(x, y)| (x, Some(y))), + flagged, ).0.into(); op.set_block_data( @@ -2816,6 +2826,7 @@ pub(crate) mod tests { #[test] fn storage_hash_is_cached_correctly() { let backend = Backend::::new_test(10, 10); + let flagged = false; let hash0 = { let mut op = backend.begin_operation().unwrap(); @@ -2832,7 +2843,8 @@ pub(crate) mod tests { header.state_root = op.old_state.storage_root(storage .iter() - .map(|(x, y)| (&x[..], Some(&y[..]))) + .map(|(x, y)| (&x[..], Some(&y[..]))), + flagged, ).0.into(); let hash = header.hash(); @@ -2872,7 +2884,8 @@ pub(crate) mod tests { let (root, overlay) = op.old_state.storage_root( storage.iter() - .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) + .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))), + flagged, ); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 8929972e26e66..eeb5092fa6c8f 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -645,8 +645,9 @@ impl>, B: BlockT> StateBackend> for Cachin fn storage_root<'a>( &self, delta: impl Iterator)>, + flag_hash_value: bool, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.storage_root(delta) + self.state.storage_root(delta, flag_hash_value) } fn child_storage_root<'a>( @@ -827,8 +828,9 @@ impl>, B: BlockT> StateBackend> for Syncin fn storage_root<'a>( &self, delta: impl Iterator)>, + flag_hash_value: bool, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.caching_state().storage_root(delta) + self.caching_state().storage_root(delta, flag_hash_value) } fn child_storage_root<'a>( @@ -1196,7 +1198,8 @@ mod tests { let shared = new_shared_cache::(256*1024, (0,1)); let mut backend = InMemoryBackend::::default(); - backend.insert(std::iter::once((None, vec![(key.clone(), Some(vec![1]))]))); + let flagged = false; + 
backend.insert(std::iter::once((None, vec![(key.clone(), Some(vec![1]))])), flagged); let mut s = CachingState::new( backend.clone(), diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index d6f86209afe9f..596a6fab61acc 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -346,7 +346,8 @@ impl BlockImportOperation for ImportOperation } let storage_update = InMemoryBackend::from(storage); - let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); + let flag = false; // TODO flag_hash_value in Storage + let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta, flag); self.storage_update = Some(storage_update); Ok(storage_root) @@ -489,10 +490,11 @@ impl StateBackend for GenesisOrUnavailableState fn storage_root<'a>( &self, delta: impl Iterator)>, + flag_hash_value: bool, ) -> (H::Out, Self::Transaction) where H::Out: Ord { match *self { GenesisOrUnavailableState::Genesis(ref state) => - state.storage_root(delta), + state.storage_root(delta, flag_hash_value), GenesisOrUnavailableState::Unavailable => Default::default(), } } diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 33bcc96d1bb63..f9daf460e8eb5 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -455,10 +455,11 @@ fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) { // prepare remote client let remote_client = substrate_test_runtime_client::new(); let remote_block_id = BlockId::Number(0); + let flagged = false; let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() - .storage_root(::std::iter::empty()).0.into(); + .storage_root(std::iter::empty(), flagged).0.into(); // 'fetch' read proof from 
remote node let heap_pages = remote_client.storage(&remote_block_id, &StorageKey(well_known_keys::HEAP_PAGES.to_vec())) @@ -491,6 +492,7 @@ fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, V use substrate_test_runtime_client::TestClientBuilderExt; let child_info = ChildInfo::new_default(b"child1"); let child_info = &child_info; + let flagged = false; // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( @@ -502,7 +504,7 @@ fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, V let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() - .storage_root(::std::iter::empty()).0.into(); + .storage_root(std::iter::empty(), flagged).0.into(); // 'fetch' child read proof from remote node let child_value = remote_client.child_storage( diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index ce5a0990d738d..7d315812b2178 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -291,6 +291,9 @@ pub trait Externalities: ExtensionStore { fn proof_size(&self) -> Option { None } + + /// Set flag in inner state to activate hashing of values. + fn flag_hash_value(&mut self); } /// Extension for the [`Externalities`] trait. diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index af6f049f0c529..69e20347f8bf8 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -211,6 +211,12 @@ pub trait Storage { self.storage_commit_transaction() .expect("No open transaction that can be committed."); } + + /// Set flag to switch storage state + /// to internally hash its values. 
+ fn flag_hash_value(&mut self) { + self.flag_hash_value(); + } } /// Interface for accessing the child storage for default child trie, diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 1a8892f8dd141..e52fa999f149b 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -123,10 +123,12 @@ pub trait Backend: sp_std::fmt::Debug { /// Calculate the storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. + /// A flag `flag_inner_hash_value` can be set, it switches inner trie implementation. /// Does not include child storage updates. fn storage_root<'a>( &self, delta: impl Iterator)>, + flag_inner_hash_value: bool, ) -> (H::Out, Self::Transaction) where H::Out: Ord; /// Calculate the child storage root, with given delta over what is already stored in @@ -174,6 +176,7 @@ pub trait Backend: sp_std::fmt::Debug { &'a ChildInfo, impl Iterator)>, )>, + flag_inner_hash_value: bool, ) -> (H::Out, Self::Transaction) where H::Out: Ord + Encode { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); @@ -195,7 +198,8 @@ pub trait Backend: sp_std::fmt::Debug { child_roots .iter() .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) - ) + ), + flag_inner_hash_value, ); txs.consolidate(parent_txs); (root, txs) @@ -305,8 +309,9 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn storage_root<'b>( &self, delta: impl Iterator)>, + flag_inner_hash_value: bool, ) -> (H::Out, Self::Transaction) where H::Out: Ord { - (*self).storage_root(delta) + (*self).storage_root(delta, flag_inner_hash_value) } fn child_storage_root<'b>( diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index e4806c7878ed2..5fcf349eb72fc 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -281,6 +281,7 @@ 
impl Externalities for BasicExternalities { } } + // TODO set flag on layout??? Layout::::default().trie_root(self.inner.top.clone()).as_ref().into() } @@ -332,6 +333,11 @@ impl Externalities for BasicExternalities { fn set_whitelist(&mut self, _: Vec) { unimplemented!("set_whitelist is not supported in Basic") } + + fn flag_hash_value(&mut self) { + unimplemented!("flag_hash_value is not supported in Basic") + // TODO consider flag in layout so doable by adding to storage. + } } impl sp_externalities::ExtensionStore for BasicExternalities { diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 43793d3c815dc..abaa51939c914 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -737,6 +737,10 @@ where fn proof_size(&self) -> Option { self.backend.proof_size() } + + fn flag_hash_value(&mut self) { + self.overlay.set_flag_hash_value() + } } /// Implement `Encode` by forwarding the stored raw vec. diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 4ee16dfd2f8a8..4e2bef05e50ab 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -45,9 +45,10 @@ where >( &self, changes: T, + flag_inner_hash_value: bool, ) -> Self { let mut clone = self.clone(); - clone.insert(changes); + clone.insert(changes, flag_inner_hash_value); clone } @@ -57,6 +58,7 @@ where >( &mut self, changes: T, + flag_inner_hash_value: bool, ) { let (top, child) = changes.into_iter().partition::, _>(|v| v.0.is_none()); let (root, transaction) = self.full_storage_root( @@ -65,6 +67,7 @@ where .filter_map(|v| v.0.as_ref().map(|c| (c, v.1.iter().map(|(k, v)| (&k[..], v.as_deref())))) ), + flag_inner_hash_value, ); self.apply_transaction(root, transaction); @@ -116,6 +119,7 @@ where let mut backend = new_in_mem(); backend.insert( inner.into_iter().map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, 
Some(v))).collect())), + false, ); backend } @@ -178,11 +182,13 @@ mod tests { let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; + let flagged = false; let mut storage = storage.update( vec![( Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))] - )] + )], + flagged, ); let trie_backend = storage.as_trie_backend().unwrap(); assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), @@ -195,9 +201,10 @@ mod tests { fn insert_multiple_times_child_data_works() { let mut storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); + let flagged = false; - storage.insert(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])]); - storage.insert(vec![(Some(child_info.clone()), vec![(b"1".to_vec(), Some(b"3".to_vec()))])]); + storage.insert(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], flagged); + storage.insert(vec![(Some(child_info.clone()), vec![(b"1".to_vec(), Some(b"3".to_vec()))])], flagged); assert_eq!(storage.child_storage(&child_info, &b"2"[..]), Ok(Some(b"3".to_vec()))); assert_eq!(storage.child_storage(&child_info, &b"1"[..]), Ok(Some(b"3".to_vec()))); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index a6f1fb1f0e788..5c0074691b1c7 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1060,7 +1060,8 @@ mod tests { // fetch execution proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(); - let remote_root = remote_backend.storage_root(std::iter::empty()).0; + let flagged = false; // TODO try with flagged and trie with test_trie of already flagged + let remote_root = remote_backend.storage_root(std::iter::empty(), flagged).0; let (remote_result, remote_proof) = prove_execution::<_, _, u64, _, _>( remote_backend, &mut Default::default(), @@ -1411,8 +1412,9 @@ mod tests { let child_info = 
ChildInfo::new_default(b"sub1"); let child_info = &child_info; // fetch read proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let remote_backend = trie_backend::tests::test_trie(); // TODO test with flagged and flagged. + let flagged = false; + let remote_root = remote_backend.storage_root(::std::iter::empty(), flagged).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); // check proof locally let local_result1 = read_proof_check::( @@ -1433,7 +1435,7 @@ mod tests { assert_eq!(local_result2, false); // on child trie let remote_backend = trie_backend::tests::test_trie(); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let remote_root = remote_backend.storage_root(::std::iter::empty(), false).0; let remote_proof = prove_child_read( remote_backend, child_info, diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 1d3cbb59ba0c1..5386192255ad3 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -107,6 +107,8 @@ pub struct OverlayedChanges { transaction_index_ops: Vec, /// True if extrinsics stats must be collected. collect_extrinsics: bool, + /// True if we flag inner state to store hash of values. + flag_hash_value: bool, /// Collect statistic on this execution. stats: StateMachineStats, } @@ -260,6 +262,16 @@ impl OverlayedChanges { self.collect_extrinsics = collect_extrinsics; } + /// Ask to switch state to use inner hash. + pub fn set_flag_hash_value(&mut self) { + self.flag_hash_value = true; + } + + /// Is `flag_hash_value` flag set. + pub fn flag_hash_value(&self) -> bool { + self.flag_hash_value + } + /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred /// to the backend); Some(None) if the key has been deleted. 
Some(Some(...)) for a key whose /// value has been set. @@ -631,7 +643,7 @@ impl OverlayedChanges { |(k, v)| (&k[..], v.value().map(|v| &v[..])) ))); - let (root, transaction) = backend.full_storage_root(delta, child_delta); + let (root, transaction) = backend.full_storage_root(delta, child_delta, self.flag_hash_value); cache.transaction = Some(transaction); cache.transaction_storage_root = Some(root); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 66d5b8b2acb2e..ef9e1f26c0a69 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -17,7 +17,7 @@ //! Proving state machine backend. -use std::{sync::Arc, collections::{HashMap, hash_map::Entry}}; +use std::{sync::Arc, collections::HashMap}; use parking_lot::RwLock; use codec::{Decode, Codec, Encode}; use log::debug; @@ -123,29 +123,38 @@ pub struct ProofRecorder { inner: Arc>>, } -impl ProofRecorder { +impl ProofRecorder { /// Record the given `key` => `val` combination. - pub fn record(&self, key: Hash, val: Option<(DBValue, TrieMeta)>) { + pub fn record(&self, key: Hash, mut val: Option<(DBValue, TrieMeta)>, hash_len: usize) { let mut inner = self.inner.write(); - let encoded_size = if let Entry::Vacant(entry) = inner.records.entry(key) { - let encoded_size = val.as_ref().map(Encode::encoded_size).unwrap_or(0); - -// TODO with new meta -// val.as_mut().map(|val| val.1.set_accessed_value(false)); - entry.insert(val); - encoded_size - } else { - 0 - }; - inner.encoded_size += encoded_size; + let ProofRecorderInner { encoded_size, records } = &mut *inner; + records.entry(key).or_insert_with(|| { + if let Some(val) = val.as_mut() { + val.1.set_accessed_value(false); + *encoded_size += sp_trie::estimate_entry_size(val, hash_len); + } + val + }); + } /// Record actual trie level value access. 
- pub fn access_from(&self, _key: &Hash) { -// TODO with new meta -// self.inner.write().entry(key[..].to_vec()) -// .and_modify(|entry| entry.1.set_accessed_value(true)); + pub fn access_from(&self, key: &Hash, hash_len: usize) { + let mut inner = self.inner.write(); + let ProofRecorderInner { encoded_size, records } = &mut *inner; + records.entry(key.clone()) + .and_modify(|entry| { + if let Some(entry) = entry.as_mut() { + if !entry.1.accessed_value() { + let old_size = sp_trie::estimate_entry_size(entry, hash_len); + entry.1.set_accessed_value(true); + let new_size = sp_trie::estimate_entry_size(entry, hash_len); + *encoded_size += new_size; + *encoded_size -= old_size; + } + } + }); } /// Returns the value at the given `key`. @@ -234,12 +243,12 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage } let backend_value = self.backend.get(key, prefix, parent)?; - self.proof_recorder.record(key.clone(), backend_value.clone()); + self.proof_recorder.record(key.clone(), backend_value.clone(), H::LENGTH); Ok(backend_value) } - fn access_from(&self, _key: &H::Out) { - // access_from is mainly for proof recorder, not forwarding it. 
+ fn access_from(&self, key: &H::Out) { + self.proof_recorder.access_from(key, H::LENGTH); } } @@ -329,8 +338,9 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn storage_root<'b>( &self, delta: impl Iterator)>, + flag_inner_hash_value: bool, ) -> (H::Out, Self::Transaction) where H::Out: Ord { - self.0.storage_root(delta) + self.0.storage_root(delta, flag_inner_hash_value) } fn child_storage_root<'b>( @@ -411,9 +421,10 @@ mod tests { let proving_backend = test_proving(&trie_backend); assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); assert_eq!(trie_backend.pairs(), proving_backend.pairs()); + let flagged = false; - let (trie_root, mut trie_mdb) = trie_backend.storage_root(std::iter::empty()); - let (proving_root, mut proving_mdb) = proving_backend.storage_root(std::iter::empty()); + let (trie_root, mut trie_mdb) = trie_backend.storage_root(std::iter::empty(), flagged); + let (proving_root, mut proving_mdb) = proving_backend.storage_root(std::iter::empty(), flagged); assert_eq!(trie_root, proving_root); assert_eq!(trie_mdb.drain(), proving_mdb.drain()); } @@ -422,12 +433,13 @@ mod tests { fn proof_recorded_and_checked() { let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(vec![(None, contents)]); - let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; + let flagged = false; // TODO test with flag + let mut in_memory = in_memory.update(vec![(None, contents)], flagged); + let in_memory_root = in_memory.storage_root(::std::iter::empty(), flagged).0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(::std::iter::empty()).0; + let trie_root = trie.storage_root(::std::iter::empty(), flagged).0; assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| 
assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -454,11 +466,13 @@ mod tests { (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(contents); + let flagged = false; // TODO test with flag + let mut in_memory = in_memory.update(contents, flagged); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory.full_storage_root( std::iter::empty(), - child_storage_keys.iter().map(|k|(k, std::iter::empty())) + child_storage_keys.iter().map(|k|(k, std::iter::empty())), + flagged, ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), @@ -474,7 +488,7 @@ mod tests { )); let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(std::iter::empty()).0; + let trie_root = trie.storage_root(std::iter::empty(), flagged).0; assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!( trie.storage(&[i]).unwrap().unwrap(), @@ -511,7 +525,7 @@ mod tests { } #[test] - fn storage_proof_encoded_size_estimation_works() { + fn storage_proof_encoded_size_estimation_works() { // TODO same with flag -> test_trie with flag let trie_backend = test_trie(); let backend = test_proving(&trie_backend); diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 296520900c952..a54b6eaba6848 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -202,6 +202,10 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< fn set_whitelist(&mut self, _: Vec) { unimplemented!("set_whitelist is not supported in ReadOnlyExternalities") } + + fn flag_hash_value(&mut self) { + unimplemented!("flag_hash_value is not supported by ReadOnlyExternalities"); + } } impl<'a, H: Hasher, B: 'a + Backend> sp_externalities::ExtensionStore for ReadOnlyExternalities<'a, H, B> { diff --git 
a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index f4b0cb6592ce2..359dfce8b77ea 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -139,7 +139,7 @@ where /// Insert key/value into backend pub fn insert(&mut self, k: StorageKey, v: StorageValue) { - self.backend.insert(vec![(None, vec![(k, Some(v))])]); + self.backend.insert(vec![(None, vec![(k, Some(v))])], false); } /// Registers the given extension for this instance. @@ -171,7 +171,7 @@ where )) } - self.backend.update(transaction) + self.backend.update(transaction, self.overlay.flag_hash_value()) } /// Commit all pending changes to the underlying backend. diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 3e74f2d3df4b8..ae96d5bc97d2b 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -171,6 +171,7 @@ impl, H: Hasher> Backend for TrieBackend where fn storage_root<'a>( &self, delta: impl Iterator)>, + flag_inner_hash_value: bool, ) -> (H::Out, Self::Transaction) where H::Out: Ord { let mut write_overlay = S::Overlay::default(); let mut root = *self.essence.root(); @@ -180,7 +181,16 @@ impl, H: Hasher> Backend for TrieBackend where self.essence.backend_storage(), &mut write_overlay, ); - + if flag_inner_hash_value { + root = match sp_trie::flag_inner_meta_hasher::, _>(&mut eph, root) { + Ok(ret) => ret, + Err(e) => { + warn!(target: "trie", "Failed to flag trie: {}", e); + root + }, + } + } + match delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta) { Ok(ret) => root = ret, Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), @@ -325,21 +335,31 @@ pub mod tests { #[test] fn storage_root_is_non_default() { - assert!(test_trie().storage_root(iter::empty()).0 != H256::repeat_byte(0)); + let flagged = false; + assert!(test_trie().storage_root(iter::empty(), flagged).0 != H256::repeat_byte(0)); 
} #[test] fn storage_root_transaction_is_empty() { - assert!(test_trie().storage_root(iter::empty()).1.drain().is_empty()); + let flagged = false; + assert!(test_trie().storage_root(iter::empty(), flagged).1.drain().is_empty()); + } + + #[test] + fn storage_root_flagged_is_not_empty() { + let flagged = true; + assert!(!test_trie().storage_root(iter::empty(), flagged).1.drain().is_empty()); } #[test] fn storage_root_transaction_is_non_empty() { + // TODO test with flagged `test_trie` (initially only). let (new_root, mut tx) = test_trie().storage_root( iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), + false, ); assert!(!tx.drain().is_empty()); - assert!(new_root != test_trie().storage_root(iter::empty()).0); + assert!(new_root != test_trie().storage_root(iter::empty(), false).0); } #[test] diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index 5d99ca4368d0b..ff21e74889d06 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -189,6 +189,10 @@ impl Externalities for AsyncExternalities { fn set_whitelist(&mut self, _: Vec) { unimplemented!("set_whitelist is not supported in AsyncExternalities") } + + fn flag_hash_value(&mut self) { + unimplemented!("flag_hash_value is not supported in AsyncExternalities") + } } impl sp_externalities::ExtensionStore for AsyncExternalities { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index c1615b2fa5acc..0d08ac22d2243 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -25,7 +25,7 @@ mod node_codec; mod storage_proof; mod trie_stream; -use sp_std::{boxed::Box, marker::PhantomData, vec::Vec, borrow::Borrow, fmt}; +use sp_std::{boxed::Box, marker::PhantomData, vec, vec::Vec, borrow::Borrow, fmt}; use hash_db::{Hasher, Prefix}; //use trie_db::proof::{generate_proof, verify_proof}; pub use trie_db::proof::VerifyError; @@ -39,7 +39,7 @@ pub use storage_proof::StorageProof; /// 
Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, - nibble_ops, TrieDBIterator, Meta, + nibble_ops, TrieDBIterator, Meta, NodeChange, node::{NodePlan, ValuePlan}, }; /// Various re-exports from the `memory-db` crate. pub use memory_db::KeyFunction; @@ -49,7 +49,146 @@ pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX, MetaHasher}; pub use hash_db::NoMeta; /// Meta use by trie state. -pub type TrieMeta = (); +#[derive(Default, Clone)] +pub struct TrieMeta { + // range of encoded value or hashed value. + pub range: Option>, + // When `do_value_hash` is true, try to + // store this behavior in top node + // encoded (need to be part of state). + pub recorded_do_value_hash: bool, + // Does current encoded contains a hash instead of + // a value (information stored in meta for proofs). + pub contain_hash: bool, + // Flag indicating if value hash can run. + // When defined for a node it gets active + // for all children node + pub do_value_hash: bool, + // Record if a value was accessed, this is + // set as accessed by defalult, but can be + // change on access explicitely: `HashDB::get_with_meta`. + // and reset on access explicitely: `HashDB::access_from`. + pub unused_value: bool, +} + +impl Meta for TrieMeta { + /// Layout do not have content. + type MetaInput = (); + + /// When true apply inner hashing of value. 
+ type StateMeta = bool; + + fn set_state_meta(&mut self, state_meta: Self::StateMeta) { + self.recorded_do_value_hash = state_meta; + self.do_value_hash = state_meta; + } + + fn has_state_meta(&self) -> bool { + self.recorded_do_value_hash + } + + fn read_state_meta(&mut self, data: &[u8]) -> Result { + let offset = if data[0] == trie_constants::ENCODED_META_ALLOW_HASH { + self.recorded_do_value_hash = true; + self.do_value_hash = true; + 1 + } else { + 0 + }; + Ok(offset) + } + + fn write_state_meta(&self) -> Vec { + if self.do_value_hash { + // Note that this only works with sp_trie codec that + // cannot encode node starting by this byte. + [trie_constants::ENCODED_META_ALLOW_HASH].to_vec() + } else { + Vec::new() + } + } + + fn meta_for_new( + _input: Self::MetaInput, + parent: Option<&Self>, + ) -> Self { + let mut result = Self::default(); + result.do_value_hash = parent.map(|p| p.do_value_hash).unwrap_or_default(); + result + } + + fn meta_for_existing_inline_node( + input: Self::MetaInput, + parent: Option<&Self>, + ) -> Self { + Self::meta_for_new(input, parent) + } + + fn meta_for_empty( + ) -> Self { + Default::default() + } + + fn set_value_callback( + &mut self, + _new_value: Option<&[u8]>, + _is_branch: bool, + changed: NodeChange, + ) -> NodeChange { + changed + } + + fn encoded_value_callback( + &mut self, + value_plan: ValuePlan, + ) { + let (contain_hash, range) = match value_plan { + ValuePlan::Value(range) => (false, range), + ValuePlan::HashedValue(range, _size) => (true, range), + ValuePlan::NoValue => return, + }; + + self.range = Some(range); + self.contain_hash = contain_hash; + } + + fn set_child_callback( + &mut self, + _child: Option<&Self>, + changed: NodeChange, + _at: usize, + ) -> NodeChange { + changed + } + + fn decoded_callback( + &mut self, + _node_plan: &NodePlan, + ) { + } + + fn contains_hash_of_value(&self) -> bool { + self.contain_hash + } + + fn do_value_hash(&self) -> bool { + self.unused_value + } +} + +impl TrieMeta { + 
/// Was value accessed. + pub fn accessed_value(&mut self) -> bool { + !self.unused_value + } + + /// For proof, this allow setting node as unaccessed until + /// a call to `access_from`. + pub fn set_accessed_value(&mut self, accessed: bool) { + self.unused_value = !accessed; + } +} + /// substrate trie layout pub struct Layout(sp_std::marker::PhantomData); @@ -74,6 +213,7 @@ impl Clone for Layout { impl TrieLayout for Layout { const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; + const USE_META: bool = true; type Hash = H; type Codec = NodeCodec; type MetaHasher = StateHasher; @@ -92,31 +232,90 @@ impl TrieLayout for Layout { #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct StateHasher; -impl MetaHasher for StateHasher +impl MetaHasher for StateHasher where H: Hasher, - T: for<'a> From<&'a [u8]>, { type Meta = TrieMeta; - fn hash(value: &[u8], _meta: &Self::Meta) -> H::Out { - H::hash(value) + fn hash(value: &[u8], meta: &Self::Meta) -> H::Out { + match &meta { + TrieMeta { range: Some(range), contain_hash: false, do_value_hash, .. } => { + if *do_value_hash { + let value = inner_hashed_value::(value, Some((range.start, range.end))); + H::hash(value.as_slice()) + } else { + H::hash(value) + } + }, + TrieMeta { range: Some(_range), contain_hash: true, .. } => { + // value contains a hash of data (already inner_hashed_value). + H::hash(value) + }, + _ => { + H::hash(value) + }, + } } - fn stored_value(value: &[u8], _meta: Self::Meta) -> T { - value.into() + fn stored_value(value: &[u8], mut meta: Self::Meta) -> DBValue { + let mut stored = Vec::with_capacity(value.len() + 1); + if meta.contain_hash { + // already contain hash, just flag it. 
+ stored.push(trie_constants::DEAD_HEADER_META_HASHED_VALUE); + stored.extend_from_slice(value); + return stored; + } + if meta.unused_value { + if let Some(range) = meta.range.as_ref() { + if range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { + // Waring this assume that encoded value does not start by this, so it is tightly coupled + // with the header type of the codec: only for optimization. + stored.push(trie_constants::DEAD_HEADER_META_HASHED_VALUE); + let range = meta.range.as_ref().expect("Tested in condition"); + meta.contain_hash = true; // useless but could be with meta as &mut + // store hash instead of value. + let value = inner_hashed_value::(value, Some((range.start, range.end))); + stored.extend_from_slice(value.as_slice()); + return stored; + } + } + } + stored.extend_from_slice(value); + stored } - fn stored_value_owned(value: T, _meta: Self::Meta) -> T { - value + fn stored_value_owned(value: DBValue, meta: Self::Meta) -> DBValue { + >::stored_value(value.as_slice(), meta) } - fn extract_value<'a>(stored: &'a [u8], _parent_meta: Option<&Self::Meta>) -> (&'a [u8], Self::Meta) { - (stored, ()) + fn extract_value<'a>(mut stored: &'a [u8], parent_meta: Option<&Self::Meta>) -> (&'a [u8], Self::Meta) { + let input = &mut stored; + let mut contain_hash = false; + if input.get(0) == Some(&trie_constants::DEAD_HEADER_META_HASHED_VALUE) { + contain_hash = true; + *input = &input[1..]; + } + let mut meta = TrieMeta { + range: None, + unused_value: contain_hash, + contain_hash, + do_value_hash: false, + recorded_do_value_hash: false, + }; + // get recorded_do_value_hash + let _offset = meta.read_state_meta(stored) + .expect("State meta reading failure."); + //let stored = &stored[offset..]; + meta.do_value_hash = meta.recorded_do_value_hash || parent_meta.map(|m| m.do_value_hash).unwrap_or(false); + (stored, meta) } - fn extract_value_owned(stored: T, _parent_meta: Option<&Self::Meta>) -> (T, Self::Meta) { - (stored, ()) + fn 
extract_value_owned(mut stored: DBValue, parent_meta: Option<&Self::Meta>) -> (DBValue, Self::Meta) { + let len = stored.len(); + let (v, meta) = >::extract_value(stored.as_slice(), parent_meta); + let removed = len - v.len(); + (stored.split_off(removed), meta) } } @@ -270,6 +469,26 @@ pub fn delta_trie_root( Ok(root) } +/// Flag inner trie with state metadata to enable hash of value internally. +pub fn flag_inner_meta_hasher( + db: &mut DB, + mut root: TrieHash, +) -> Result, Box>> where + L: TrieConfiguration, + DB: hash_db::HashDB, +{ + { + let mut t = TrieDBMut::::from_existing(db, &mut root)?; + let flag = true; + let key: &[u8]= &[]; + if !t.flag(key, flag)? { + t.insert(key, b"")?; + assert!(t.flag(key, flag)?); + } + } + Ok(root) +} + /// Read a value from the trie. pub fn read_trie_value>( db: &DB, @@ -558,9 +777,70 @@ impl<'a, DB, H, T, M> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> w } } +/// Representation of node with with inner hash instead of value. +pub fn inner_hashed_value(x: &[u8], range: Option<(usize, usize)>) -> Vec { + if let Some((start, end)) = range { + let len = x.len(); + if start < len && end == len { + // terminal inner hash + let hash_end = H::hash(&x[start..]); + let mut buff = vec![0; x.len() + hash_end.as_ref().len() - (end - start)]; + buff[..start].copy_from_slice(&x[..start]); + buff[start..].copy_from_slice(hash_end.as_ref()); + return buff; + } + if start == 0 && end < len { + // start inner hash + let hash_start = H::hash(&x[..start]); + let hash_len = hash_start.as_ref().len(); + let mut buff = vec![0; x.len() + hash_len - (end - start)]; + buff[..hash_len].copy_from_slice(hash_start.as_ref()); + buff[hash_len..].copy_from_slice(&x[end..]); + return buff; + } + if start < len && end < len { + // middle inner hash + let hash_middle = H::hash(&x[start..end]); + let hash_len = hash_middle.as_ref().len(); + let mut buff = vec![0; x.len() + hash_len - (end - start)]; + buff[..start].copy_from_slice(&x[..start]); + 
buff[start..start + hash_len].copy_from_slice(hash_middle.as_ref()); + buff[start + hash_len..].copy_from_slice(&x[end..]); + return buff; + } + } + // if anything wrong default to hash + x.to_vec() +} + +/// Estimate encoded size of node. +pub fn estimate_entry_size(entry: &(DBValue, TrieMeta), hash_len: usize) -> usize { + use codec::Encode; + let mut full_encoded = entry.0.encoded_size(); + if entry.1.unused_value { + if let Some(range) = entry.1.range.as_ref() { + let value_size = range.end - range.start; + if range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { + full_encoded -= value_size; + full_encoded += hash_len; + full_encoded += 1; + } + } + } + + full_encoded +} + /// Constants used into trie simplification codec. mod trie_constants { - pub const EMPTY_TRIE: u8 = 0; + /// Treshold for using hash of value instead of value + /// in encoded trie node when flagged. + pub const INNER_HASH_TRESHOLD: usize = 33; + const FIRST_PREFIX: u8 = 0b_00 << 6; + pub const EMPTY_TRIE: u8 = FIRST_PREFIX | 0b_00; + pub const ENCODED_META_ALLOW_HASH: u8 = FIRST_PREFIX | 0b_01; + /// In proof this header is used when only hashed value is stored. 
+ pub const DEAD_HEADER_META_HASHED_VALUE: u8 = FIRST_PREFIX | 0b_00_10; pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize; pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6; pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6; diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 11b379fce6f2d..b0b7c771c6b55 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -87,16 +87,24 @@ impl<'a> Input for ByteSliceInput<'a> { #[derive(Default, Clone)] pub struct NodeCodec(PhantomData); -impl NodeCodecT for NodeCodec { - type Error = Error; - type HashOut = H::Out; - - fn hashed_null_node() -> ::Out { - H::hash(>::empty_node()) - } - - fn decode_plan_inner(data: &[u8]) -> sp_std::result::Result { +impl NodeCodec { + fn decode_plan_inner_hashed( + data: &[u8], + meta: Option<&mut M>, + ) -> Result { + let contains_hash = meta.as_ref() + .map(|m| m.contains_hash_of_value()).unwrap_or_default(); + if data.len() < 1 { + return Err(Error::BadFormat); + } + let offset = if let Some(meta) = meta { + meta.read_state_meta(data).map_err(|_| Error::BadFormat)? + } else { + 0 + }; let mut input = ByteSliceInput::new(data); + let _ = input.take(offset)?; + match NodeHeader::decode(&mut input)? { NodeHeader::Null => Ok(NodePlan::Empty), NodeHeader::Branch(has_value, nibble_count) => { @@ -113,7 +121,11 @@ impl NodeCodecT for NodeCodec { let bitmap = Bitmap::decode(&data[bitmap_range])?; let value = if has_value { let count = >::decode(&mut input)?.0 as usize; - ValuePlan::Value(input.take(count)?) + if contains_hash { + ValuePlan::HashedValue(input.take(H::LENGTH)?, count) + } else { + ValuePlan::Value(input.take(count)?) 
+ } } else { ValuePlan::NoValue }; @@ -149,13 +161,40 @@ impl NodeCodecT for NodeCodec { )?; let partial_padding = nibble_ops::number_padding(nibble_count); let count = >::decode(&mut input)?.0 as usize; + let value = if contains_hash { + ValuePlan::HashedValue(input.take(H::LENGTH)?, count) + } else { + ValuePlan::Value(input.take(count)?) + }; + Ok(NodePlan::Leaf { partial: NibbleSlicePlan::new(partial, partial_padding), - value: ValuePlan::Value(input.take(count)?), + value, }) } } } +} + +impl NodeCodecT for NodeCodec { + type Error = Error; + type HashOut = H::Out; + + fn hashed_null_node() -> ::Out { + H::hash(>::empty_node()) + } + + fn decode_plan(data: &[u8], meta: &mut M) -> Result { + Self::decode_plan_inner_hashed(data, Some(meta)).map(|plan| { + meta.decoded_callback(&plan); + plan + }) + } + + fn decode_plan_inner(data: &[u8]) -> Result { + let meta: Option<&mut M> = None; + Self::decode_plan_inner_hashed(data, meta) + } fn is_empty_node(data: &[u8]) -> bool { data == >::empty_node() @@ -166,15 +205,25 @@ impl NodeCodecT for NodeCodec { } fn leaf_node(partial: Partial, value: Value, meta: &mut M) -> Vec { - let mut output = partial_encode(partial, NodeKind::Leaf); - if let Value::Value(value) = value { - Compact(value.len() as u32).encode_to(&mut output); - let start = output.len(); - output.extend_from_slice(value); - let end = output.len(); - meta.encoded_value_callback(ValuePlan::Value(start..end)); - } else { - unimplemented!("No support for incomplete nodes"); + let mut output = meta.write_state_meta(); + output.append(&mut partial_encode(partial, NodeKind::Leaf)); + match value { + Value::Value(value) => { + Compact(value.len() as u32).encode_to(&mut output); + let start = output.len(); + output.extend_from_slice(value); + let end = output.len(); + meta.encoded_value_callback(ValuePlan::Value(start..end)); + }, + Value::HashedValue(hash, size) => { + debug_assert!(hash.len() == H::LENGTH); + Compact(size as u32).encode_to(&mut output); + let 
start = output.len(); + output.extend_from_slice(hash); + let end = output.len(); + meta.encoded_value_callback(ValuePlan::HashedValue(start..end, size)); + }, + Value::NoValue => unimplemented!("No support for incomplete nodes"), } output } @@ -203,11 +252,20 @@ impl NodeCodecT for NodeCodec { maybe_value: Value, meta: &mut M, ) -> Vec { - let mut output = match maybe_value { - Value::Value(..) => partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue), - Value::NoValue => partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue), - Value::HashedValue(..) => unimplemented!("No support for incomplete nodes"), - }; + let mut output = meta.write_state_meta(); + output.append(&mut if let Value::NoValue = &maybe_value { + partial_from_iterator_encode( + partial, + number_nibble, + NodeKind::BranchNoValue, + ) + } else { + partial_from_iterator_encode( + partial, + number_nibble, + NodeKind::BranchWithValue, + ) + }); let bitmap_index = output.len(); let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH]; (0..BITMAP_LENGTH).for_each(|_|output.push(0)); @@ -219,8 +277,15 @@ impl NodeCodecT for NodeCodec { let end = output.len(); meta.encoded_value_callback(ValuePlan::Value(start..end)); }, + Value::HashedValue(hash, size) => { + debug_assert!(hash.len() == H::LENGTH); + Compact(size as u32).encode_to(&mut output); + let start = output.len(); + output.extend_from_slice(hash); + let end = output.len(); + meta.encoded_value_callback(ValuePlan::HashedValue(start..end, size)); + }, Value::NoValue => (), - Value::HashedValue(..) 
=> unimplemented!("No support for incomplete nodes"), } Bitmap::encode(children.map(|maybe_child| match maybe_child.borrow() { Some(ChildReference::Hash(h)) => { From 5d3a1b209626c5a7d27bcb89454f62f4311acb21 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 18 May 2021 17:05:34 +0200 Subject: [PATCH 004/188] flag in storage struct --- client/api/src/in_mem.rs | 2 +- client/chain-spec/src/chain_spec.rs | 11 ++++++++--- client/db/src/bench.rs | 2 +- client/db/src/lib.rs | 5 ++++- client/executor/src/integration_tests/mod.rs | 2 ++ client/light/src/backend.rs | 2 +- client/service/src/chain_ops/export_raw_state.rs | 3 ++- frame/support/test/tests/instance.rs | 3 ++- frame/system/src/lib.rs | 1 + primitives/io/src/lib.rs | 3 +++ primitives/state-machine/src/basic.rs | 12 +++++++----- primitives/state-machine/src/ext.rs | 6 +++++- primitives/storage/src/lib.rs | 2 ++ test-utils/runtime/src/genesismap.rs | 4 +++- test-utils/runtime/src/system.rs | 1 + 15 files changed, 43 insertions(+), 16 deletions(-) diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 3f2a26a2c55c8..3dbcd4aa897de 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -576,7 +576,7 @@ impl backend::BlockImportOperation for BlockImportOperatio let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, - false, // TODO push flag in storage + storage.flag_hashed_value, ); self.new_state = Some(transaction); diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 2faf95568290e..3838c5dbc0def 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -87,7 +87,9 @@ impl GenesisSource { ) .collect(); - Ok(Genesis::Raw(RawGenesis { top, children_default })) + let flag_hashed_value = storage.flag_hashed_value; + + Ok(Genesis::Raw(RawGenesis { top, children_default, flag_hashed_value })) }, } } @@ -97,7 +99,7 @@ impl BuildStorage 
for ChainSpec { fn build_storage(&self) -> Result { match self.genesis.resolve()? { Genesis::Runtime(gc) => gc.build_storage(), - Genesis::Raw(RawGenesis { top: map, children_default: children_map }) => Ok(Storage { + Genesis::Raw(RawGenesis { top: map, children_default: children_map, flag_hashed_value }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), children_default: children_map.into_iter().map(|(storage_key, child_content)| { let child_info = ChildInfo::new_default(storage_key.0.as_slice()); @@ -109,6 +111,7 @@ impl BuildStorage for ChainSpec { }, ) }).collect(), + flag_hashed_value, }), } } @@ -130,6 +133,7 @@ pub type GenesisStorage = HashMap; pub struct RawGenesis { pub top: GenesisStorage, pub children_default: HashMap, + pub flag_hashed_value: bool, } #[derive(Serialize, Deserialize)] @@ -316,8 +320,9 @@ impl ChainSpec { .collect(), )) .collect(); + let flag_hashed_value = storage.flag_hashed_value; - Genesis::Raw(RawGenesis { top, children_default }) + Genesis::Raw(RawGenesis { top, children_default, flag_hashed_value }) }, (_, genesis) => genesis, }; diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 2b74dd139cf0a..bc5ab72dee66e 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -151,7 +151,7 @@ impl BenchmarkingState { state.add_whitelist_to_tracker(); state.reopen()?; - let flagged = false; // TODO from genesis Storage + let flagged = genesis.flag_hashed_value; let child_delta = genesis.children_default.iter().map(|(_storage_key, child_content)| ( &child_content.child_info, child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index ffeb9be4cd35c..ce45c298e9de3 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -780,7 +780,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc )); let mut changes_trie_config: Option = None; - let flag = false; // TODO flag from storage!! 
+ let flag = storage.flag_hashed_value; let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| { if &k[..] == well_known_keys::CHANGES_TRIE_CONFIG { @@ -2314,6 +2314,7 @@ pub(crate) mod tests { op.reset_storage(Storage { top: storage.into_iter().collect(), children_default: Default::default(), + flag_hashed_value: flagged, }).unwrap(); op.set_block_data( header.clone(), @@ -2399,6 +2400,7 @@ pub(crate) mod tests { op.reset_storage(Storage { top: Default::default(), children_default: Default::default(), + flag_hashed_value: flagged, }).unwrap(); key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); @@ -2851,6 +2853,7 @@ pub(crate) mod tests { op.reset_storage(Storage { top: storage.into_iter().collect(), children_default: Default::default(), + flag_hashed_value: flagged, }).unwrap(); op.set_block_data( header.clone(), diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index ecb48da91490a..2b76825ea51bc 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -210,6 +210,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { b"baz".to_vec() => b"bar".to_vec() ], children_default: map![], + flag_hashed_value: false, }); assert_eq!(ext, expected); } @@ -243,6 +244,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { b"bbb".to_vec() => b"5".to_vec() ], children_default: map![], + flag_hashed_value: false, }); assert_eq!(expected, ext); } diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 596a6fab61acc..49c861411bb45 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -346,7 +346,7 @@ impl BlockImportOperation for ImportOperation } let storage_update = InMemoryBackend::from(storage); - let flag = false; // TODO flag_hash_value in Storage + let flag = input.flag_hashed_value; let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), 
child_delta, flag); self.storage_update = Some(storage_update); diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs index 71822cf6275f8..bdba4911f8db6 100644 --- a/client/service/src/chain_ops/export_raw_state.rs +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -42,6 +42,7 @@ where let empty_key = StorageKey(Vec::new()); let mut top_storage = client.storage_pairs(&block, &empty_key)?; let mut children_default = HashMap::new(); + let flag_hashed_value = false; // TODO read from trie (through clinet api). // Remove all default child storage roots from the top storage and collect the child storage // pairs. @@ -69,5 +70,5 @@ where } let top = top_storage.into_iter().map(|(k, v)| (k.0, v.0)).collect(); - Ok(Storage { top, children_default }) + Ok(Storage { top, children_default, flag_hashed_value }) } diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 077763ac9128d..e698f989de45f 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -326,7 +326,8 @@ fn new_test_ext() -> sp_io::TestExternalities { fn storage_instance_independence() { let mut storage = sp_core::storage::Storage { top: std::collections::BTreeMap::new(), - children_default: std::collections::HashMap::new() + children_default: std::collections::HashMap::new(), + flag_hashed_value: false, // TODO test with }; sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || { module2::Value::::put(0); diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index c3fe688420097..44e7a3d5752fe 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1408,6 +1408,7 @@ impl Pallet { >::hashed_key().to_vec() => [69u8; 32].encode() ], children_default: map![], + flag_hashed_value: false, }) } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 69e20347f8bf8..03bfd73772724 100644 --- a/primitives/io/src/lib.rs +++ 
b/primitives/io/src/lib.rs @@ -1454,6 +1454,7 @@ mod tests { t = BasicExternalities::new(Storage { top: map![b"foo".to_vec() => b"bar".to_vec()], children_default: map![], + flag_hashed_value: false, }); t.execute_with(|| { @@ -1468,6 +1469,7 @@ mod tests { let mut t = BasicExternalities::new(Storage { top: map![b":test".to_vec() => value.clone()], children_default: map![], + flag_hashed_value: false, }); t.execute_with(|| { @@ -1490,6 +1492,7 @@ mod tests { b":abdd".to_vec() => b"\x0b\0\0\0Hello world".to_vec() ], children_default: map![], + flag_hashed_value: false, }); t.execute_with(|| { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 5fcf349eb72fc..950b6d715b18b 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -74,6 +74,7 @@ impl BasicExternalities { inner: Storage { top: std::mem::take(&mut storage.top), children_default: std::mem::take(&mut storage.children_default), + flag_hashed_value: storage.flag_hashed_value, }, extensions: Default::default(), }; @@ -128,6 +129,7 @@ impl From> for BasicExternalities { inner: Storage { top: hashmap, children_default: Default::default(), + flag_hashed_value: false, }, extensions: Default::default(), } @@ -335,8 +337,7 @@ impl Externalities for BasicExternalities { } fn flag_hash_value(&mut self) { - unimplemented!("flag_hash_value is not supported in Basic") - // TODO consider flag in layout so doable by adding to storage. 
+ self.inner.flag_hashed_value = true; } } @@ -402,7 +403,8 @@ mod tests { data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], child_info: child_info.to_owned(), } - ] + ], + flag_hashed_value: false, }); assert_eq!(ext.child_storage(child_info, b"doe"), Some(b"reindeer".to_vec())); @@ -432,10 +434,10 @@ mod tests { ], child_info: child_info.to_owned(), } - ] + ], + flag_hashed_value: false, }); - let res = ext.kill_child_storage(child_info, None); assert_eq!(res, (true, 3)); } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index abaa51939c914..0c20a186a989e 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -950,7 +950,8 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], - children_default: map![] + children_default: map![], + flag_hashed_value: false, }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -996,6 +997,7 @@ mod tests { child_info: child_info.to_owned(), } ], + flag_hashed_value: false, }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1040,6 +1042,7 @@ mod tests { child_info: child_info.to_owned(), } ], + flag_hashed_value: false, }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1079,6 +1082,7 @@ mod tests { child_info: child_info.to_owned(), } ], + flag_hashed_value: false, }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index ced8d8c02a80b..5c8028e40c2a8 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -140,6 +140,8 @@ pub struct Storage { /// trie kind, so this is exclusively for the `ChildType::ParentKeyId` /// tries. pub children_default: std::collections::HashMap, StorageChild>, + /// Flag state for using hash of values internally. 
+ pub flag_hashed_value: bool, } /// Storage change set diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 63c4bab55ec49..93b10e21e71d9 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -74,8 +74,10 @@ impl GenesisConfig { // Add the extra storage entries. map.extend(self.extra_storage.top.clone().into_iter()); + let flag_hashed_value = self.extra_storage.flag_hashed_value; + // Assimilate the system genesis config. - let mut storage = Storage { top: map, children_default: self.extra_storage.children_default.clone()}; + let mut storage = Storage { top: map, children_default: self.extra_storage.children_default.clone(), flag_hashed_value}; let mut config = system::GenesisConfig::default(); config.authorities = self.authorities.clone(); config.assimilate_storage(&mut storage).expect("Adding `system::GensisConfig` to the genesis"); diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 33ef7b12d8db0..fbf6012d083e7 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -385,6 +385,7 @@ mod tests { } ], children_default: map![], + flag_hashed_value: false, // TODO test with true variant }, ) } From 4256a407a495f7e8f7590d87b91ebe398cf20f95 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 18 May 2021 18:17:47 +0200 Subject: [PATCH 005/188] fix flagging to access and insert. 
--- primitives/state-machine/src/lib.rs | 41 ++++++++--- .../state-machine/src/proving_backend.rs | 41 ++++++++--- primitives/state-machine/src/trie_backend.rs | 70 ++++++++++++++----- primitives/trie/src/lib.rs | 22 ++++-- 4 files changed, 130 insertions(+), 44 deletions(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 5c0074691b1c7..8b12c8aa030e6 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -959,7 +959,11 @@ mod tests { #[test] fn execute_works() { - let backend = trie_backend::tests::test_trie(); + execute_works_inner(false); + execute_works_inner(true); + } + fn execute_works_inner(hashed: bool) { + let backend = trie_backend::tests::test_trie(hashed); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -986,10 +990,13 @@ mod tests { ); } - #[test] fn execute_works_with_native_else_wasm() { - let backend = trie_backend::tests::test_trie(); + execute_works_with_native_else_wasm_inner(false); + execute_works_with_native_else_wasm_inner(true); + } + fn execute_works_with_native_else_wasm_inner(hashed: bool) { + let backend = trie_backend::tests::test_trie(hashed); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1015,8 +1022,12 @@ mod tests { #[test] fn dual_execution_strategy_detects_consensus_failure() { + dual_execution_strategy_detects_consensus_failure_inner(false); + dual_execution_strategy_detects_consensus_failure_inner(true); + } + fn dual_execution_strategy_detects_consensus_failure_inner(hashed: bool) { let mut consensus_failed = false; - let backend = trie_backend::tests::test_trie(); + let backend = trie_backend::tests::test_trie(hashed); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1051,6 +1062,10 @@ mod tests { #[test] fn prove_execution_and_proof_check_works() { + prove_execution_and_proof_check_works_inner(true); + 
prove_execution_and_proof_check_works_inner(false); + } + fn prove_execution_and_proof_check_works_inner(flagged: bool) { let executor = DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -1059,9 +1074,10 @@ mod tests { }; // fetch execution proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(); - let flagged = false; // TODO try with flagged and trie with test_trie of already flagged + let remote_backend = trie_backend::tests::test_trie(flagged); let remote_root = remote_backend.storage_root(std::iter::empty(), flagged).0; + let remote_root_2 = remote_backend.storage_root(std::iter::empty(), false).0; + assert_eq!(remote_root, remote_root_2); let (remote_result, remote_proof) = prove_execution::<_, _, u64, _, _>( remote_backend, &mut Default::default(), @@ -1409,11 +1425,14 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { + prove_read_and_proof_check_works_inner(false); + prove_read_and_proof_check_works_inner(true); + } + fn prove_read_and_proof_check_works_inner(flagged: bool) { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; // fetch read proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(); // TODO test with flagged and flagged. 
- let flagged = false; + let remote_backend = trie_backend::tests::test_trie(flagged); let remote_root = remote_backend.storage_root(::std::iter::empty(), flagged).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); // check proof locally @@ -1434,7 +1453,7 @@ mod tests { ); assert_eq!(local_result2, false); // on child trie - let remote_backend = trie_backend::tests::test_trie(); + let remote_backend = trie_backend::tests::test_trie(flagged); let remote_root = remote_backend.storage_root(::std::iter::empty(), false).0; let remote_proof = prove_child_read( remote_backend, @@ -1473,7 +1492,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); let mut transaction = { - let backend = test_trie(); + let backend = test_trie(false); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, @@ -1541,7 +1560,7 @@ mod tests { struct DummyExt(u32); } - let backend = trie_backend::tests::test_trie(); + let backend = trie_backend::tests::test_trie(false); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index ef9e1f26c0a69..b6f1251a03dbf 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -393,13 +393,21 @@ mod tests { #[test] fn proof_is_empty_until_value_is_read() { - let trie_backend = test_trie(); + proof_is_empty_until_value_is_read_inner(false); + proof_is_empty_until_value_is_read_inner(true); + } + fn proof_is_empty_until_value_is_read_inner(flagged: bool) { + let trie_backend = test_trie(flagged); assert!(test_proving(&trie_backend).extract_proof().is_empty()); } #[test] fn proof_is_non_empty_after_value_is_read() { - let trie_backend = test_trie(); + proof_is_non_empty_after_value_is_read_inner(false); + proof_is_non_empty_after_value_is_read_inner(true); + } + fn 
proof_is_non_empty_after_value_is_read_inner(flagged: bool) { + let trie_backend = test_trie(flagged); let backend = test_proving(&trie_backend); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); assert!(!backend.extract_proof().is_empty()); @@ -417,11 +425,14 @@ mod tests { #[test] fn passes_through_backend_calls() { - let trie_backend = test_trie(); + passes_through_backend_calls_inner(false); + passes_through_backend_calls_inner(true); + } + fn passes_through_backend_calls_inner(flagged: bool) { + let trie_backend = test_trie(flagged); let proving_backend = test_proving(&trie_backend); assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - let flagged = false; let (trie_root, mut trie_mdb) = trie_backend.storage_root(std::iter::empty(), flagged); let (proving_root, mut proving_mdb) = proving_backend.storage_root(std::iter::empty(), flagged); @@ -431,15 +442,20 @@ mod tests { #[test] fn proof_recorded_and_checked() { + proof_recorded_and_checked_inner(false, false); + proof_recorded_and_checked_inner(false, true); + proof_recorded_and_checked_inner(true, false); + proof_recorded_and_checked_inner(true, true); + } + fn proof_recorded_and_checked_inner(flagged: bool, do_flag: bool) { let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); let in_memory = InMemoryBackend::::default(); - let flagged = false; // TODO test with flag let mut in_memory = in_memory.update(vec![(None, contents)], flagged); let in_memory_root = in_memory.storage_root(::std::iter::empty(), flagged).0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(::std::iter::empty(), flagged).0; + let trie_root = trie.storage_root(::std::iter::empty(), do_flag).0; assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| 
assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -454,6 +470,10 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { + proof_recorded_and_checked_with_child_inner(false); + proof_recorded_and_checked_with_child_inner(true); + } + fn proof_recorded_and_checked_with_child_inner(flagged: bool) { let child_info_1 = ChildInfo::new_default(b"sub1"); let child_info_2 = ChildInfo::new_default(b"sub2"); let child_info_1 = &child_info_1; @@ -466,7 +486,6 @@ mod tests { (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let in_memory = InMemoryBackend::::default(); - let flagged = false; // TODO test with flag let mut in_memory = in_memory.update(contents, flagged); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory.full_storage_root( @@ -525,8 +544,12 @@ mod tests { } #[test] - fn storage_proof_encoded_size_estimation_works() { // TODO same with flag -> test_trie with flag - let trie_backend = test_trie(); + fn storage_proof_encoded_size_estimation_works() { + storage_proof_encoded_size_estimation_works_inner(false); + storage_proof_encoded_size_estimation_works_inner(true); + } + fn storage_proof_encoded_size_estimation_works_inner(flagged: bool) { + let trie_backend = test_trie(flagged); let backend = test_proving(&trie_backend); let check_estimation = |backend: &ProvingBackend<'_, PrefixedMemoryDB, BlakeTwo256>| { diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index ae96d5bc97d2b..986f6db63670f 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -268,7 +268,7 @@ pub mod tests { const CHILD_KEY_1: &[u8] = b"sub1"; - fn test_db() -> (PrefixedMemoryDB, H256) { + fn test_db(hashed_value: bool) -> (PrefixedMemoryDB, H256) { let child_info = ChildInfo::new_default(CHILD_KEY_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); @@ -283,6 
+283,10 @@ pub mod tests { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); let mut trie = TrieDBMut::new(&mut mdb, &mut root); + if hashed_value { + sp_trie::flag_meta_hasher(&mut trie).expect("flag failed"); + } + trie.insert(child_info.prefixed_storage_key().as_slice(), &sub_root[..]) .expect("insert failed"); trie.insert(b"key", b"value").expect("insert failed"); @@ -296,19 +300,27 @@ pub mod tests { (mdb, root) } - pub(crate) fn test_trie() -> TrieBackend, BlakeTwo256> { - let (mdb, root) = test_db(); + pub(crate) fn test_trie(hashed_value: bool) -> TrieBackend, BlakeTwo256> { + let (mdb, root) = test_db(hashed_value); TrieBackend::new(mdb, root) } #[test] fn read_from_storage_returns_some() { - assert_eq!(test_trie().storage(b"key").unwrap(), Some(b"value".to_vec())); + read_from_storage_returns_some_inner(false); + read_from_storage_returns_some_inner(true); + } + fn read_from_storage_returns_some_inner(flagged: bool) { + assert_eq!(test_trie(flagged).storage(b"key").unwrap(), Some(b"value".to_vec())); } #[test] fn read_from_child_storage_returns_some() { - let test_trie = test_trie(); + read_from_child_storage_returns_some_inner(false); + read_from_child_storage_returns_some_inner(true); + } + fn read_from_child_storage_returns_some_inner(flagged: bool) { + let test_trie = test_trie(flagged); assert_eq!( test_trie.child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3").unwrap(), Some(vec![142u8]), @@ -317,12 +329,20 @@ pub mod tests { #[test] fn read_from_storage_returns_none() { - assert_eq!(test_trie().storage(b"non-existing-key").unwrap(), None); + read_from_storage_returns_none_inner(false); + read_from_storage_returns_none_inner(true); + } + fn read_from_storage_returns_none_inner(flagged: bool) { + assert_eq!(test_trie(flagged).storage(b"non-existing-key").unwrap(), None); } #[test] fn pairs_are_not_empty_on_non_empty_storage() { - assert!(!test_trie().pairs().is_empty()); + pairs_are_not_empty_on_non_empty_storage_inner(false); + 
pairs_are_not_empty_on_non_empty_storage_inner(true); + } + fn pairs_are_not_empty_on_non_empty_storage_inner(flagged: bool) { + assert!(!test_trie(flagged).pairs().is_empty()); } #[test] @@ -335,36 +355,50 @@ pub mod tests { #[test] fn storage_root_is_non_default() { - let flagged = false; - assert!(test_trie().storage_root(iter::empty(), flagged).0 != H256::repeat_byte(0)); + storage_root_is_non_default_inner(false); + storage_root_is_non_default_inner(true); + } + fn storage_root_is_non_default_inner(flagged: bool) { + assert!(test_trie(flagged).storage_root(iter::empty(), flagged).0 != H256::repeat_byte(0)); } #[test] fn storage_root_transaction_is_empty() { - let flagged = false; - assert!(test_trie().storage_root(iter::empty(), flagged).1.drain().is_empty()); + storage_root_transaction_is_empty_inner(false); + storage_root_transaction_is_empty_inner(true); + } + fn storage_root_transaction_is_empty_inner(flagged: bool) { + assert!(test_trie(flagged).storage_root(iter::empty(), false).1.drain().is_empty()); } #[test] fn storage_root_flagged_is_not_empty() { - let flagged = true; - assert!(!test_trie().storage_root(iter::empty(), flagged).1.drain().is_empty()); + assert!(!test_trie(false).storage_root(iter::empty(), true).1.drain().is_empty()); } #[test] fn storage_root_transaction_is_non_empty() { - // TODO test with flagged `test_trie` (initially only). 
- let (new_root, mut tx) = test_trie().storage_root( + storage_root_transaction_is_non_empty_inner(false, false); + storage_root_transaction_is_non_empty_inner(false, true); + storage_root_transaction_is_non_empty_inner(true, false); + storage_root_transaction_is_non_empty_inner(true, true); + } + fn storage_root_transaction_is_non_empty_inner(flagged: bool, do_flag: bool) { + let (new_root, mut tx) = test_trie(flagged).storage_root( iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), - false, + do_flag, ); assert!(!tx.drain().is_empty()); - assert!(new_root != test_trie().storage_root(iter::empty(), false).0); + assert!(new_root != test_trie(false).storage_root(iter::empty(), false).0); } #[test] fn prefix_walking_works() { - let trie = test_trie(); + prefix_walking_works_inner(false); + prefix_walking_works_inner(true); + } + fn prefix_walking_works_inner(flagged: bool) { + let trie = test_trie(flagged); let mut seen = HashSet::new(); trie.for_keys_with_prefix(b"value", |key| { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 0d08ac22d2243..50ef70f121af6 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -479,16 +479,26 @@ pub fn flag_inner_meta_hasher( { { let mut t = TrieDBMut::::from_existing(db, &mut root)?; - let flag = true; - let key: &[u8]= &[]; - if !t.flag(key, flag)? { - t.insert(key, b"")?; - assert!(t.flag(key, flag)?); - } + flag_meta_hasher(&mut t)?; } Ok(root) } +/// Flag inner trie with state metadata to enable hash of value internally. +pub fn flag_meta_hasher( + t: &mut TrieDBMut +) -> Result<(), Box>> where + L: TrieConfiguration, +{ + let flag = true; + let key: &[u8]= &[]; + if !t.contains(key)? { + t.insert(key, b"")?; + } + assert!(t.flag(key, flag)?); + Ok(()) +} + /// Read a value from the trie. 
pub fn read_trie_value>( db: &DB, From 5bb0b6889bfc6b1994e98f7deb17326666484428 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 18 May 2021 18:36:07 +0200 Subject: [PATCH 006/188] added todo to fix --- primitives/state-machine/src/proving_backend.rs | 4 ++-- primitives/trie/src/storage_proof.rs | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index b6f1251a03dbf..cbb136d017a83 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -451,11 +451,11 @@ mod tests { let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); let in_memory = InMemoryBackend::::default(); let mut in_memory = in_memory.update(vec![(None, contents)], flagged); - let in_memory_root = in_memory.storage_root(::std::iter::empty(), flagged).0; + let in_memory_root = in_memory.storage_root(std::iter::empty(), do_flag).0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(::std::iter::empty(), do_flag).0; + let trie_root = trie.storage_root(std::iter::empty(), do_flag).0; assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index f0b2bfd4bc3d3..c6cfd34348ce8 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -103,6 +103,7 @@ impl From for crate::MemoryDB { fn from(proof: StorageProof) -> Self { let mut db = crate::MemoryDB::default(); for item in proof.iter_nodes() { + // TODO insert_with_meta here db.insert(crate::EMPTY_PREFIX, &item); } db From 708186489e4a4e26c90cd5de29a96b97110844d7 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 18 May 2021 18:39:05 +0200 Subject: [PATCH 007/188] also 
missing serialize meta to storage proof --- primitives/state-machine/src/proving_backend.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index cbb136d017a83..1346bfc71c6fb 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -174,6 +174,7 @@ impl ProofRecorder { /// Convert into a [`StorageProof`]. pub fn to_storage_proof(&self) -> StorageProof { + // TODO serialize meta. let trie_nodes = self.inner.read() .records .iter() From 3f1ee8390d7b7cebd8fb929c05260c3006b2ce28 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 19 May 2021 11:57:22 +0200 Subject: [PATCH 008/188] extract meta. --- Cargo.lock | 16 ++++----- .../state-machine/src/proving_backend.rs | 7 ++-- primitives/trie/src/lib.rs | 23 +++++++++++-- primitives/trie/src/storage_proof.rs | 34 +++++++++++++++++-- 4 files changed, 64 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1ccdd8f9ea5c7..b0acfa17a74c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2329,7 +2329,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" [[package]] name = "hash256-std-hasher" @@ -2343,7 +2343,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" dependencies = [ "crunchy", ] @@ -2996,7 +2996,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", @@ -3788,7 +3788,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" dependencies = [ "hash-db", "hashbrown", @@ -10332,7 +10332,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" dependencies = [ "criterion", "hash-db", @@ -10347,7 +10347,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" dependencies = [ "hash-db", "hashbrown", @@ -10368,7 +10368,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" dependencies = [ "hash-db", ] @@ -10386,7 +10386,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#594c900614841eea6ab66ac0d169352596ea3322" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 1346bfc71c6fb..747467ee3b66d 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -125,14 +125,15 @@ pub struct ProofRecorder { impl ProofRecorder { /// Record the given `key` => `val` combination. - pub fn record(&self, key: Hash, mut val: Option<(DBValue, TrieMeta)>, hash_len: usize) { + pub fn record(&self, key: Hash, mut val: Option<(DBValue, TrieMeta)>) { let mut inner = self.inner.write(); let ProofRecorderInner { encoded_size, records } = &mut *inner; records.entry(key).or_insert_with(|| { if let Some(val) = val.as_mut() { val.1.set_accessed_value(false); - *encoded_size += sp_trie::estimate_entry_size(val, hash_len); + sp_trie::resolve_encoded_meta::(val); + *encoded_size += sp_trie::estimate_entry_size(val, H::LENGTH); } val }); @@ -244,7 +245,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage } let backend_value = self.backend.get(key, prefix, parent)?; - self.proof_recorder.record(key.clone(), backend_value.clone(), H::LENGTH); + self.proof_recorder.record::(key.clone(), backend_value.clone()); Ok(backend_value) } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 50ef70f121af6..9456f1e2ddfcf 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -163,8 +163,17 @@ impl Meta for TrieMeta { fn decoded_callback( &mut self, - _node_plan: &NodePlan, + node_plan: &NodePlan, ) { + let (contain_hash, range) = match node_plan.value_plan() { + Some(ValuePlan::Value(range)) 
=> (false, range.clone()), + Some(ValuePlan::HashedValue(range, _size)) => (true, range.clone()), + Some(ValuePlan::NoValue) => return, + None => return, + }; + + self.range = Some(range); + self.contain_hash = contain_hash; } fn contains_hash_of_value(&self) -> bool { @@ -241,7 +250,7 @@ impl MetaHasher for StateHasher fn hash(value: &[u8], meta: &Self::Meta) -> H::Out { match &meta { TrieMeta { range: Some(range), contain_hash: false, do_value_hash, .. } => { - if *do_value_hash { + if *do_value_hash && range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { let value = inner_hashed_value::(value, Some((range.start, range.end))); H::hash(value.as_slice()) } else { @@ -788,7 +797,7 @@ impl<'a, DB, H, T, M> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> w } /// Representation of node with with inner hash instead of value. -pub fn inner_hashed_value(x: &[u8], range: Option<(usize, usize)>) -> Vec { +fn inner_hashed_value(x: &[u8], range: Option<(usize, usize)>) -> Vec { if let Some((start, end)) = range { let len = x.len(); if start < len && end == len { @@ -841,6 +850,14 @@ pub fn estimate_entry_size(entry: &(DBValue, TrieMeta), hash_len: usize) -> usiz full_encoded } +/// If needed, call to decode plan in order to record meta. +pub fn resolve_encoded_meta(entry: &mut (DBValue, TrieMeta)) { + use trie_db::NodeCodec; + if entry.1.do_value_hash { + let _ = as TrieLayout>::Codec::decode_plan(entry.0.as_slice(), &mut entry.1); + } +} + /// Constants used into trie simplification codec. 
mod trie_constants { /// Treshold for using hash of value instead of value diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index c6cfd34348ce8..ed755237c4be3 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -101,10 +101,40 @@ impl Iterator for StorageProofNodeIterator { impl From for crate::MemoryDB { fn from(proof: StorageProof) -> Self { + use hash_db::MetaHasher; + use trie_db::NodeCodec; + use crate::{Layout, TrieLayout}; let mut db = crate::MemoryDB::default(); - for item in proof.iter_nodes() { + // Needed because we do not read trie structure, so + // we do a heuristic related to the fact that host function + // only allow global definition. + // Using compact proof will work directly here (read trie structure and + // work directly. + let mut is_hashed_value = false; + let mut accum = Vec::new(); + for item in proof.trie_nodes.iter() { + // Note using `None` as parent meta does not impact `extract_value` of + // sp_trie meta hasher. + // But does not with `insert_with_meta`. + let (encoded_node, mut meta) = < + as TrieLayout>::MetaHasher as MetaHasher + >::extract_value(item.as_slice(), None); + if !is_hashed_value { + // read state meta. + let _ = as TrieLayout>::Codec::decode_plan(encoded_node, &mut meta); + if meta.recorded_do_value_hash { + is_hashed_value = true; + } + } // TODO insert_with_meta here - db.insert(crate::EMPTY_PREFIX, &item); + accum.push((encoded_node, meta)); + } + for mut item in accum.into_iter() { + if is_hashed_value { + // skipping hierarchy. + item.1.do_value_hash = true; + } + db.insert_with_meta(crate::EMPTY_PREFIX, item.0, item.1); } db } From 1bbdf35e6fa7e4734baca5a445d3e8f9311ea930 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 19 May 2021 17:32:12 +0200 Subject: [PATCH 009/188] Isolate old trie layout. 
--- client/api/src/cht.rs | 7 +- client/db/src/bench.rs | 2 +- client/db/src/changes_tries_storage.rs | 2 +- client/db/src/lib.rs | 12 +-- client/light/src/fetcher.rs | 2 +- frame/session/src/historical/mod.rs | 2 +- .../api/proc-macro/src/impl_runtime_apis.rs | 2 +- primitives/state-machine/src/backend.rs | 32 ++++++- .../state-machine/src/changes_trie/mod.rs | 2 +- .../state-machine/src/changes_trie/prune.rs | 4 +- .../state-machine/src/changes_trie/storage.rs | 4 +- .../state-machine/src/in_memory_backend.rs | 2 +- primitives/state-machine/src/lib.rs | 19 +++- .../state-machine/src/proving_backend.rs | 39 +++++---- .../state-machine/src/trie_backend_essence.rs | 24 ++--- primitives/trie/src/lib.rs | 87 +++++++++++++++---- primitives/trie/src/storage_proof.rs | 22 +++-- 17 files changed, 184 insertions(+), 80 deletions(-) diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 235976af09018..5c8dadcd6825f 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -32,8 +32,9 @@ use sp_trie; use sp_core::{H256, convert_hash}; use sp_runtime::traits::{Header as HeaderT, AtLeast32Bit, Zero, One}; use sp_state_machine::{ - MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, - prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend + MemoryDBNoMeta as MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, + prove_read_on_trie_backend, read_proof_check, + read_proof_check_on_proving_backend_generic as read_proof_check_on_proving_backend, }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -171,7 +172,7 @@ pub fn check_proof_on_proving_backend( local_number, remote_hash, |_, local_cht_key| - read_proof_check_on_proving_backend::( + read_proof_check_on_proving_backend::( proving_backend, local_cht_key, ).map_err(ClientError::from_state), diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index bc5ab72dee66e..fdc53ab224966 100644 --- 
a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -58,7 +58,7 @@ impl sp_state_machine::Storage> for StorageDb, _>>::extract_value_owned(value, parent))) .map_err(|e| format!("Database backend error: {:?}", e))?; - recorder.record(key.clone(), backend_value.clone(), HashFor::::LENGTH); + recorder.record::>(key.clone(), backend_value.clone()); Ok(backend_value) } else { self.db.get(0, &prefixed_key) diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 860ca41730518..3f7e19cacfc24 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -24,7 +24,7 @@ use hash_db::Prefix; use codec::{Decode, Encode}; use parking_lot::RwLock; use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_trie::MemoryDB; +use sp_trie::MemoryDBNoMeta as MemoryDB; use sc_client_api::backend::PrunableStateChangesTrieStorage; use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache, HeaderMetadataCache}; use sp_core::{ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash}; diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index ce45c298e9de3..c465598632396 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -65,8 +65,8 @@ use sp_blockchain::{ }; use codec::{Decode, Encode}; use hash_db::Prefix; -use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key, StateHasher, TrieMeta, - MetaHasher}; +use sp_trie::{MemoryDB, MemoryDBNoMeta, PrefixedMemoryDB, prefixed_key, StateHasher, + TrieMeta, MetaHasher}; use sp_database::Transaction; use sp_core::{Hasher, ChangesTrieConfiguration}; use sp_core::offchain::OffchainOverlayedChange; @@ -690,7 +690,7 @@ pub struct BlockImportOperation { storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, offchain_storage_updates: OffchainChangesCollection, - changes_trie_updates: MemoryDB>, + changes_trie_updates: MemoryDBNoMeta>, changes_trie_build_cache_update: Option>>, 
changes_trie_config_update: Option>, pending_block: Option>, @@ -1731,7 +1731,7 @@ impl sc_client_api::backend::Backend for Backend { child_storage_updates: Default::default(), offchain_storage_updates: Default::default(), changes_trie_config_update: None, - changes_trie_updates: MemoryDB::default(), + changes_trie_updates: MemoryDBNoMeta::default(), changes_trie_build_cache_update: None, aux_ops: Vec::new(), finalized_blocks: Vec::new(), @@ -2158,9 +2158,9 @@ pub(crate) mod tests { pub(crate) type Block = RawBlock>; - pub fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { + pub fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDBNoMeta) { let mut changes_root = H256::default(); - let mut changes_trie_update = MemoryDB::::default(); + let mut changes_trie_update = MemoryDBNoMeta::::default(); { let mut trie = TrieDBMut::::new( &mut changes_trie_update, diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index e39cfe07fbf5e..117c2d6970bd9 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -161,7 +161,7 @@ impl> LightDataChecker { H::Out: Ord + codec::Codec, { // all the checks are sharing the same storage - let storage = remote_roots_proof.into_memory_db(); + let storage: sp_state_machine::MemoryDBNoMeta = remote_roots_proof.into(); // remote_roots.keys() are sorted => we can use this to group changes tries roots // that are belongs to the same CHT diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 8902ebe551f6c..0564d159e1e3f 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -35,7 +35,7 @@ use frame_support::{ decl_module, decl_storage, Parameter, print, traits::{ValidatorSet, ValidatorSetWithIdentification}, }; -use sp_trie::{MemoryDB, Trie, TrieMut, Recorder, EMPTY_PREFIX}; +use sp_trie::{MemoryDBNoMeta as MemoryDB, Trie, TrieMut, Recorder, EMPTY_PREFIX}; use sp_trie::trie_types::{TrieDBMut, TrieDB}; use 
super::{SessionIndex, Module as SessionModule}; diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 85ba0788105d7..712987f8ac845 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -276,7 +276,7 @@ fn generate_runtime_api_base_structures() -> Result { fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { self.recorder .take() - .map(|recorder| recorder.to_storage_proof()) + .map(|recorder| recorder.to_storage_proof::<#crate_::HashFor>()) } fn into_storage_changes( diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index e52fa999f149b..90923b19b21c9 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -358,13 +358,19 @@ impl Consolidate for Vec<( } } -impl> Consolidate for sp_trie::GenericMemoryDB { +impl Consolidate for sp_trie::GenericMemoryDB + where + H: Hasher, + MH: sp_trie::MetaHasher, + KF: sp_trie::KeyFunction, +{ fn consolidate(&mut self, other: Self) { sp_trie::GenericMemoryDB::consolidate(self, other) } } /// Insert input pairs into memory db. +/// TODO unused remove? #[cfg(test)] pub(crate) fn insert_into_memory_db(mdb: &mut sp_trie::MemoryDB, input: I) -> Option where @@ -387,6 +393,30 @@ pub(crate) fn insert_into_memory_db(mdb: &mut sp_trie::MemoryDB, input: Some(root) } +/// Insert input pairs into memory db. 
+#[cfg(test)] +pub(crate) fn insert_into_memory_db_no_meta(mdb: &mut sp_trie::MemoryDBNoMeta, input: I) -> Option + where + H: Hasher, + I: IntoIterator, +{ + use sp_trie::{TrieMut, trie_types::TrieDBMutNoMeta}; + + let mut root = ::Out::default(); + { + let mut trie = TrieDBMutNoMeta::::new(mdb, &mut root); + for (key, value) in input { + if let Err(e) = trie.insert(&key, &value) { + log::warn!(target: "trie", "Failed to write to trie: {}", e); + return None; + } + } + } + + Some(root) +} + + /// Wrapper to create a [`RuntimeCode`] from a type that implements [`Backend`]. #[cfg(feature = "std")] pub struct BackendRuntimeCode<'a, B, H> { diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 89d869d38d999..dd6d4f0df3b88 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -73,7 +73,7 @@ use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; use sp_core::storage::PrefixedStorageKey; -use sp_trie::{MemoryDB, DBValue, TrieMut}; +use sp_trie::{MemoryDBNoMeta as MemoryDB, DBValue, TrieMut}; use sp_trie::trie_types::TrieDBMut; use crate::{ StorageKey, diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index a741b814a5c70..4098eadb98b31 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -114,9 +114,9 @@ fn prune_trie( #[cfg(test)] mod tests { use std::collections::HashSet; - use sp_trie::MemoryDB; + use sp_trie::MemoryDBNoMeta as MemoryDB; use sp_core::H256; - use crate::backend::insert_into_memory_db; + use crate::backend::insert_into_memory_db_no_meta as insert_into_memory_db; use crate::changes_trie::storage::InMemoryStorage; use codec::Encode; use sp_runtime::traits::BlakeTwo256; diff --git a/primitives/state-machine/src/changes_trie/storage.rs 
b/primitives/state-machine/src/changes_trie/storage.rs index b41b2e549e82a..f81838f11f6e9 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -21,7 +21,7 @@ use std::collections::{BTreeMap, HashSet, HashMap}; use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; use sp_core::storage::PrefixedStorageKey; use sp_trie::DBValue; -use sp_trie::MemoryDB; +use sp_trie::MemoryDBNoMeta as MemoryDB; use parking_lot::RwLock; use crate::{ StorageKey, @@ -30,7 +30,7 @@ use crate::{ }; #[cfg(test)] -use crate::backend::insert_into_memory_db; +use crate::backend::insert_into_memory_db_no_meta as insert_into_memory_db; #[cfg(test)] use crate::changes_trie::input::{InputPair, ChildIndex}; diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 4e2bef05e50ab..d2a4bf6ae8e98 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -22,7 +22,7 @@ use crate::{ }; use std::collections::{BTreeMap, HashMap}; use hash_db::Hasher; -use sp_trie::{MemoryDB, empty_trie_root, Layout}; +use sp_trie::{MemoryDB, empty_trie_root, trie_types::Layout}; use codec::Codec; use sp_core::storage::{ChildInfo, Storage}; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 8b12c8aa030e6..d12686b43ab2b 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -143,7 +143,8 @@ mod changes_trie { #[cfg(feature = "std")] mod std_reexport { - pub use sp_trie::{trie_types::{Layout, TrieDBMut}, StorageProof, TrieMut, DBValue, MemoryDB}; + pub use sp_trie::{trie_types::{Layout, TrieDBMut}, StorageProof, TrieMut, + DBValue, MemoryDB, MemoryDBNoMeta}; pub use crate::testing::TestExternalities; pub use crate::basic::BasicExternalities; pub use crate::read_only::{ReadOnlyExternalities, InspectState}; @@ -194,7 +195,7 @@ mod execution { /// Type of 
changes trie transaction. pub type ChangesTrieTransaction = ( - MemoryDB, + MemoryDBNoMeta, ChangesTrieCacheAction<::Out, N>, ); @@ -842,6 +843,20 @@ mod execution { where H: Hasher, H::Out: Ord + Codec, + { + read_proof_check_on_proving_backend_generic(proving_backend, key) + } + + /// Check storage read proof on pre-created proving backend. + pub fn read_proof_check_on_proving_backend_generic( + proving_backend: &TrieBackend, H>, + key: &[u8], + ) -> Result>, Box> + where + H: Hasher, + H::Out: Ord + Codec, + MH: sp_trie::MetaHasher, + KF: sp_trie::KeyFunction + Send + Sync, { proving_backend.storage(key).map_err(|e| Box::new(e) as Box) } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 747467ee3b66d..d42d9ae51c3c9 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -174,12 +174,15 @@ impl ProofRecorder { } /// Convert into a [`StorageProof`]. - pub fn to_storage_proof(&self) -> StorageProof { - // TODO serialize meta. + pub fn to_storage_proof(&self) -> StorageProof { let trie_nodes = self.inner.read() .records .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.0.to_vec())) + .filter_map(|(_k, v)| v.as_ref().map(|v| { + < + as sp_trie::TrieLayout>::MetaHasher as hash_db::MetaHasher + >::stored_value(v.0.as_slice(), v.1.clone()) + })) .collect(); StorageProof::new(trie_nodes) @@ -230,7 +233,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> /// Extracting the gathered unordered proof. 
pub fn extract_proof(&self) -> StorageProof { - self.0.essence().backend_storage().proof_recorder.to_storage_proof() + self.0.essence().backend_storage().proof_recorder.to_storage_proof::() } } @@ -443,31 +446,31 @@ mod tests { } #[test] - fn proof_recorded_and_checked() { - proof_recorded_and_checked_inner(false, false); - proof_recorded_and_checked_inner(false, true); - proof_recorded_and_checked_inner(true, false); - proof_recorded_and_checked_inner(true, true); - } - fn proof_recorded_and_checked_inner(flagged: bool, do_flag: bool) { - let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); + fn proof_recorded_and_checked_top() { + proof_recorded_and_checked_inner(true); + proof_recorded_and_checked_inner(false); + } + fn proof_recorded_and_checked_inner(flagged: bool) { + let size_content = 33; // above hashable value treshold. + let value_range = 0..64; + let contents = value_range.clone().map(|i| (vec![i], Some(vec![i; size_content]))).collect::>(); let in_memory = InMemoryBackend::::default(); let mut in_memory = in_memory.update(vec![(None, contents)], flagged); - let in_memory_root = in_memory.storage_root(std::iter::empty(), do_flag).0; - (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); + let in_memory_root = in_memory.storage_root(std::iter::empty(), flagged).0; + value_range.clone().for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(std::iter::empty(), do_flag).0; + let trie_root = trie.storage_root(std::iter::empty(), flagged).0; assert_eq!(in_memory_root, trie_root); - (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); + value_range.clone().for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); let proving = ProvingBackend::new(trie); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); + 
assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); let proof = proving.extract_proof(); let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); + assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); } #[test] diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 3b7d47d9b0c62..594abf38fa698 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -23,7 +23,7 @@ use std::sync::Arc; use sp_std::{ops::Deref, boxed::Box, vec::Vec}; use crate::{warn, debug}; use hash_db::{self, Hasher, Prefix}; -use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, +use sp_trie::{Trie, PrefixedMemoryDB, DBValue, empty_child_trie_root, read_trie_value, read_child_trie_value, for_keys_in_child_trie, KeySpacedDB, TrieDBIterator, TrieMeta}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; @@ -396,21 +396,13 @@ impl TrieBackendStorage for Arc> { } } -// This implementation is used by test storage trie clients. 
-impl TrieBackendStorage for PrefixedMemoryDB { - type Overlay = PrefixedMemoryDB; - - fn get(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Result> { - Ok(hash_db::HashDB::get_with_meta(self, key, prefix, parent)) - } - - fn access_from(&self, key: &H::Out) { - hash_db::HashDB::access_from(self, key, None); - } -} - -impl TrieBackendStorage for MemoryDB { - type Overlay = MemoryDB; +impl TrieBackendStorage for sp_trie::GenericMemoryDB + where + H: Hasher, + MH: sp_trie::MetaHasher, + KF: sp_trie::KeyFunction + Send + Sync, +{ + type Overlay = Self; fn get(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Result> { Ok(hash_db::HashDB::get_with_meta(self, key, prefix, parent)) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 9456f1e2ddfcf..d92a3d34f6110 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -99,7 +99,7 @@ impl Meta for TrieMeta { } fn write_state_meta(&self) -> Vec { - if self.do_value_hash { + if self.recorded_do_value_hash { // Note that this only works with sp_trie codec that // cannot encode node starting by this byte. 
[trie_constants::ENCODED_META_ALLOW_HASH].to_vec() @@ -199,34 +199,39 @@ impl TrieMeta { } /// substrate trie layout -pub struct Layout(sp_std::marker::PhantomData); +pub struct Layout(sp_std::marker::PhantomData<(H, M)>); -impl fmt::Debug for Layout { +impl fmt::Debug for Layout { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Layout").finish() } } -impl Default for Layout { +impl Default for Layout { fn default() -> Self { Layout(sp_std::marker::PhantomData) } } -impl Clone for Layout { +impl Clone for Layout { fn clone(&self) -> Self { Layout(sp_std::marker::PhantomData) } } -impl TrieLayout for Layout { +impl TrieLayout for Layout + where + H: Hasher, + M: MetaHasher, + M::Meta: Meta, +{ const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; const USE_META: bool = true; type Hash = H; type Codec = NodeCodec; - type MetaHasher = StateHasher; - type Meta = TrieMeta; + type MetaHasher = M; + type Meta = M::Meta; fn metainput_for_new_node(&self) -> ::MetaInput { () @@ -236,8 +241,7 @@ impl TrieLayout for Layout { } } -/// Reimplement `NoMeta` `MetaHasher` with -/// additional constraint. +/// Hasher with support to meta. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct StateHasher; @@ -328,7 +332,45 @@ impl MetaHasher for StateHasher } } -impl TrieConfiguration for Layout { +/// Reimplement `NoMeta` `MetaHasher` with +/// additional constraint. 
+#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub struct NoMetaHasher; + +impl MetaHasher for NoMetaHasher + where + H: Hasher, +{ + type Meta = TrieMeta; + + fn hash(value: &[u8], _meta: &Self::Meta) -> H::Out { + H::hash(value) + } + + fn stored_value(value: &[u8], _meta: Self::Meta) -> DBValue { + value.to_vec() + } + + fn stored_value_owned(value: DBValue, _meta: Self::Meta) -> DBValue { + value + } + + fn extract_value<'a>(stored: &'a [u8], _parent_meta: Option<&Self::Meta>) -> (&'a [u8], Self::Meta) { + (stored, Default::default()) + } + + fn extract_value_owned(stored: DBValue, _parent_meta: Option<&Self::Meta>) -> (DBValue, Self::Meta) { + (stored, Default::default()) + } +} + + +impl TrieConfiguration for Layout + where + H: Hasher, + M: MetaHasher, + M::Meta: Meta, +{ fn trie_root(&self, input: I) -> ::Out where I: IntoIterator, A: AsRef<[u8]> + Ord, @@ -374,14 +416,20 @@ pub type PrefixedMemoryDB = memory_db::MemoryDB< pub type MemoryDB = memory_db::MemoryDB< H, memory_db::HashKey, trie_db::DBValue, StateHasher, MemTracker, >; +/// Reexport from `hash_db`, with genericity set for `Hasher` trait. +/// This uses a noops `KeyFunction` (key addressing must be hashed or using +/// an encoding scheme that avoid key conflict). +pub type MemoryDBNoMeta = memory_db::MemoryDB< + H, memory_db::HashKey, trie_db::DBValue, NoMetaHasher, MemTracker, +>; /// MemoryDB with specific meta hasher. pub type MemoryDBMeta = memory_db::MemoryDB< H, memory_db::HashKey, trie_db::DBValue, M, MemTracker, >; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub type GenericMemoryDB = memory_db::MemoryDB< - H, KF, trie_db::DBValue, StateHasher, MemTracker +pub type GenericMemoryDB = memory_db::MemoryDB< + H, KF, trie_db::DBValue, MH, MemTracker >; /// Persistent trie database read-access interface for the a given hasher. @@ -395,11 +443,18 @@ pub type TrieHash = <::Hash as Hasher>::Out; /// This module is for non generic definition of trie type. 
/// Only the `Hasher` trait is generic in this case. pub mod trie_types { - pub type Layout = super::Layout; + /// State layout. + pub type Layout = super::Layout; + /// Old state layout definition, do not use meta, do not + /// do internal value hashing. + pub type LayoutNoMeta = super::Layout; /// Persistent trie database read-access interface for the a given hasher. pub type TrieDB<'a, H> = super::TrieDB<'a, Layout>; /// Persistent trie database write-access interface for the a given hasher. pub type TrieDBMut<'a, H> = super::TrieDBMut<'a, Layout>; + /// Persistent trie database write-access interface for the a given hasher, + /// old layout. + pub type TrieDBMutNoMeta<'a, H> = super::TrieDBMut<'a, LayoutNoMeta>; /// Querying interface, as in `trie_db` but less generic. pub type Lookup<'a, H, Q> = trie_db::Lookup<'a, Layout, Q>; /// As in `trie_db`, but less generic, error type for the crate. @@ -854,7 +909,7 @@ pub fn estimate_entry_size(entry: &(DBValue, TrieMeta), hash_len: usize) -> usiz pub fn resolve_encoded_meta(entry: &mut (DBValue, TrieMeta)) { use trie_db::NodeCodec; if entry.1.do_value_hash { - let _ = as TrieLayout>::Codec::decode_plan(entry.0.as_slice(), &mut entry.1); + let _ = as TrieLayout>::Codec::decode_plan(entry.0.as_slice(), &mut entry.1); } } @@ -884,7 +939,7 @@ mod tests { use trie_standardmap::{Alphabet, ValueMode, StandardMap}; use hex_literal::hex; - type Layout = super::Layout; + type Layout = super::trie_types::Layout; fn hashed_null_node() -> TrieHash { >::hashed_null_node() diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index ed755237c4be3..915dd92b1c16c 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -103,7 +103,7 @@ impl From for crate::MemoryDB { fn from(proof: StorageProof) -> Self { use hash_db::MetaHasher; use trie_db::NodeCodec; - use crate::{Layout, TrieLayout}; + use crate::{trie_types::Layout, TrieLayout}; let mut db = 
crate::MemoryDB::default(); // Needed because we do not read trie structure, so // we do a heuristic related to the fact that host function @@ -119,12 +119,10 @@ impl From for crate::MemoryDB { let (encoded_node, mut meta) = < as TrieLayout>::MetaHasher as MetaHasher >::extract_value(item.as_slice(), None); - if !is_hashed_value { - // read state meta. - let _ = as TrieLayout>::Codec::decode_plan(encoded_node, &mut meta); - if meta.recorded_do_value_hash { - is_hashed_value = true; - } + // read state meta. + let _ = as TrieLayout>::Codec::decode_plan(encoded_node, &mut meta); + if meta.recorded_do_value_hash { + is_hashed_value = true; } // TODO insert_with_meta here accum.push((encoded_node, meta)); @@ -139,3 +137,13 @@ impl From for crate::MemoryDB { db } } + +impl From for crate::MemoryDBNoMeta { + fn from(proof: StorageProof) -> Self { + let mut db = crate::MemoryDBNoMeta::default(); + for item in proof.iter_nodes() { + db.insert(crate::EMPTY_PREFIX, &item); + } + db + } +} From e5feabfc3bb1536e56f17cf81eefd7fbf22de3fb Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 19 May 2021 17:48:42 +0200 Subject: [PATCH 010/188] failing test that requires storing in meta when old hash scheme is used. 
--- Cargo.lock | 16 +++++------ .../state-machine/src/proving_backend.rs | 27 +++++++++++++++++++ 2 files changed, 35 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b0acfa17a74c8..79c33175dcfc3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2329,7 +2329,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" [[package]] name = "hash256-std-hasher" @@ -2343,7 +2343,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" dependencies = [ "crunchy", ] @@ -2996,7 +2996,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", @@ -3788,7 +3788,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" dependencies = [ "hash-db", "hashbrown", @@ -10332,7 +10332,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" dependencies = [ "criterion", "hash-db", @@ -10347,7 +10347,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" dependencies = [ "hash-db", "hashbrown", @@ -10368,7 +10368,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" dependencies = [ "hash-db", ] @@ -10386,7 +10386,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#b3a6725a3d7f25185826b97f5efc1174a34c58f4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index d42d9ae51c3c9..47b478b1d2da0 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -473,6 +473,33 @@ mod tests { assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); } + #[test] + fn proof_recorded_and_checked_old_hash() { + // test proof starting with old hash content and flagging in between. 
+ let size_content = 33; // above hashable value treshold. + let value_range = 0..64; + let contents = value_range.clone().map(|i| (vec![i], Some(vec![i; size_content]))).collect::>(); + let in_memory = InMemoryBackend::::default(); + let mut in_memory = in_memory.update(vec![(None, contents)], false); + let in_memory_root = in_memory.storage_root(std::iter::empty(), false).0; + value_range.clone().for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); + + in_memory = in_memory.update(vec![], true); + let in_memory_root = in_memory.storage_root(std::iter::empty(), false).0; + let trie = in_memory.as_trie_backend().unwrap(); + let trie_root = trie.storage_root(std::iter::empty(), false).0; + assert_eq!(in_memory_root, trie_root); + value_range.clone().for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); + + let proving = ProvingBackend::new(trie); + assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); + + let proof = proving.extract_proof(); + + let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); + } + #[test] fn proof_recorded_and_checked_with_child() { proof_recorded_and_checked_with_child_inner(false); From aa78ed690e05048c66f6f3e86518a29fd825a7ec Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 19 May 2021 18:11:02 +0200 Subject: [PATCH 011/188] old hash compatibility --- primitives/trie/src/lib.rs | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index d92a3d34f6110..ade0c52ddcad5 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -69,6 +69,12 @@ pub struct TrieMeta { // change on access explicitely: `HashDB::get_with_meta`. // and reset on access explicitely: `HashDB::access_from`. 
pub unused_value: bool, + // Indicate that a node is using old hash scheme. + // Write with `do_value_hash` inactive will set this to + // true. + // In this case hash is not doing internal hashing, + // but next write with `do_value_hash` will remove switch scheme. + pub old_hash: bool, } impl Meta for TrieMeta { @@ -150,6 +156,10 @@ impl Meta for TrieMeta { self.range = Some(range); self.contain_hash = contain_hash; + if self.do_value_hash { + // Switch value hashing. + self.old_hash = false; + } } fn set_child_callback( @@ -253,7 +263,7 @@ impl MetaHasher for StateHasher fn hash(value: &[u8], meta: &Self::Meta) -> H::Out { match &meta { - TrieMeta { range: Some(range), contain_hash: false, do_value_hash, .. } => { + TrieMeta { range: Some(range), contain_hash: false, do_value_hash, old_hash: false, .. } => { if *do_value_hash && range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { let value = inner_hashed_value::(value, Some((range.start, range.end))); H::hash(value.as_slice()) @@ -273,6 +283,22 @@ impl MetaHasher for StateHasher fn stored_value(value: &[u8], mut meta: Self::Meta) -> DBValue { let mut stored = Vec::with_capacity(value.len() + 1); + if meta.old_hash { + // write as old hash. + stored.push(trie_constants::OLD_HASHING); + stored.extend_from_slice(value); + return stored; + } + if !meta.do_value_hash { + if let Some(range) = meta.range.as_ref() { + if range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { + // write as old hash. + stored.push(trie_constants::OLD_HASHING); + stored.extend_from_slice(value); + return stored; + } + } + } if meta.contain_hash { // already contain hash, just flag it. 
stored.push(trie_constants::DEAD_HEADER_META_HASHED_VALUE); @@ -305,16 +331,22 @@ impl MetaHasher for StateHasher fn extract_value<'a>(mut stored: &'a [u8], parent_meta: Option<&Self::Meta>) -> (&'a [u8], Self::Meta) { let input = &mut stored; let mut contain_hash = false; + let mut old_hash = false; if input.get(0) == Some(&trie_constants::DEAD_HEADER_META_HASHED_VALUE) { contain_hash = true; *input = &input[1..]; } + if input.get(0) == Some(&trie_constants::OLD_HASHING) { + old_hash = true; + *input = &input[1..]; + } let mut meta = TrieMeta { range: None, unused_value: contain_hash, contain_hash, do_value_hash: false, recorded_do_value_hash: false, + old_hash, }; // get recorded_do_value_hash let _offset = meta.read_state_meta(stored) @@ -364,7 +396,6 @@ impl MetaHasher for NoMetaHasher } } - impl TrieConfiguration for Layout where H: Hasher, @@ -923,6 +954,9 @@ mod trie_constants { pub const ENCODED_META_ALLOW_HASH: u8 = FIRST_PREFIX | 0b_01; /// In proof this header is used when only hashed value is stored. pub const DEAD_HEADER_META_HASHED_VALUE: u8 = FIRST_PREFIX | 0b_00_10; + /// If inner hashing should apply, but state is not flagged, then set + /// this meta to avoid checking both variant of hashes. + pub const OLD_HASHING: u8 = FIRST_PREFIX | 0b_00_11; pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize; pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6; pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6; From 045b0971ff1b281ee276e44959696cb40e0b8de3 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 19 May 2021 19:10:10 +0200 Subject: [PATCH 012/188] Db migrate. 
--- client/db/src/upgrade.rs | 38 +++++++++++++++++++++++++++++++++++++- primitives/trie/src/lib.rs | 26 ++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index ea91b8253e1d8..f5c0992ea231f 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -31,7 +31,7 @@ use codec::{Decode, Encode}; const VERSION_FILE_NAME: &'static str = "db_version"; /// Current db version. -const CURRENT_VERSION: u32 = 3; +const CURRENT_VERSION: u32 = 4; /// Number of columns in v1. const V1_NUM_COLUMNS: u32 = 11; @@ -49,6 +49,7 @@ pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> sp_bl migrate_2_to_3::(db_path, db_type)? }, 2 => migrate_2_to_3::(db_path, db_type)?, + 3 => migrate_3_to_4::(db_path, db_type)?, CURRENT_VERSION => (), _ => Err(sp_blockchain::Error::Backend(format!("Future database version: {}", db_version)))?, } @@ -98,6 +99,41 @@ fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> sp_b Ok(()) } +/// Migration from version3 to version4: +/// - Trie state meta for state that could be hashed internaly. +fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> sp_blockchain::Result<()> { + let db_path = db_path.to_str() + .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; + let db_cfg = DatabaseConfig::with_columns(V2_NUM_COLUMNS); + let db = Database::open(&db_cfg, db_path).map_err(db_err)?; + + let batch_size = 10_000; // TODO use bigger size (need to iterate all each time). + loop { + let mut full_batch = false; + let mut size = 0; + let mut transaction = db.transaction(); + // Get all the keys we need to update. + // Note that every batch will restart full iter, could use + // a `iter_from` function. 
+ for entry in db.iter(columns::STATE) { + if let Some(new_val) = sp_trie::tag_old_hashes::>(&entry.1) { + transaction.put_vec(columns::STATE, &entry.0, new_val); + size += 1; + if size == batch_size { + full_batch = true; + break; + } + } + } + db.write(transaction).map_err(db_err)?; + if !full_batch { + break; + } + } + Ok(()) +} + + /// Reads current database version from the file at given path. /// If the file does not exist returns 0. fn current_version(path: &Path) -> sp_blockchain::Result { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index ade0c52ddcad5..33b3fd64e564c 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -963,6 +963,32 @@ mod trie_constants { pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6; } +/// Utility to tag a state without meta with old_hash internal +/// hashing. +pub fn tag_old_hashes(existing: &[u8]) -> Option> { + use trie_db::NodeCodec; + let mut meta = TrieMeta::default(); + // allows restarting a migration. + if existing.len() > 0 && existing[0] == trie_constants::OLD_HASHING { + return None; // allow restarting a migration. + } + let _ = as TrieLayout>::Codec::decode_plan(existing, &mut meta) + .expect("Invalid db state entry found: {:?}, entry.0.as_slice()"); + match meta.range { + Some(range) => { + if range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { + let mut res = Vec::with_capacity(existing.len() + 1); + res.push(trie_constants::OLD_HASHING); + res.extend_from_slice(existing); + Some(res) + } else { + None + } + }, + None => None, + } +} + #[cfg(test)] mod tests { use super::*; From 79a43dd2d4518ab9dccd31a431f30d7e442469a1 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 20 May 2021 12:02:29 +0200 Subject: [PATCH 013/188] runing tests with both states when interesting. 
--- bin/node/testing/src/client.rs | 2 - client/api/src/light.rs | 2 +- .../basic-authorship/src/basic_authorship.rs | 8 ++-- client/consensus/aura/src/lib.rs | 2 +- client/consensus/babe/src/aux_schema.rs | 2 +- client/consensus/slots/src/aux_schema.rs | 2 +- client/finality-grandpa/src/aux_schema.rs | 8 ++-- client/network/src/light_client_requests.rs | 26 ++++++++--- client/network/test/src/block_import.rs | 20 +++++---- client/network/test/src/lib.rs | 2 +- client/offchain/src/lib.rs | 2 +- client/rpc/src/chain/tests.rs | 18 +++++--- client/rpc/src/state/tests.rs | 27 +++++++++--- client/service/test/src/client/light.rs | 40 +++++++++++++---- client/service/test/src/client/mod.rs | 44 +++++++++++++------ client/transaction-pool/src/testing/pool.rs | 4 +- .../state-machine/src/proving_backend.rs | 2 - primitives/trie/src/lib.rs | 34 ++++++++++---- test-utils/client/src/lib.rs | 11 +++++ test-utils/runtime/client/src/lib.rs | 18 +++++--- utils/frame/rpc/system/src/lib.rs | 8 ++-- 21 files changed, 192 insertions(+), 90 deletions(-) diff --git a/bin/node/testing/src/client.rs b/bin/node/testing/src/client.rs index c4ace4ced9b42..e5bdd4e6f723c 100644 --- a/bin/node/testing/src/client.rs +++ b/bin/node/testing/src/client.rs @@ -75,5 +75,3 @@ impl TestClientBuilderExt for substrate_test_client::TestClientBuilder< self.build_with_native_executor(None).0 } } - - diff --git a/client/api/src/light.rs b/client/api/src/light.rs index a068e2d4a3417..ba6ee74a7caed 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -322,7 +322,7 @@ pub mod tests { fn into(self) -> ClientError { ClientError::Application(Box::new(self)) } - } + } pub type OkCallFetcher = Mutex>; diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index c8277d3b5d32c..084057bde29f1 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -504,7 +504,7 @@ mod tests { #[test] 
fn should_cease_building_block_when_deadline_is_reached() { // given - let client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new(true)); let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), @@ -564,7 +564,7 @@ mod tests { #[test] fn should_not_panic_when_deadline_is_reached() { - let client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new(true)); let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), @@ -675,7 +675,7 @@ mod tests { #[test] fn should_not_remove_invalid_transactions_when_skipping() { // given - let mut client = Arc::new(substrate_test_runtime_client::new()); + let mut client = Arc::new(substrate_test_runtime_client::new(true)); let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), @@ -766,7 +766,7 @@ mod tests { #[test] fn should_cease_building_block_when_block_limit_is_reached() { - let client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new(true)); let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index ce254799d61f7..5cb1079c3a98f 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -765,7 +765,7 @@ mod tests { #[test] fn authorities_call_works() { - let client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(true); assert_eq!(client.chain_info().best_number, 0); assert_eq!(authorities(&client, &BlockId::Number(0)).unwrap(), vec![ diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index 8b8804e3bfb02..cf8b3bbe0bd33 100644 --- 
a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -156,7 +156,7 @@ mod test { epoch_index: 1, duration: 100, }; - let client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(true); let mut v0_tree = ForkTree::, _>::new(); v0_tree.import::<_, ConsensusError>( Default::default(), diff --git a/client/consensus/slots/src/aux_schema.rs b/client/consensus/slots/src/aux_schema.rs index db94ec48855e4..13612613f47af 100644 --- a/client/consensus/slots/src/aux_schema.rs +++ b/client/consensus/slots/src/aux_schema.rs @@ -159,7 +159,7 @@ mod test { #[test] fn check_equivocation_works() { - let client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(true); let (pair, _seed) = sr25519::Pair::generate(); let public = pair.public(); diff --git a/client/finality-grandpa/src/aux_schema.rs b/client/finality-grandpa/src/aux_schema.rs index 296f7c13c5244..169366a9fa710 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -561,7 +561,7 @@ mod test { #[test] fn load_decode_from_v0_migrates_data_format() { - let client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(true); let authorities = vec![(AuthorityId::default(), 100)]; let set_id = 3; @@ -654,7 +654,7 @@ mod test { #[test] fn load_decode_from_v1_migrates_data_format() { - let client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(true); let authorities = vec![(AuthorityId::default(), 100)]; let set_id = 3; @@ -750,7 +750,7 @@ mod test { #[test] fn load_decode_from_v2_migrates_data_format() { - let client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(true); let authorities = vec![(AuthorityId::default(), 100)]; let set_id = 3; @@ -823,7 +823,7 @@ mod test { #[test] fn write_read_concluded_rounds() { - let client = 
substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(true); let hash = H256::random(); let round_state = RoundState::genesis((hash, 0)); diff --git a/client/network/src/light_client_requests.rs b/client/network/src/light_client_requests.rs index f859a35f45b24..14e95424ee70e 100644 --- a/client/network/src/light_client_requests.rs +++ b/client/network/src/light_client_requests.rs @@ -182,8 +182,8 @@ mod tests { type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; - fn send_receive(request: sender::Request, pool: &LocalPool) { - let client = Arc::new(substrate_test_runtime_client::new()); + fn send_receive(request: sender::Request, pool: &LocalPool, hashed_value: bool) { + let client = Arc::new(substrate_test_runtime_client::new(hashed_value)); let (handler, protocol_config) = handler::LightClientRequestHandler::new(&protocol_id(), client); pool.spawner().spawn_obj(handler.run().boxed().into()).unwrap(); @@ -215,6 +215,10 @@ mod tests { #[test] fn send_receive_call() { + send_receive_call_inner(true); + send_receive_call_inner(false); + } + fn send_receive_call_inner(hashed_value: bool) { let chan = oneshot::channel(); let request = light::RemoteCallRequest { block: Default::default(), @@ -228,13 +232,17 @@ mod tests { send_receive(sender::Request::Call { request, sender: chan.0, - }, &pool); + }, &pool, hashed_value); assert_eq!(vec![42], pool.run_until(chan.1).unwrap().unwrap()); // ^--- from `DummyFetchChecker::check_execution_proof` } #[test] fn send_receive_read() { + send_receive_read_inner(true); + send_receive_read_inner(false); + } + fn send_receive_read_inner(hashed_value: bool) { let chan = oneshot::channel(); let request = light::RemoteReadRequest { header: dummy_header(), @@ -246,7 +254,7 @@ mod tests { send_receive(sender::Request::Read { request, sender: chan.0, - }, &pool); + }, &pool, hashed_value); assert_eq!( Some(vec![42]), pool.run_until(chan.1) @@ -260,6 +268,10 @@ mod tests { #[test] fn 
send_receive_read_child() { + send_receive_read_child_inner(true); + send_receive_read_child_inner(false); + } + fn send_receive_read_child_inner(hashed_value: bool) { let chan = oneshot::channel(); let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); let request = light::RemoteReadChildRequest { @@ -273,7 +285,7 @@ mod tests { send_receive(sender::Request::ReadChild { request, sender: chan.0, - }, &pool); + }, &pool, hashed_value); assert_eq!( Some(vec![42]), pool.run_until(chan.1) @@ -298,7 +310,7 @@ mod tests { send_receive(sender::Request::Header { request, sender: chan.0, - }, &pool); + }, &pool, true); // The remote does not know block 1: assert_matches!( pool.run_until(chan.1).unwrap(), @@ -327,7 +339,7 @@ mod tests { send_receive(sender::Request::Changes { request, sender: chan.0, - }, &pool); + }, &pool, true); assert_eq!(vec![(100, 2)], pool.run_until(chan.1).unwrap().unwrap()); // ^--- from `DummyFetchChecker::check_changes_proof` } diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index b3641d4b41214..4119e009b195e 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -29,8 +29,8 @@ use sc_block_builder::BlockBuilderProvider; use futures::executor::block_on; use super::*; -fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) { - let mut client = substrate_test_runtime_client::new(); +fn prepare_good_block(hashed_value: bool) -> (TestClient, Hash, u64, PeerId, IncomingBlock) { + let mut client = substrate_test_runtime_client::new(hashed_value); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; block_on(client.import(BlockOrigin::File, block)).unwrap(); @@ -51,13 +51,17 @@ fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) #[test] fn import_single_good_block_works() { - let (_, _hash, number, peer_id, block) = prepare_good_block(); + 
import_single_good_block_works_inner(true); + import_single_good_block_works_inner(false); +} +fn import_single_good_block_works_inner(hashed_value: bool) { + let (_, _hash, number, peer_id, block) = prepare_good_block(hashed_value); let mut expected_aux = ImportedAux::default(); expected_aux.is_new_best = true; match block_on(import_single_block( - &mut substrate_test_runtime_client::new(), + &mut substrate_test_runtime_client::new(hashed_value), BlockOrigin::File, block, &mut PassThroughVerifier::new(true) @@ -70,7 +74,7 @@ fn import_single_good_block_works() { #[test] fn import_single_good_known_block_is_ignored() { - let (mut client, _hash, number, _, block) = prepare_good_block(); + let (mut client, _hash, number, _, block) = prepare_good_block(true); match block_on(import_single_block( &mut client, BlockOrigin::File, @@ -84,10 +88,10 @@ fn import_single_good_known_block_is_ignored() { #[test] fn import_single_good_block_without_header_fails() { - let (_, _, _, peer_id, mut block) = prepare_good_block(); + let (_, _, _, peer_id, mut block) = prepare_good_block(true); block.header = None; match block_on(import_single_block( - &mut substrate_test_runtime_client::new(), + &mut substrate_test_runtime_client::new(true), BlockOrigin::File, block, &mut PassThroughVerifier::new(true) @@ -106,7 +110,7 @@ fn async_import_queue_drops() { let queue = BasicQueue::new( verifier, - Box::new(substrate_test_runtime_client::new()), + Box::new(substrate_test_runtime_client::new(true)), None, &executor, None, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 8e56005dad25d..47582607f8bb7 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -822,7 +822,7 @@ pub trait TestNetFactory: Sized where >: /// Add a light peer. 
fn add_light_peer(&mut self) { - let (c, backend) = substrate_test_runtime_client::new_light(); + let (c, backend) = substrate_test_runtime_client::new_light(true); let client = Arc::new(c); let ( block_import, diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 26975edbd6b63..ae49f1d6384ff 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -285,7 +285,7 @@ mod tests { fn should_call_into_runtime_and_produce_extrinsic() { sp_tracing::try_init_simple(); - let client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new(true)); let spawner = sp_core::testing::TaskExecutor::new(); let pool = TestPool(BasicPool::new_full( Default::default(), diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index bb673d65ea0f2..4c0ea8e8a43ae 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -30,7 +30,7 @@ use crate::testing::TaskExecutor; #[test] fn should_return_header() { - let client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new(true)); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); assert_matches!( @@ -62,7 +62,11 @@ fn should_return_header() { #[test] fn should_return_a_block() { - let mut client = Arc::new(substrate_test_runtime_client::new()); + should_return_a_block_inner(true); + should_return_a_block_inner(false); +} +fn should_return_a_block_inner(hashed_value: bool) { + let mut client = Arc::new(substrate_test_runtime_client::new(hashed_value)); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -113,7 +117,7 @@ fn should_return_a_block() { #[test] fn should_return_block_hash() { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let mut client = 
Arc::new(substrate_test_runtime_client::new(true)); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); assert_matches!( @@ -157,7 +161,7 @@ fn should_return_block_hash() { #[test] fn should_return_finalized_hash() { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let mut client = Arc::new(substrate_test_runtime_client::new(true)); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); assert_matches!( @@ -187,7 +191,7 @@ fn should_notify_about_latest_block() { let (subscriber, id, transport) = Subscriber::new_test("test"); { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let mut client = Arc::new(substrate_test_runtime_client::new(true)); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); api.subscribe_all_heads(Default::default(), subscriber); @@ -217,7 +221,7 @@ fn should_notify_about_best_block() { let (subscriber, id, transport) = Subscriber::new_test("test"); { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let mut client = Arc::new(substrate_test_runtime_client::new(true)); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); api.subscribe_new_heads(Default::default(), subscriber); @@ -247,7 +251,7 @@ fn should_notify_about_finalized_block() { let (subscriber, id, transport) = Subscriber::new_test("test"); { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let mut client = Arc::new(substrate_test_runtime_client::new(true)); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); api.subscribe_finalized_heads(Default::default(), subscriber); diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index cfc27c7bf525e..4529f515a9e20 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -138,7 +138,7 @@ fn should_return_child_storage() { #[test] fn should_call_contract() { 
- let client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new(true)); let genesis_hash = client.genesis_hash(); let (client, _child) = new_full( client, @@ -157,7 +157,7 @@ fn should_notify_about_storage_changes() { let (subscriber, id, transport) = Subscriber::new_test("test"); { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let mut client = Arc::new(substrate_test_runtime_client::new(true)); let (api, _child) = new_full( client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), @@ -195,7 +195,7 @@ fn should_send_initial_storage_changes_and_notifications() { let (subscriber, id, transport) = Subscriber::new_test("test"); { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let mut client = Arc::new(substrate_test_runtime_client::new(true)); let (api, _child) = new_full( client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), @@ -435,7 +435,8 @@ fn should_query_storage() { ); } - run_tests(Arc::new(substrate_test_runtime_client::new()), false); + run_tests(Arc::new(substrate_test_runtime_client::new(true)), false); + run_tests(Arc::new(substrate_test_runtime_client::new(false)), false); run_tests( Arc::new( TestClientBuilder::new() @@ -444,6 +445,15 @@ fn should_query_storage() { ), true, ); + run_tests( + Arc::new( + TestClientBuilder::new() + .state_hashed_value() + .changes_trie_config(Some(ChangesTrieConfiguration::new(4, 2))) + .build(), + ), + true, + ); } #[test] @@ -455,10 +465,13 @@ fn should_split_ranges() { assert_eq!(split_range(100, Some(99)), (0..99, Some(99..100))); } - #[test] fn should_return_runtime_version() { - let client = Arc::new(substrate_test_runtime_client::new()); + should_return_runtime_version_inner(true); + should_return_runtime_version_inner(false); +} +fn should_return_runtime_version_inner(hashed_value: bool) { + let client = Arc::new(substrate_test_runtime_client::new(hashed_value)); let (api, _child) = new_full( 
client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), @@ -485,7 +498,7 @@ fn should_notify_on_runtime_version_initially() { let (subscriber, id, transport) = Subscriber::new_test("test"); { - let client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new(true)); let (api, _child) = new_full( client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index f9daf460e8eb5..d4e3220de73c6 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -305,6 +305,10 @@ fn light_aux_store_is_updated_via_non_importing_op() { #[test] fn execution_proof_is_generated_and_checked() { + execution_proof_is_generated_and_checked_inner(true); + execution_proof_is_generated_and_checked_inner(false); +} +fn execution_proof_is_generated_and_checked_inner(hashed_value: bool) { fn execute(remote_client: &TestClient, at: u64, method: &'static str) -> (Vec, Vec) { let remote_block_id = BlockId::Number(at); let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); @@ -371,7 +375,7 @@ fn execution_proof_is_generated_and_checked() { } // prepare remote client - let mut remote_client = substrate_test_runtime_client::new(); + let mut remote_client = substrate_test_runtime_client::new(hashed_value); for i in 1u32..3u32 { let mut digest = Digest::default(); digest.push(sp_runtime::generic::DigestItem::Other::(i.to_le_bytes().to_vec())); @@ -451,9 +455,9 @@ type TestChecker = LightDataChecker< DummyStorage, >; -fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) { +fn prepare_for_read_proof_check(hashed_value: bool) -> (TestChecker, Header, StorageProof, u32) { // prepare remote client - let remote_client = substrate_test_runtime_client::new(); + let remote_client = substrate_test_runtime_client::new(hashed_value); let remote_block_id = 
BlockId::Number(0); let flagged = false; let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); @@ -536,9 +540,9 @@ fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, V (local_checker, remote_block_header, remote_read_proof, child_value) } -fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Header, StorageProof) { +fn prepare_for_header_proof_check(insert_cht: bool, hashed_value: bool) -> (TestChecker, Hash, Header, StorageProof) { // prepare remote client - let mut remote_client = substrate_test_runtime_client::new(); + let mut remote_client = substrate_test_runtime_client::new(hashed_value); let mut local_headers_hashes = Vec::new(); for i in 0..4 { let block = remote_client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -578,7 +582,16 @@ fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { #[test] fn storage_read_proof_is_generated_and_checked() { - let (local_checker, remote_block_header, remote_read_proof, heap_pages) = prepare_for_read_proof_check(); + storage_read_proof_is_generated_and_checked_inner(true); + storage_read_proof_is_generated_and_checked_inner(false); +} +fn storage_read_proof_is_generated_and_checked_inner(hashed_value: bool) { + let ( + local_checker, + remote_block_header, + remote_read_proof, + heap_pages, + ) = prepare_for_read_proof_check(hashed_value); assert_eq!((&local_checker as &dyn FetchChecker).check_read_proof(&RemoteReadRequest::
{ block: remote_block_header.hash(), header: remote_block_header, @@ -610,7 +623,11 @@ fn storage_child_read_proof_is_generated_and_checked() { #[test] fn header_proof_is_generated_and_checked() { - let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); + header_proof_is_generated_and_checked_inner(true); + header_proof_is_generated_and_checked_inner(false); +} +fn header_proof_is_generated_and_checked_inner(hashed: bool) { + let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true, hashed); assert_eq!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ cht_root: local_cht_root, block: 1, @@ -620,7 +637,7 @@ fn header_proof_is_generated_and_checked() { #[test] fn check_header_proof_fails_if_cht_root_is_invalid() { - let (local_checker, _, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); + let (local_checker, _, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true, true); remote_block_header.number = 100; assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ cht_root: Default::default(), @@ -631,7 +648,12 @@ fn check_header_proof_fails_if_cht_root_is_invalid() { #[test] fn check_header_proof_fails_if_invalid_header_provided() { - let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); + let ( + local_checker, + local_cht_root, + mut remote_block_header, + remote_header_proof, + ) = prepare_for_header_proof_check(true, true); remote_block_header.number = 100; assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ cht_root: local_cht_root, diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 0b4a9ed76c56d..562a1aa830a91 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -340,7 +340,11 @@ fn construct_genesis_with_bad_transaction_should_panic() { #[test] fn client_initializes_from_genesis_ok() { - let client = substrate_test_runtime_client::new(); + client_initializes_from_genesis_ok_inner(false); + client_initializes_from_genesis_ok_inner(true); +} +fn client_initializes_from_genesis_ok_inner(hashed_value: bool) { + let client = substrate_test_runtime_client::new(hashed_value); assert_eq!( client.runtime_api().balance_of( @@ -360,7 +364,7 @@ fn client_initializes_from_genesis_ok() { #[test] fn block_builder_works_with_no_transactions() { - let mut client = substrate_test_runtime_client::new(); + let mut client = substrate_test_runtime_client::new(true); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -371,7 +375,11 @@ fn block_builder_works_with_no_transactions() { #[test] fn block_builder_works_with_transactions() { - let mut client = substrate_test_runtime_client::new(); + block_builder_works_with_transactions_inner(true); + block_builder_works_with_transactions_inner(false); +} +fn block_builder_works_with_transactions_inner(hashed_value: bool) { + let mut client = substrate_test_runtime_client::new(hashed_value); let mut builder = client.new_block(Default::default()).unwrap(); @@ -408,7 +416,7 @@ fn block_builder_works_with_transactions() { #[test] fn block_builder_does_not_include_invalid() { - let mut client = substrate_test_runtime_client::new(); + let mut client = substrate_test_runtime_client::new(true); let mut builder = client.new_block(Default::default()).unwrap(); @@ -473,7 +481,7 @@ fn best_containing_with_hash_not_found() { fn uncles_with_only_ancestors() { // block tree: // G -> A1 -> A2 - let mut client = 
substrate_test_runtime_client::new(); + let mut client = substrate_test_runtime_client::new(true); // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -493,7 +501,7 @@ fn uncles_with_multiple_forks() { // A1 -> B2 -> B3 -> B4 // B2 -> C3 // A1 -> D2 - let mut client = substrate_test_runtime_client::new(); + let mut client = substrate_test_runtime_client::new(true); // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -1012,7 +1020,7 @@ fn key_changes_works() { #[test] fn import_with_justification() { - let mut client = substrate_test_runtime_client::new(); + let mut client = substrate_test_runtime_client::new(true); // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -1058,7 +1066,7 @@ fn import_with_justification() { #[test] fn importing_diverged_finalized_block_should_trigger_reorg() { - let mut client = substrate_test_runtime_client::new(); + let mut client = substrate_test_runtime_client::new(true); // G -> A1 -> A2 // \ @@ -1115,7 +1123,9 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { #[test] fn finalizing_diverged_block_should_trigger_reorg() { - let (mut client, select_chain) = TestClientBuilder::new().build_with_longest_chain(); + let (mut client, select_chain) = TestClientBuilder::new() + .state_hashed_value() + .build_with_longest_chain(); // G -> A1 -> A2 // \ @@ -1202,7 +1212,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { #[test] fn get_header_by_block_number_doesnt_panic() { - let client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(true); // backend uses u32 for block numbers, make sure we don't panic when // trying to convert @@ -1213,7 +1223,7 @@ fn get_header_by_block_number_doesnt_panic() { #[test] fn state_reverted_on_reorg() { sp_tracing::try_init_simple(); - let mut client = substrate_test_runtime_client::new(); + let mut client = 
substrate_test_runtime_client::new(true); let current_balance = |client: &substrate_test_runtime_client::TestClient| client.runtime_api().balance_of( @@ -1741,7 +1751,11 @@ fn imports_blocks_with_changes_tries_config_change() { #[test] fn storage_keys_iter_prefix_and_start_key_works() { - let client = substrate_test_runtime_client::new(); + storage_keys_iter_prefix_and_start_key_works_inner(true); + storage_keys_iter_prefix_and_start_key_works_inner(false); +} +fn storage_keys_iter_prefix_and_start_key_works_inner(hashed_value: bool) { + let client = substrate_test_runtime_client::new(hashed_value); let prefix = StorageKey(hex!("3a").to_vec()); @@ -1766,7 +1780,11 @@ fn storage_keys_iter_prefix_and_start_key_works() { #[test] fn storage_keys_iter_works() { - let client = substrate_test_runtime_client::new(); + storage_keys_iter_works_inner(true); + storage_keys_iter_works_inner(false); +} +fn storage_keys_iter_works_inner(hashed_value: bool) { + let client = substrate_test_runtime_client::new(hashed_value); let prefix = StorageKey(hex!("").to_vec()); diff --git a/client/transaction-pool/src/testing/pool.rs b/client/transaction-pool/src/testing/pool.rs index 063947b383d03..325722ab02ede 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/src/testing/pool.rs @@ -932,7 +932,7 @@ fn ready_set_should_eventually_resolve_when_block_update_arrives() { fn should_not_accept_old_signatures() { use std::convert::TryFrom; - let client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new(true)); let pool = Arc::new( BasicPool::new_test(Arc::new(FullChainApi::new(client, None))).0 @@ -968,7 +968,7 @@ fn should_not_accept_old_signatures() { #[test] fn import_notification_to_pool_maintain_works() { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let mut client = Arc::new(substrate_test_runtime_client::new(true)); let pool = Arc::new( 
BasicPool::new_test(Arc::new(FullChainApi::new(client.clone(), None))).0 diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 47b478b1d2da0..7c2d395f6069c 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -481,9 +481,7 @@ mod tests { let contents = value_range.clone().map(|i| (vec![i], Some(vec![i; size_content]))).collect::>(); let in_memory = InMemoryBackend::::default(); let mut in_memory = in_memory.update(vec![(None, contents)], false); - let in_memory_root = in_memory.storage_root(std::iter::empty(), false).0; value_range.clone().for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); - in_memory = in_memory.update(vec![], true); let in_memory_root = in_memory.storage_root(std::iter::empty(), false).0; let trie = in_memory.as_trie_backend().unwrap(); diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 33b3fd64e564c..c34522f64499e 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -1155,13 +1155,19 @@ mod tests { check_iteration::(&input); } - // TODO add flag - fn populate_trie<'db, T: TrieConfiguration>( + fn populate_trie<'db, T>( db: &'db mut dyn HashDB, root: &'db mut TrieHash, - v: &[(Vec, Vec)] - ) -> TrieDBMut<'db, T> { + v: &[(Vec, Vec)], + flag_hash: bool, + ) -> TrieDBMut<'db, T> + where + T: TrieConfiguration, + { let mut t = TrieDBMut::::new(db, root); + if flag_hash { + flag_meta_hasher(&mut t).unwrap(); + } for i in 0..v.len() { let key: &[u8]= &v[i].0; let val: &[u8] = &v[i].1; @@ -1182,6 +1188,10 @@ mod tests { #[test] fn random_should_work() { + random_should_work_inner(true); + random_should_work_inner(false); + } + fn random_should_work_inner(flag: bool) { let mut seed = ::Out::zero(); for test_i in 0..10000 { if test_i % 50 == 0 { @@ -1195,12 +1205,12 @@ mod tests { count: 100, }.make_with(seed.as_fixed_bytes_mut()); - // TODO 
test other layout states. let layout = Layout::default(); + // TODO implement variant for stream codec or use iter_build. let real = layout.trie_root(x.clone()); let mut memdb = MemoryDB::default(); let mut root = Default::default(); - let mut memtrie = populate_trie::(&mut memdb, &mut root, &x); + let mut memtrie = populate_trie::(&mut memdb, &mut root, &x, flag); memtrie.commit(); if *memtrie.root() != real { @@ -1260,7 +1270,6 @@ mod tests { #[test] fn codec_trie_two_tuples_disjoint_keys() { - // TODO switch to old layout let layout = Layout::default(); let input = vec![(&[0x48, 0x19], &[0xfe]), (&[0x13, 0x14], &[0xff])]; let trie = layout.trie_root_unhashed(input); @@ -1287,19 +1296,26 @@ mod tests { #[test] fn iterator_works() { - let pairs = vec![ + iterator_works_inner(true); + iterator_works_inner(false); + } + fn iterator_works_inner(flag: bool) { + let mut pairs = vec![ (hex!("0103000000000000000464").to_vec(), hex!("0400000000").to_vec()), (hex!("0103000000000000000469").to_vec(), hex!("0401000000").to_vec()), ]; let mut mdb = MemoryDB::default(); let mut root = Default::default(); - let _ = populate_trie::(&mut mdb, &mut root, &pairs); + let _ = populate_trie::(&mut mdb, &mut root, &pairs, flag); let trie = TrieDB::::new(&mdb, &root).unwrap(); let iter = trie.iter().unwrap(); let mut iter_pairs = Vec::new(); + if flag { + pairs.insert(0, (vec![], vec![])); + } for pair in iter { let (key, value) = pair.unwrap(); iter_pairs.push((key, value.to_vec())); diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index d8cc40d5561c1..c4cb965e7c834 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -80,6 +80,7 @@ pub struct TestClientBuilder { fork_blocks: ForkBlocks, bad_blocks: BadBlocks, enable_offchain_indexing_api: bool, + state_hashed_value: bool, } impl Default @@ -116,6 +117,7 @@ impl TestClientBuilder TestClientBuilder Self { + self.state_hashed_value = true; + self + } + /// Build the test client with the 
given native executor. pub fn build_with_executor( self, @@ -202,6 +210,9 @@ impl TestClientBuilder for LightFetcher { } /// Creates new client instance used for tests. -pub fn new() -> Client { - TestClientBuilder::new().build() +pub fn new(hashed_state: bool) -> Client { + let mut builder = TestClientBuilder::new(); + if hashed_state { + builder = builder.state_hashed_value(); + } + builder.build() } /// Creates new light client instance used for tests. -pub fn new_light() -> ( +pub fn new_light(hashed_state: bool) -> ( client::Client, Arc, ) { @@ -368,10 +372,12 @@ pub fn new_light() -> ( local_call_executor, ); + let mut builder = TestClientBuilder::with_backend(backend.clone()); + if hashed_state { + builder = builder.state_hashed_value(); + } ( - TestClientBuilder::with_backend(backend.clone()) - .build_with_executor(call_executor) - .0, + builder.build_with_executor(call_executor).0, backend, ) } diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index bbc51a28a59cd..2409afca66994 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -294,7 +294,7 @@ mod tests { sp_tracing::try_init_simple(); // given - let client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new(true)); let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), @@ -334,7 +334,7 @@ mod tests { sp_tracing::try_init_simple(); // given - let client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new(true,)); let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), @@ -358,7 +358,7 @@ mod tests { sp_tracing::try_init_simple(); // given - let client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new(true)); let spawner = sp_core::testing::TaskExecutor::new(); let 
pool = BasicPool::new_full( Default::default(), @@ -391,7 +391,7 @@ mod tests { sp_tracing::try_init_simple(); // given - let client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new(true)); let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full( Default::default(), From aa2e5c31ed25b8f973232ce5111893bb759bd957 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 20 May 2021 13:11:35 +0200 Subject: [PATCH 014/188] fix chain spec test with serde default. --- client/basic-authorship/src/lib.rs | 2 +- client/chain-spec/src/chain_spec.rs | 1 + primitives/api/test/benches/bench.rs | 8 ++++---- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 133b833cdddc8..371307f8cdac2 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -30,7 +30,7 @@ //! # DefaultTestClientBuilderExt, TestClientBuilderExt, //! # }; //! # use sc_transaction_pool::{BasicPool, FullChainApi}; -//! # let client = Arc::new(substrate_test_runtime_client::new()); +//! # let client = Arc::new(substrate_test_runtime_client::new(true)); //! # let spawner = sp_core::testing::TaskExecutor::new(); //! # let txpool = BasicPool::new_full( //! 
# Default::default(), diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 3838c5dbc0def..a18ad2949e52f 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -133,6 +133,7 @@ pub type GenesisStorage = HashMap; pub struct RawGenesis { pub top: GenesisStorage, pub children_default: HashMap, + #[serde(default)] pub flag_hashed_value: bool, } diff --git a/primitives/api/test/benches/bench.rs b/primitives/api/test/benches/bench.rs index 20ddbbe7116dc..a9fe79d1abcec 100644 --- a/primitives/api/test/benches/bench.rs +++ b/primitives/api/test/benches/bench.rs @@ -26,7 +26,7 @@ use sp_api::ProvideRuntimeApi; fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("add one with same runtime api", |b| { - let client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(true); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); @@ -34,14 +34,14 @@ fn sp_api_benchmark(c: &mut Criterion) { }); c.bench_function("add one with recreating runtime api", |b| { - let client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(true); let block_id = BlockId::Number(client.chain_info().best_number); b.iter(|| client.runtime_api().benchmark_add_one(&block_id, &1)) }); c.bench_function("vector add one with same runtime api", |b| { - let client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(true); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); let data = vec![0; 1000]; @@ -50,7 +50,7 @@ fn sp_api_benchmark(c: &mut Criterion) { }); c.bench_function("vector add one with recreating runtime api", |b| { - let client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(true); let block_id = BlockId::Number(client.chain_info().best_number); let data = vec![0; 
1000]; From 91bd0aa861095c816702207bf714e519de716b9c Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 20 May 2021 14:01:35 +0200 Subject: [PATCH 015/188] export state (missing trie function). --- bin/node/bench/src/generator.rs | 4 ++++ bin/node/bench/src/trie.rs | 3 ++- client/api/src/backend.rs | 4 ++++ client/db/src/bench.rs | 4 ++++ client/db/src/lib.rs | 9 ++++++++- client/db/src/storage_cache.rs | 8 ++++++++ client/light/src/backend.rs | 7 +++++++ client/service/src/chain_ops/export_raw_state.rs | 2 +- client/service/src/client/client.rs | 4 ++++ primitives/state-machine/src/backend.rs | 7 +++++++ primitives/state-machine/src/proving_backend.rs | 4 ++++ primitives/state-machine/src/trie_backend.rs | 4 ++++ primitives/state-machine/src/trie_backend_essence.rs | 5 +++++ primitives/trie/src/lib.rs | 11 +++++++++++ 14 files changed, 73 insertions(+), 3 deletions(-) diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index c540ae147c9f0..a227c9081bb67 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -31,6 +31,7 @@ use crate::simple_trie::SimpleTrie; pub fn generate_trie( db: Arc, key_values: impl IntoIterator, Vec)>, + flag_hashed_value: bool, ) -> Hash { let mut root = Hash::default(); @@ -44,6 +45,9 @@ pub fn generate_trie( { let mut trie_db = TrieDBMut::new(&mut trie, &mut root); + if flag_hashed_value { + sp_trie::flag_meta_hasher(&mut trie_db).expect("flag trie failed"); + } for (key, value) in key_values { trie_db.insert(&key, &value).expect("trie insertion failed"); } diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index 267d0cc16c8c4..56bf7fe8ad604 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -117,7 +117,6 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { let mut rng = rand::thread_rng(); let warmup_prefix = KUSAMA_STATE_DISTRIBUTION.key(&mut rng); - // TODO flag trie for hash of value. 
let mut key_values = KeyValues::new(); let mut warmup_keys = KeyValues::new(); let mut query_keys = KeyValues::new(); @@ -146,6 +145,7 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { let root = generate_trie( database.open(self.database_type), key_values, + true, ); Box::new(TrieReadBenchmark { @@ -263,6 +263,7 @@ impl core::BenchmarkDescription for TrieWriteBenchmarkDescription { let root = generate_trie( database.open(self.database_type), key_values, + true, ); Box::new(TrieWriteBenchmark { diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 09e9e0cb2e173..bd5e15c0222fd 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -385,6 +385,10 @@ pub trait StorageProvider> { storage_key: Option<&PrefixedStorageKey>, key: &StorageKey ) -> sp_blockchain::Result, u32)>>; + + /// Returns true when state allow hashing value and therefore + /// removing unaccess value from proofs. + fn state_hashed_value(&self, id: &BlockId) -> sp_blockchain::Result; } /// Client backend. diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index fdc53ab224966..21e20d1ff6211 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -528,6 +528,10 @@ impl StateBackend> for BenchmarkingState { fn proof_size(&self) -> Option { self.proof_recorder.as_ref().map(|recorder| recorder.estimate_encoded_size() as u32) } + + fn state_hashed_value(&self) -> bool { + self.state.borrow().as_ref().map_or(Default::default(), |s| s.state_hashed_value()) + } } impl std::fmt::Debug for BenchmarkingState { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index c465598632396..2fa447b5e9b84 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -269,6 +269,10 @@ impl StateBackend> for RefTrackingState { fn usage_info(&self) -> StateUsageInfo { self.state.usage_info() } + + fn state_hashed_value(&self) -> bool { + self.state.state_hashed_value() + } } /// Database settings. 
@@ -2286,7 +2290,10 @@ pub(crate) mod tests { #[test] fn set_state_data() { - let flagged = false; // TODO test with flagged + set_state_data_inner(true); + set_state_data_inner(false); + } + fn set_state_data_inner(flagged: bool) { let db = Backend::::new_test(2, 0); let hash = { let mut op = db.begin_operation().unwrap(); diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index eeb5092fa6c8f..e6de7774f257f 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -687,6 +687,10 @@ impl>, B: BlockT> StateBackend> for Cachin info.include_state_machine_states(&self.overlay_stats); info } + + fn state_hashed_value(&self) -> bool { + self.state.state_hashed_value() + } } /// Extended [`CachingState`] that will sync the caches on drop. @@ -871,6 +875,10 @@ impl>, B: BlockT> StateBackend> for Syncin fn usage_info(&self) -> sp_state_machine::UsageInfo { self.caching_state().usage_info() } + + fn state_hashed_value(&self) -> bool { + self.caching_state().state_hashed_value() + } } impl Drop for SyncingCachingState { diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 49c861411bb45..ea99273f8c869 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -540,4 +540,11 @@ impl StateBackend for GenesisOrUnavailableState GenesisOrUnavailableState::Unavailable => None, } } + + fn state_hashed_value(&self) -> bool { + match self { + GenesisOrUnavailableState::Genesis(state) => state.state_hashed_value(), + GenesisOrUnavailableState::Unavailable => false, + } + } } diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs index bdba4911f8db6..fe96994ed2605 100644 --- a/client/service/src/chain_ops/export_raw_state.rs +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -42,7 +42,7 @@ where let empty_key = StorageKey(Vec::new()); let mut top_storage = client.storage_pairs(&block, &empty_key)?; let mut children_default = 
HashMap::new(); - let flag_hashed_value = false; // TODO read from trie (through clinet api). + let flag_hashed_value = client.state_hashed_value(&block)?; // Remove all default child storage roots from the top storage and collect the child storage // pairs. diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index a958cb6865c79..661d1c9456dd6 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1525,6 +1525,10 @@ impl StorageProvider for Client wher Ok(result) } + + fn state_hashed_value(&self, id: &BlockId) -> sp_blockchain::Result { + Ok(self.state_at(id)?.state_hashed_value()) + } } impl HeaderMetadata for Client where diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 90923b19b21c9..771d51f274678 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -254,6 +254,9 @@ pub trait Backend: sp_std::fmt::Debug { fn proof_size(&self) -> Option { unimplemented!() } + + /// Does trie state allow hashing of value. + fn state_hashed_value(&self) -> bool; } impl<'a, T: Backend, H: Hasher> Backend for &'a T { @@ -335,6 +338,10 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { fn usage_info(&self) -> UsageInfo { (*self).usage_info() } + + fn state_hashed_value(&self) -> bool { + (*self).state_hashed_value() + } } /// Trait that allows consolidate two transactions together. diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 7c2d395f6069c..81ad50408e4c4 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -361,6 +361,10 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn usage_info(&self) -> crate::stats::UsageInfo { self.0.usage_info() } + + fn state_hashed_value(&self) -> bool { + self.0.state_hashed_value() + } } /// Create proof check backend. 
diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 986f6db63670f..e110963a20817 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -255,6 +255,10 @@ impl, H: Hasher> Backend for TrieBackend where fn wipe(&self) -> Result<(), Self::Error> { Ok(()) } + + fn state_hashed_value(&self) -> bool { + self.essence.state_hashed_value() + } } #[cfg(test)] diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 594abf38fa698..6d417204ebd8d 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -169,6 +169,11 @@ impl, H: Hasher> TrieBackendEssence where H::Out: Ok(next_key) } + /// Does current trie use inner hashed value. + pub fn state_hashed_value(&self) -> bool { + sp_trie::state_hashed_value::, _>(self, &self.root) + } + /// Get the value of storage at given key. pub fn storage(&self, key: &[u8]) -> Result> { let map_e = |e| format!("Trie lookup error: {}", e); diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index c34522f64499e..8b497a8a7a280 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -594,6 +594,17 @@ pub fn flag_meta_hasher( Ok(()) } +/// Resolve if inner hashing of value is active. +pub fn state_hashed_value>( + db: &DB, + root: &TrieHash, +) -> bool { + if let Ok(t) = TrieDB::::new(&*db, root) { + unimplemented!("TODO has_flag on triedb"); + } + false +} + /// Read a value from the trie. pub fn read_trie_value>( db: &DB, From 183b7b56b61541ad1e5c2dbc3b8ad9dd2bf386c1 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Fri, 21 May 2021 18:51:09 +0200 Subject: [PATCH 016/188] Pending using new branch, lacking genericity on layout resolution. 
--- Cargo.lock | 41 ++++----- Cargo.toml | 9 +- primitives/trie/src/lib.rs | 148 ++++++++++++++---------------- primitives/trie/src/node_codec.rs | 10 +- 4 files changed, 99 insertions(+), 109 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 79c33175dcfc3..897eff025d517 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2329,7 +2329,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" [[package]] name = "hash256-std-hasher" @@ -2343,7 +2343,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" dependencies = [ "crunchy", ] @@ -2996,10 +2996,10 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" dependencies = [ "hash-db", - "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", + "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", "tiny-keccak", ] @@ -3788,7 +3788,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" dependencies = [ "hash-db", "hashbrown", @@ 
-4217,7 +4217,7 @@ dependencies = [ "sp-state-machine", "sp-trie", "substrate-test-client", - "trie-root 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "trie-root", "wat", ] @@ -9177,7 +9177,7 @@ dependencies = [ "thiserror", "tracing", "trie-db", - "trie-root 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "trie-root", ] [[package]] @@ -9283,7 +9283,7 @@ dependencies = [ "sp-std", "trie-bench", "trie-db", - "trie-root 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", + "trie-root", "trie-standardmap 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -10332,22 +10332,22 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" dependencies = [ "criterion", "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", "memory-db", "parity-scale-codec", "trie-db", - "trie-root 0.16.0 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", - "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", + "trie-root", + "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", ] [[package]] name = "trie-db" version = "0.22.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" dependencies = [ "hash-db", "hashbrown", @@ -10359,16 +10359,7 @@ dependencies = [ [[package]] name = "trie-root" 
version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "652931506d2c1244d7217a70b99f56718a7b4161b37f04e7cd868072a99f68cd" -dependencies = [ - "hash-db", -] - -[[package]] -name = "trie-root" -version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" dependencies = [ "hash-db", ] @@ -10386,10 +10377,10 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple2#ab6d8bbc73d620af5357f350fa132142c2d57a33" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" dependencies = [ "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple2)", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 1bda1bc4f5ec2..3133235b02ad5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -271,7 +271,8 @@ zeroize = { opt-level = 3 } panic = "unwind" [patch.crates-io] -hash-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple2" } -memory-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple2" } -trie-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple2" } -trie-bench = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple2" } +hash-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } +memory-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } +trie-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } +trie-root = { git = "https://github.com/cheme/trie.git", branch = 
"hashed-value-simple4" } +trie-bench = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 8b497a8a7a280..34966333c23bc 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -39,7 +39,7 @@ pub use storage_proof::StorageProof; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, - nibble_ops, TrieDBIterator, Meta, NodeChange, node::{NodePlan, ValuePlan}, + nibble_ops, TrieDBIterator, Meta, node::{NodePlan, ValuePlan}, GlobalMeta, }; /// Various re-exports from the `memory-db` crate. pub use memory_db::KeyFunction; @@ -79,7 +79,7 @@ pub struct TrieMeta { impl Meta for TrieMeta { /// Layout do not have content. - type MetaInput = (); + type GlobalMeta = bool; /// When true apply inner hashing of value. type StateMeta = bool; @@ -115,33 +115,23 @@ impl Meta for TrieMeta { } fn meta_for_new( - _input: Self::MetaInput, - parent: Option<&Self>, + global: Self::GlobalMeta, ) -> Self { let mut result = Self::default(); - result.do_value_hash = parent.map(|p| p.do_value_hash).unwrap_or_default(); + result.do_value_hash = global; result } fn meta_for_existing_inline_node( - input: Self::MetaInput, - parent: Option<&Self>, + global: Self::GlobalMeta, ) -> Self { - Self::meta_for_new(input, parent) + Self::meta_for_new(global) } fn meta_for_empty( + global: Self::GlobalMeta, ) -> Self { - Default::default() - } - - fn set_value_callback( - &mut self, - _new_value: Option<&[u8]>, - _is_branch: bool, - changed: NodeChange, - ) -> NodeChange { - changed + Self::meta_for_new(global) } fn encoded_value_callback( @@ -162,15 +152,6 @@ impl Meta for TrieMeta { } } - fn set_child_callback( - &mut self, - _child: Option<&Self>, - changed: NodeChange, - _at: usize, - ) -> NodeChange { - changed - } - fn decoded_callback( &mut self, node_plan: &NodePlan, @@ -209,7 +190,7 @@ 
impl TrieMeta { } /// substrate trie layout -pub struct Layout(sp_std::marker::PhantomData<(H, M)>); +pub struct Layout(bool, sp_std::marker::PhantomData<(H, M)>); impl fmt::Debug for Layout { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -217,37 +198,46 @@ impl fmt::Debug for Layout { } } -impl Default for Layout { - fn default() -> Self { - Layout(sp_std::marker::PhantomData) +impl Clone for Layout { + fn clone(&self) -> Self { + Layout(self.0, sp_std::marker::PhantomData) } } -impl Clone for Layout { - fn clone(&self) -> Self { - Layout(sp_std::marker::PhantomData) +impl Default for Layout { + fn default() -> Self { + Layout(false, sp_std::marker::PhantomData) } } impl TrieLayout for Layout where H: Hasher, - M: MetaHasher, - M::Meta: Meta, + M: MetaHasher, + M::Meta: Meta, { const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; const USE_META: bool = true; + const READ_ROOT_STATE_META: bool = true; + type Hash = H; type Codec = NodeCodec; type MetaHasher = M; type Meta = M::Meta; - fn metainput_for_new_node(&self) -> ::MetaInput { - () + fn layout_meta(&self) -> GlobalMeta { + self.0 + } + fn initialize_from_root_meta(&mut self, root_meta: &Self::Meta) { + if root_meta.recorded_do_value_hash { + self.0 = true; + } } - fn metainput_for_stored_inline_node(&self) -> ::MetaInput { - () + fn set_root_meta(root_meta: &mut Self::Meta, global_meta: GlobalMeta) { + if global_meta { + root_meta.recorded_do_value_hash = true; + } } } @@ -260,6 +250,7 @@ impl MetaHasher for StateHasher H: Hasher, { type Meta = TrieMeta; + type GlobalMeta = bool; fn hash(value: &[u8], meta: &Self::Meta) -> H::Out { match &meta { @@ -328,7 +319,7 @@ impl MetaHasher for StateHasher >::stored_value(value.as_slice(), meta) } - fn extract_value<'a>(mut stored: &'a [u8], parent_meta: Option<&Self::Meta>) -> (&'a [u8], Self::Meta) { + fn extract_value(mut stored: &[u8], global_meta: Self::GlobalMeta) -> (&[u8], Self::Meta) { let input = &mut stored; let mut contain_hash = 
false; let mut old_hash = false; @@ -352,13 +343,13 @@ impl MetaHasher for StateHasher let _offset = meta.read_state_meta(stored) .expect("State meta reading failure."); //let stored = &stored[offset..]; - meta.do_value_hash = meta.recorded_do_value_hash || parent_meta.map(|m| m.do_value_hash).unwrap_or(false); + meta.do_value_hash = meta.recorded_do_value_hash || global_meta; (stored, meta) } - fn extract_value_owned(mut stored: DBValue, parent_meta: Option<&Self::Meta>) -> (DBValue, Self::Meta) { + fn extract_value_owned(mut stored: DBValue, global: Self::GlobalMeta) -> (DBValue, Self::Meta) { let len = stored.len(); - let (v, meta) = >::extract_value(stored.as_slice(), parent_meta); + let (v, meta) = >::extract_value(stored.as_slice(), global); let removed = len - v.len(); (stored.split_off(removed), meta) } @@ -374,6 +365,7 @@ impl MetaHasher for NoMetaHasher H: Hasher, { type Meta = TrieMeta; + type GlobalMeta = bool; fn hash(value: &[u8], _meta: &Self::Meta) -> H::Out { H::hash(value) @@ -387,11 +379,11 @@ impl MetaHasher for NoMetaHasher value } - fn extract_value<'a>(stored: &'a [u8], _parent_meta: Option<&Self::Meta>) -> (&'a [u8], Self::Meta) { + fn extract_value(stored: &[u8], _meta: Self::GlobalMeta) -> (&[u8], Self::Meta) { (stored, Default::default()) } - fn extract_value_owned(stored: DBValue, _parent_meta: Option<&Self::Meta>) -> (DBValue, Self::Meta) { + fn extract_value_owned(stored: DBValue, _meta: Self::GlobalMeta) -> (DBValue, Self::Meta) { (stored, Default::default()) } } @@ -399,15 +391,15 @@ impl MetaHasher for NoMetaHasher impl TrieConfiguration for Layout where H: Hasher, - M: MetaHasher, - M::Meta: Meta, + M: MetaHasher, + M::Meta: Meta, { fn trie_root(&self, input: I) -> ::Out where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::trie_root_no_extension::(input) + trie_root::trie_root_no_extension::(input, self.clone()) } fn trie_root_unhashed(&self, input: I) -> Vec where @@ -415,7 +407,7 @@ impl TrieConfiguration 
for Layout A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::unhashed_trie_no_extension::(input) + trie_root::unhashed_trie_no_extension::(input, self.clone()) } fn encode_index(input: u32) -> Vec { @@ -431,10 +423,10 @@ type MemTracker = memory_db::MemCounter; /// TrieDB error over `TrieConfiguration` trait. pub type TrieError = trie_db::TrieError, CError>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub trait AsHashDB: hash_db::AsHashDB {} -impl> AsHashDB for T {} +pub trait AsHashDB: hash_db::AsHashDB {} +impl> AsHashDB for T {} /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub type HashDB<'a, H, M> = dyn hash_db::HashDB + 'a; +pub type HashDB<'a, H, M, GM> = dyn hash_db::HashDB + 'a; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. /// This uses a `KeyFunction` for prefixing keys internally (avoiding /// key conflict for non random keys). @@ -545,7 +537,7 @@ pub fn delta_trie_root( A: Borrow<[u8]>, B: Borrow>, V: Borrow<[u8]>, - DB: hash_db::HashDB, + DB: hash_db::HashDB>, { { let mut trie = TrieDBMut::::from_existing(db, &mut root)?; @@ -570,7 +562,7 @@ pub fn flag_inner_meta_hasher( mut root: TrieHash, ) -> Result, Box>> where L: TrieConfiguration, - DB: hash_db::HashDB, + DB: hash_db::HashDB>, { { let mut t = TrieDBMut::::from_existing(db, &mut root)?; @@ -595,7 +587,7 @@ pub fn flag_meta_hasher( } /// Resolve if inner hashing of value is active. 
-pub fn state_hashed_value>( +pub fn state_hashed_value>>( db: &DB, root: &TrieHash, ) -> bool { @@ -606,7 +598,7 @@ pub fn state_hashed_value>( +pub fn read_trie_value>>( db: &DB, root: &TrieHash, key: &[u8] @@ -617,8 +609,8 @@ pub fn read_trie_value, - DB: hash_db::HashDBRef + Q: Query, + DB: hash_db::HashDBRef> >( db: &DB, root: &TrieHash, @@ -666,7 +658,7 @@ pub fn child_delta_trie_root( B: Borrow>, V: Borrow<[u8]>, RD: AsRef<[u8]>, - DB: hash_db::HashDB + DB: hash_db::HashDB>, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. @@ -689,7 +681,7 @@ pub fn for_keys_in_child_trie bool, DB> mut f: F ) -> Result<(), Box>> where - DB: hash_db::HashDBRef + DB: hash_db::HashDBRef>, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. @@ -713,9 +705,9 @@ pub fn for_keys_in_child_trie bool, DB> pub fn record_all_keys( db: &DB, root: &TrieHash, - recorder: &mut Recorder> + recorder: &mut Recorder, L::Meta> ) -> Result<(), Box>> where - DB: hash_db::HashDBRef + DB: hash_db::HashDBRef>, { let trie = TrieDB::::new(&*db, root)?; let iter = trie.iter()?; @@ -740,7 +732,7 @@ pub fn read_child_trie_value( key: &[u8] ) -> Result>, Box>> where - DB: hash_db::HashDBRef + DB: hash_db::HashDBRef>, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. @@ -751,7 +743,7 @@ pub fn read_child_trie_value( } /// Read a value from the child trie with given query. -pub fn read_child_trie_value_with, DB>( +pub fn read_child_trie_value_with, DB>( keyspace: &[u8], db: &DB, root_slice: &[u8], @@ -759,7 +751,7 @@ pub fn read_child_trie_value_with Result>, Box>> where - DB: hash_db::HashDBRef + DB: hash_db::HashDBRef>, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. 
@@ -806,8 +798,8 @@ impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where } } -impl<'a, DB, H, T, M> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where - DB: hash_db::HashDBRef, +impl<'a, DB, H, T, M, GM> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where + DB: hash_db::HashDBRef, H: Hasher, T: From<&'static [u8]>, { @@ -820,9 +812,9 @@ impl<'a, DB, H, T, M> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> whe self.0.access_from(key, at) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, parent: Option<&M>) -> Option<(T, M)> { + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: GM) -> Option<(T, M)> { let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get_with_meta(key, (&derived_prefix.0, derived_prefix.1), parent) + self.0.get_with_meta(key, (&derived_prefix.0, derived_prefix.1), global) } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { @@ -831,8 +823,8 @@ impl<'a, DB, H, T, M> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> whe } } -impl<'a, DB, H, T, M> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, +impl<'a, DB, H, T, M, GM> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where + DB: hash_db::HashDB, H: Hasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, { @@ -845,9 +837,9 @@ impl<'a, DB, H, T, M> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> whe self.0.access_from(key, at) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, parent: Option<&M>) -> Option<(T, M)> { + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: GM) -> Option<(T, M)> { let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get_with_meta(key, (&derived_prefix.0, derived_prefix.1), parent) + self.0.get_with_meta(key, (&derived_prefix.0, derived_prefix.1), global) } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { @@ -881,14 +873,14 @@ impl<'a, DB, H, T, M> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> whe } } -impl<'a, DB, H, T, M> hash_db::AsHashDB for 
KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, +impl<'a, DB, H, T, M, GM> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where + DB: hash_db::HashDB, H: Hasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, { - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } + fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { &mut *self } } diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index b0b7c771c6b55..611e0fad0d18d 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -197,10 +197,16 @@ impl NodeCodecT for NodeCodec { } fn is_empty_node(data: &[u8]) -> bool { - data == >::empty_node() + data == >::empty_node_no_meta() } - fn empty_node() -> &'static [u8] { + fn empty_node(meta: &mut M) -> Vec { + let mut output = meta.write_state_meta(); + output.extend_from_slice(&[trie_constants::EMPTY_TRIE]); + output + } + + fn empty_node_no_meta() -> &'static [u8] { &[trie_constants::EMPTY_TRIE] } From 8511a40baea43e655227c26f1994d0054da8965d Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Fri, 21 May 2021 19:11:01 +0200 Subject: [PATCH 017/188] extract and set global meta --- Cargo.lock | 16 ++++++++-------- primitives/trie/src/lib.rs | 21 +++++++++++++++------ primitives/trie/src/node_codec.rs | 2 +- primitives/trie/src/trie_stream.rs | 1 + 4 files changed, 25 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 897eff025d517..83b0b359996c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2329,7 +2329,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" +source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" [[package]] name = "hash256-std-hasher" @@ -2343,7 +2343,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" dependencies = [ "crunchy", ] @@ -2996,7 +2996,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", @@ -3788,7 +3788,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" dependencies = [ "hash-db", "hashbrown", @@ -10332,7 +10332,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" dependencies = [ "criterion", "hash-db", @@ -10347,7 +10347,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" +source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" dependencies = [ "hash-db", "hashbrown", @@ -10359,7 +10359,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" dependencies = [ "hash-db", ] @@ -10377,7 +10377,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0b365f776e3716033f44f0fc9cbff7fd389a6c34" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 34966333c23bc..b562c534dc183 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -89,6 +89,17 @@ impl Meta for TrieMeta { self.do_value_hash = state_meta; } + fn extract_global_meta(&self) -> Self::GlobalMeta { + self.recorded_do_value_hash + } + + fn set_global_meta(&mut self, global_meta: Self::GlobalMeta) { + if global_meta { + self.recorded_do_value_hash = true; + self.do_value_hash = true; + } + } + fn has_state_meta(&self) -> bool { self.recorded_do_value_hash } @@ -230,14 +241,12 @@ impl TrieLayout for Layout self.0 } fn initialize_from_root_meta(&mut self, root_meta: &Self::Meta) { - if root_meta.recorded_do_value_hash { + if root_meta.extract_global_meta() { self.0 = true; } } fn set_root_meta(root_meta: &mut Self::Meta, global_meta: GlobalMeta) { - if global_meta { - root_meta.recorded_do_value_hash = true; - } + root_meta.set_global_meta(true); } } @@ -399,7 +408,7 @@ impl TrieConfiguration for Layout A: 
AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::trie_root_no_extension::(input, self.clone()) + trie_root::trie_root_no_extension::(input, self.layout_meta()) } fn trie_root_unhashed(&self, input: I) -> Vec where @@ -407,7 +416,7 @@ impl TrieConfiguration for Layout A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::unhashed_trie_no_extension::(input, self.clone()) + trie_root::unhashed_trie_no_extension::(input, self.layout_meta()) } fn encode_index(input: u32) -> Vec { diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 611e0fad0d18d..0dc647f17f047 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -181,7 +181,7 @@ impl NodeCodecT for NodeCodec { type HashOut = H::Out; fn hashed_null_node() -> ::Out { - H::hash(>::empty_node()) + H::hash(>::empty_node_no_meta()) } fn decode_plan(data: &[u8], meta: &mut M) -> Result { diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index 3a65c5a9190b4..e9f19cef5449e 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -66,6 +66,7 @@ fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator Self { TrieStream { From 270ed28b0d999d48cfc14d296c7d6aac7b8694c2 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Mon, 24 May 2021 17:29:20 +0200 Subject: [PATCH 018/188] Update to branch 4 --- Cargo.lock | 16 ++--- bin/node/bench/src/generator.rs | 12 ++-- bin/node/bench/src/simple_trie.rs | 12 ++-- bin/node/bench/src/trie.rs | 4 +- client/db/src/bench.rs | 6 +- client/db/src/lib.rs | 8 +-- .../src/changes_trie/changes_iterator.rs | 4 +- .../state-machine/src/changes_trie/mod.rs | 2 +- .../state-machine/src/changes_trie/prune.rs | 2 +- .../state-machine/src/changes_trie/storage.rs | 4 +- primitives/state-machine/src/lib.rs | 2 +- .../state-machine/src/proving_backend.rs | 6 +- primitives/state-machine/src/trie_backend.rs | 38 ++++++++---- 
.../state-machine/src/trie_backend_essence.rs | 62 +++++++++---------- primitives/trie/src/lib.rs | 61 +++++++----------- primitives/trie/src/storage_proof.rs | 10 ++- primitives/trie/src/trie_stream.rs | 55 +++++++++++++--- 17 files changed, 172 insertions(+), 132 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 83b0b359996c2..d1c2e7d69da81 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2329,7 +2329,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" [[package]] name = "hash256-std-hasher" @@ -2343,7 +2343,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" dependencies = [ "crunchy", ] @@ -2996,7 +2996,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", @@ -3788,7 +3788,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" dependencies = [ "hash-db", "hashbrown", @@ -10332,7 +10332,7 @@ checksum = 
"a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" dependencies = [ "criterion", "hash-db", @@ -10347,7 +10347,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" dependencies = [ "hash-db", "hashbrown", @@ -10359,7 +10359,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" dependencies = [ "hash-db", ] @@ -10377,7 +10377,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#e51687592a06ca1108c08c0ee8cb4dd00ff23c7d" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index a227c9081bb67..9f65ddd77dafb 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -43,11 +43,15 @@ pub fn generate_trie( ); let mut trie = SimpleTrie { db, overlay: &mut overlay }; { - let mut trie_db = TrieDBMut::new(&mut trie, &mut root); - if flag_hashed_value { - sp_trie::flag_meta_hasher(&mut 
trie_db).expect("flag trie failed"); - } + let mut trie_db = if flag_hashed_value { + let layout = sp_trie::Layout::with_inner_hashing(); + let mut t = TrieDBMut::::new_with_layout(&mut trie, &mut root, layout); + t.force_layout_meta(); + t + } else { + TrieDBMut::new(&mut trie, &mut root) + }; for (key, value) in key_values { trie_db.insert(&key, &value).expect("trie insertion failed"); } diff --git a/bin/node/bench/src/simple_trie.rs b/bin/node/bench/src/simple_trie.rs index 6cc32e00e34d9..4d8e76ae3a7e1 100644 --- a/bin/node/bench/src/simple_trie.rs +++ b/bin/node/bench/src/simple_trie.rs @@ -31,15 +31,15 @@ pub struct SimpleTrie<'a> { pub overlay: &'a mut HashMap, Option>>, } -impl<'a> AsHashDB for SimpleTrie<'a> { - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } +impl<'a> AsHashDB for SimpleTrie<'a> { + fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { &mut *self } } -impl<'a> HashDB for SimpleTrie<'a> { +impl<'a> HashDB for SimpleTrie<'a> { fn get(&self, key: &Hash, prefix: Prefix) -> Option { let key = sp_trie::prefixed_key::(key, prefix); if let Some(value) = self.overlay.get(&key) { @@ -48,9 +48,9 @@ impl<'a> HashDB for SimpleTrie<'a> { self.db.get(0, &key).expect("Database backend error") } - fn get_with_meta(&self, key: &Hash, prefix: Prefix, parent: Option<&TrieMeta>) -> Option<(DBValue, TrieMeta)> { + fn get_with_meta(&self, key: &Hash, prefix: Prefix, global: bool) -> Option<(DBValue, TrieMeta)> { let result = self.get(key, prefix); - result.map(|value| >::extract_value_owned(value, parent)) + result.map(|value| >::extract_value_owned(value, global)) } fn contains(&self, hash: &Hash, prefix: Prefix) -> bool { diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index 56bf7fe8ad604..735403f95c87c 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -170,11 +170,11 
@@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { struct Storage(Arc); impl sp_state_machine::Storage for Storage { - fn get(&self, key: &Hash, prefix: Prefix, parent: Option<&TrieMeta>) -> Result, TrieMeta)>, String> { + fn get(&self, key: &Hash, prefix: Prefix, global: bool) -> Result, TrieMeta)>, String> { let key = sp_trie::prefixed_key::(key, prefix); self.0.get(0, &key).map_err(|e| format!("Database backend error: {:?}", e)) .map(|result| result - .map(|value| >::extract_value_owned(value, parent)) + .map(|value| >::extract_value_owned(value, global)) ) } diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 21e20d1ff6211..8b73d0eb31622 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -49,20 +49,20 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix, parent: Option<&TrieMeta>) -> Result, String> { + fn get(&self, key: &Block::Hash, prefix: Prefix, global: bool) -> Result, String> { let prefixed_key = prefixed_key::>(key, prefix); if let Some(recorder) = &self.proof_recorder { if let Some(v) = recorder.get(&key) { return Ok(v.clone()); } let backend_value = self.db.get(0, &prefixed_key) - .map(|result| result.map(|value| , _>>::extract_value_owned(value, parent))) + .map(|result| result.map(|value| , _>>::extract_value_owned(value, global))) .map_err(|e| format!("Database backend error: {:?}", e))?; recorder.record::>(key.clone(), backend_value.clone()); Ok(backend_value) } else { self.db.get(0, &prefixed_key) - .map(|result| result.map(|value| , _>>::extract_value_owned(value, parent))) + .map(|result| result.map(|value| , _>>::extract_value_owned(value, global))) .map_err(|e| format!("Database backend error: {:?}", e)) } } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 2fa447b5e9b84..ddfcd6d260222 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -867,14 +867,14 @@ struct StorageDb { } impl 
sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix, parent: Option<&TrieMeta>) -> Result, String> { + fn get(&self, key: &Block::Hash, prefix: Prefix, global: bool) -> Result, String> { if self.prefix_keys { let key = prefixed_key::>(key, prefix); self.state_db.get(&key, self) } else { self.state_db.get(key.as_ref(), self) } - .map(|result| result.map(|value| , _>>::extract_value_owned(value, parent))) + .map(|result| result.map(|value| , _>>::extract_value_owned(value, global))) .map_err(|e| format!("Database backend error: {:?}", e)) } @@ -903,7 +903,7 @@ impl DbGenesisStorage { } impl sp_state_machine::Storage> for DbGenesisStorage { - fn get(&self, _key: &Block::Hash, _prefix: Prefix, _parent: Option<&TrieMeta>) -> Result, String> { + fn get(&self, _key: &Block::Hash, _prefix: Prefix, _global: bool) -> Result, String> { Ok(None) } fn access_from(&self, _key: &Block::Hash) { @@ -2125,7 +2125,7 @@ impl sc_client_api::backend::Backend for Backend { self.storage.as_ref(), &header.state_root, (&[], None), - None, + Default::default(), ).unwrap_or(None).is_some() }, _ => false, diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index be35581e7514d..25eda86b4e66a 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -24,7 +24,7 @@ use codec::{Decode, Encode, Codec}; use hash_db::Hasher; use num_traits::Zero; use sp_core::storage::PrefixedStorageKey; -use sp_trie::Recorder; +use sp_trie::{Recorder, TrieMeta}; use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; use crate::changes_trie::storage::{TrieBackendAdapter, InMemoryStorage}; @@ -337,7 +337,7 @@ struct ProvingDrilldownIterator<'a, H, Number> H::Out: 
'a, { essence: DrilldownIteratorEssence<'a, H, Number>, - proof_recorder: RefCell>, + proof_recorder: RefCell>, } impl<'a, H, Number> ProvingDrilldownIterator<'a, H, Number> diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index dd6d4f0df3b88..404353fc308b4 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -167,7 +167,7 @@ pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { type Overlay = sp_trie::MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix, _parent: Option<&sp_trie::TrieMeta>) -> Result, String> { + fn get(&self, key: &H::Out, prefix: Prefix, _global: bool) -> Result, String> { match self.0.get(key, prefix) { // change trie do not use meta. Ok(Some(v)) => Ok(Some((v, Default::default()))), diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 4098eadb98b31..987fb4ff63d08 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -95,7 +95,7 @@ fn prune_trie( // enumerate all changes trie' keys, recording all nodes that have been 'touched' // (effectively - all changes trie nodes) - let mut proof_recorder: Recorder = Default::default(); + let mut proof_recorder: Recorder = Default::default(); { let mut trie = ProvingBackendRecorder::<_, H> { backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index f81838f11f6e9..5414d3f8c2f6a 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -190,7 +190,7 @@ impl Storage for InMemoryStorage Result, String> { 
- Ok( as hash_db::HashDBRef>::get(&self.data.read().mdb, key, prefix)) + Ok( as hash_db::HashDBRef>::get(&self.data.read().mdb, key, prefix)) } } @@ -207,7 +207,7 @@ impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix, _parent: Option<&sp_trie::TrieMeta>) -> Result, String> { + fn get(&self, key: &H::Out, prefix: Prefix, _global: bool) -> Result, String> { match self.storage.get(key, prefix) { // change trie do not use meta. Ok(Some(v)) => Ok(Some((v, Default::default()))), diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index d12686b43ab2b..6b6c661ea4f03 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -855,7 +855,7 @@ mod execution { where H: Hasher, H::Out: Ord + Codec, - MH: sp_trie::MetaHasher, + MH: sp_trie::MetaHasher, KF: sp_trie::KeyFunction + Send + Sync, { proving_backend.storage(key).map_err(|e| Box::new(e) as Box) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 81ad50408e4c4..b431fa0c0e04d 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -35,7 +35,7 @@ use sp_core::storage::ChildInfo; /// Patricia trie-based backend specialized in get value proofs. 
pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { pub(crate) backend: &'a TrieBackendEssence, - pub(crate) proof_recorder: &'a mut Recorder, + pub(crate) proof_recorder: &'a mut Recorder, } impl<'a, S, H> ProvingBackendRecorder<'a, S, H> @@ -242,12 +242,12 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage { type Overlay = S::Overlay; - fn get(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Result, String> { + fn get(&self, key: &H::Out, prefix: Prefix, global: bool) -> Result, String> { if let Some(v) = self.proof_recorder.get(key) { return Ok(v); } - let backend_value = self.backend.get(key, prefix, parent)?; + let backend_value = self.backend.get(key, prefix, global)?; self.proof_recorder.record::(key.clone(), backend_value.clone()); Ok(backend_value) } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index e110963a20817..788eb47f89588 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -181,17 +181,20 @@ impl, H: Hasher> Backend for TrieBackend where self.essence.backend_storage(), &mut write_overlay, ); - if flag_inner_hash_value { - root = match sp_trie::flag_inner_meta_hasher::, _>(&mut eph, root) { - Ok(ret) => ret, - Err(e) => { - warn!(target: "trie", "Failed to flag trie: {}", e); - root - }, + let res = || { + if flag_inner_hash_value { + let layout = sp_trie::Layout::with_inner_hashing(); + let mut t = sp_trie::trie_types::TrieDBMut::::from_existing_with_layout( + &mut eph, + &mut root, + layout, + )?; + t.force_layout_meta()?; } - } + delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta) + }; - match delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta) { + match res() { Ok(ret) => root = ret, Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), } @@ -286,10 +289,19 @@ pub mod tests { { let mut sub_root = Vec::new(); root.encode_to(&mut 
sub_root); - let mut trie = TrieDBMut::new(&mut mdb, &mut root); - if hashed_value { - sp_trie::flag_meta_hasher(&mut trie).expect("flag failed"); - } + let mut trie = if hashed_value { + let layout = Layout::with_inner_hashing(); + let mut t = TrieDBMut::new_with_layout( + &mut mdb, + &mut root, + layout, + ); + t.force_layout_meta() + .expect("failed forced layout change"); + t + } else { + TrieDBMut::new(&mut mdb, &mut root) + }; trie.insert(child_info.prefixed_storage_key().as_slice(), &sub_root[..]) .expect("insert failed"); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 6d417204ebd8d..157c8ff12097f 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -43,7 +43,7 @@ type Result = sp_std::result::Result; /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Result>; + fn get(&self, key: &H::Out, prefix: Prefix, inner_hash: bool) -> Result>; /// Call back when value get accessed in trie. 
fn access_from(&self, key: &H::Out); } @@ -131,7 +131,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: Option<&ChildInfo>, key: &[u8], ) -> Result> { - let dyn_eph: &dyn hash_db::HashDBRef<_, _, _>; + let dyn_eph: &dyn hash_db::HashDBRef<_, _, _, _>; let keyspace_eph; if let Some(child_info) = child_info.as_ref() { keyspace_eph = KeySpacedDB::new(self, child_info.keyspace()); @@ -288,11 +288,11 @@ pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { overlay: &'a mut S::Overlay, } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB for Ephemeral<'a, S, H> { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } } impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { @@ -304,18 +304,18 @@ impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - Self::get_with_meta(self, key, prefix, None).map(|r| r.0) + Self::get_with_meta(self, key, prefix, Default::default()).map(|r| r.0) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Option<(DBValue, TrieMeta)> { - if let Some(val) = hash_db::HashDB::get_with_meta(self.overlay, key, prefix, parent) { + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: bool) -> Option<(DBValue, TrieMeta)> { + if let Some(val) = hash_db::HashDB::get_with_meta(self.overlay, key, prefix, global) { Some(val) } else { - match self.storage.get(&key, prefix, parent) { + match 
self.storage.get(&key, prefix, global) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -357,15 +357,15 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB, H: Hasher> hash_db::HashDBRef +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { hash_db::HashDB::get(self, key, prefix) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Option<(DBValue, TrieMeta)> { - hash_db::HashDB::get_with_meta(self, key, prefix, parent) + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: bool) -> Option<(DBValue, TrieMeta)> { + hash_db::HashDB::get_with_meta(self, key, prefix, global) } fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> Option { @@ -380,9 +380,9 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef: Send + Sync { /// Type of in-memory overlay. - type Overlay: hash_db::HashDB + Default + Consolidate; + type Overlay: hash_db::HashDB + Default + Consolidate; /// Get the value stored at key. - fn get(&self, key: &H::Out, prefix: Prefix, parent_meta: Option<&TrieMeta>) -> Result>; + fn get(&self, key: &H::Out, prefix: Prefix, global: bool) -> Result>; /// Call back when value get accessed in trie. 
fn access_from(&self, key: &H::Out); } @@ -392,8 +392,8 @@ pub trait TrieBackendStorage: Send + Sync { impl TrieBackendStorage for Arc> { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Result> { - Storage::::get(self.deref(), key, prefix, parent) + fn get(&self, key: &H::Out, prefix: Prefix, global: bool) -> Result> { + Storage::::get(self.deref(), key, prefix, global) } fn access_from(&self, key: &H::Out) { @@ -404,13 +404,13 @@ impl TrieBackendStorage for Arc> { impl TrieBackendStorage for sp_trie::GenericMemoryDB where H: Hasher, - MH: sp_trie::MetaHasher, + MH: sp_trie::MetaHasher, KF: sp_trie::KeyFunction + Send + Sync, { type Overlay = Self; - fn get(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Result> { - Ok(hash_db::HashDB::get_with_meta(self, key, prefix, parent)) + fn get(&self, key: &H::Out, prefix: Prefix, global: bool) -> Result> { + Ok(hash_db::HashDB::get_with_meta(self, key, prefix, global)) } fn access_from(&self, key: &H::Out) { @@ -418,25 +418,25 @@ impl TrieBackendStorage for sp_trie::GenericMemoryDB } } -impl, H: Hasher> hash_db::AsHashDB +impl, H: Hasher> hash_db::AsHashDB for TrieBackendEssence { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } } -impl, H: Hasher> hash_db::HashDB +impl, H: Hasher> hash_db::HashDB for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - self.get_with_meta(key, prefix, None).map(|r| r.0) + self.get_with_meta(key, prefix, Default::default()).map(|r| r.0) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Option<(DBValue, TrieMeta)> { + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: bool) -> 
Option<(DBValue, TrieMeta)> { if *key == self.empty { - return Some(([0u8].to_vec(), ::meta_for_empty())) + return Some(([0u8].to_vec(), ::meta_for_empty(global))) } - match self.storage.get(&key, prefix, parent) { + match self.storage.get(&key, prefix, global) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -472,15 +472,15 @@ impl, H: Hasher> hash_db::HashDB } } -impl, H: Hasher> hash_db::HashDBRef +impl, H: Hasher> hash_db::HashDBRef for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { hash_db::HashDB::get(self, key, prefix) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, parent: Option<&TrieMeta>) -> Option<(DBValue, TrieMeta)> { - hash_db::HashDB::get_with_meta(self, key, prefix, parent) + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: bool) -> Option<(DBValue, TrieMeta)> { + hash_db::HashDB::get_with_meta(self, key, prefix, global) } fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> Option { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index b562c534dc183..2a5811b76d09a 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -220,6 +220,13 @@ impl Default for Layout { Layout(false, sp_std::marker::PhantomData) } } +impl Layout { + /// Layout with inner hashing active. + /// Will flag trie for hashing. + pub fn with_inner_hashing() -> Self { + Layout(true, sp_std::marker::PhantomData) + } +} impl TrieLayout for Layout where @@ -246,7 +253,7 @@ impl TrieLayout for Layout } } fn set_root_meta(root_meta: &mut Self::Meta, global_meta: GlobalMeta) { - root_meta.set_global_meta(true); + root_meta.set_global_meta(global_meta); } } @@ -461,7 +468,7 @@ pub type MemoryDBMeta = memory_db::MemoryDB< /// Reexport from `hash_db`, with genericity set for `Hasher` trait. 
pub type GenericMemoryDB = memory_db::MemoryDB< - H, KF, trie_db::DBValue, MH, MemTracker + H, KF, trie_db::DBValue, MH, MemTracker, >; /// Persistent trie database read-access interface for the a given hasher. @@ -565,36 +572,6 @@ pub fn delta_trie_root( Ok(root) } -/// Flag inner trie with state metadata to enable hash of value internally. -pub fn flag_inner_meta_hasher( - db: &mut DB, - mut root: TrieHash, -) -> Result, Box>> where - L: TrieConfiguration, - DB: hash_db::HashDB>, -{ - { - let mut t = TrieDBMut::::from_existing(db, &mut root)?; - flag_meta_hasher(&mut t)?; - } - Ok(root) -} - -/// Flag inner trie with state metadata to enable hash of value internally. -pub fn flag_meta_hasher( - t: &mut TrieDBMut -) -> Result<(), Box>> where - L: TrieConfiguration, -{ - let flag = true; - let key: &[u8]= &[]; - if !t.contains(key)? { - t.insert(key, b"")?; - } - assert!(t.flag(key, flag)?); - Ok(()) -} - /// Resolve if inner hashing of value is active. pub fn state_hashed_value>>( db: &DB, @@ -1168,7 +1145,7 @@ mod tests { } fn populate_trie<'db, T>( - db: &'db mut dyn HashDB, + db: &'db mut dyn HashDB>, root: &'db mut TrieHash, v: &[(Vec, Vec)], flag_hash: bool, @@ -1176,10 +1153,20 @@ mod tests { where T: TrieConfiguration, { - let mut t = TrieDBMut::::new(db, root); - if flag_hash { - flag_meta_hasher(&mut t).unwrap(); - } + let mut t = if flag_hash { + let mut root_meta = Default::default(); + T::set_root_meta(&mut root_meta, flag_hash); + + let mut layout = T::default(); + layout.initialize_from_root_meta(&root_meta); + + let mut t = TrieDBMut::::new_with_layout(db, root, layout); + t.force_layout_meta() + .expect("Could not force layout."); + t + } else { + TrieDBMut::::new(db, root) + }; for i in 0..v.len() { let key: &[u8]= &v[i].0; let val: &[u8] = &v[i].1; diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 915dd92b1c16c..0f00a335ae2f1 100644 --- a/primitives/trie/src/storage_proof.rs +++ 
b/primitives/trie/src/storage_proof.rs @@ -113,23 +113,21 @@ impl From for crate::MemoryDB { let mut is_hashed_value = false; let mut accum = Vec::new(); for item in proof.trie_nodes.iter() { - // Note using `None` as parent meta does not impact `extract_value` of - // sp_trie meta hasher. - // But does not with `insert_with_meta`. + // Note using `default()` as global meta helps looking fro root node. + let layout_meta = Default::default(); let (encoded_node, mut meta) = < as TrieLayout>::MetaHasher as MetaHasher - >::extract_value(item.as_slice(), None); + >::extract_value(item.as_slice(), layout_meta); // read state meta. let _ = as TrieLayout>::Codec::decode_plan(encoded_node, &mut meta); if meta.recorded_do_value_hash { + debug_assert!(!is_hashed_value); is_hashed_value = true; } - // TODO insert_with_meta here accum.push((encoded_node, meta)); } for mut item in accum.into_iter() { if is_hashed_value { - // skipping hierarchy. item.1.do_value_hash = true; } db.insert_with_meta(crate::EMPTY_PREFIX, item.0, item.1); diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index e9f19cef5449e..dc754fbb903ec 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -17,13 +17,15 @@ //! `TrieStream` implementation for Substrate's trie format. -use hash_db::Hasher; +use hash_db::{MetaHasher, Hasher}; use trie_root; -use codec::Encode; +use codec::{Encode, Compact}; use sp_std::vec::Vec; -use crate::trie_constants; +use sp_std::ops::Range; +use crate::{trie_constants, TrieMeta, StateHasher}; use crate::node_header::{NodeKind, size_and_prefix_iterator}; use crate::node_codec::Bitmap; +use trie_db::Meta; const BRANCH_NODE_NO_VALUE: u8 = 254; const BRANCH_NODE_WITH_VALUE: u8 = 255; @@ -32,6 +34,8 @@ const BRANCH_NODE_WITH_VALUE: u8 = 255; /// Codec-flavored TrieStream. 
pub struct TrieStream { buffer: Vec, + inner_value_hashing: bool, + current_value_range: Option>, } impl TrieStream { @@ -68,9 +72,11 @@ fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator Self { - TrieStream { - buffer: Vec::new() + fn new(meta: bool) -> Self { + Self { + buffer: Vec::new(), + inner_value_hashing: meta, + current_value_range: None, } } @@ -80,7 +86,9 @@ impl trie_root::TrieStream for TrieStream { fn append_leaf(&mut self, key: &[u8], value: &[u8]) { self.buffer.extend(fuse_nibbles_node(key, NodeKind::Leaf)); - value.encode_to(&mut self.buffer); + Compact(value.len() as u32).encode_to(&mut self.buffer); + self.current_value_range = Some(self.buffer.len()..self.buffer.len() + value.len()); + self.buffer.extend_from_slice(value); } fn begin_branch( @@ -102,7 +110,9 @@ impl trie_root::TrieStream for TrieStream { self.buffer.extend(&branch_node(maybe_value.is_some(), has_children)); } if let Some(value) = maybe_value { - value.encode_to(&mut self.buffer); + Compact(value.len() as u32).encode_to(&mut self.buffer); + self.current_value_range = Some(self.buffer.len()..self.buffer.len() + value.len()); + self.buffer.extend_from_slice(value); } } @@ -118,6 +128,35 @@ impl trie_root::TrieStream for TrieStream { } } + fn hash_root(self) -> H::Out { + let inner_value_hashing = self.inner_value_hashing; + let range = self.current_value_range; + let data = self.buffer; + if inner_value_hashing + && range.as_ref().map(|r| r.end - r.start >= trie_constants::INNER_HASH_TRESHOLD) + .unwrap_or_default() { + let meta = TrieMeta { + range: range, + unused_value: false, + contain_hash: false, + do_value_hash: true, + old_hash: false, + recorded_do_value_hash: true, + }; + // Add the recorded_do_value_hash to encoded + let mut encoded = meta.write_state_meta(); + let encoded = if encoded.len() > 0 { + encoded.extend(data); + encoded + } else { + data + }; + >>::hash(&encoded, &meta) + } else { + H::hash(&data) + } + } + fn out(self) -> Vec { 
self.buffer } } From b838e7e2aab7f0dd643cf40292e4e3bac2ab7411 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 25 May 2021 18:04:04 +0200 Subject: [PATCH 019/188] fix iterator with root flag (no longer insert node). --- bin/node/bench/src/generator.rs | 2 +- .../state-machine/src/trie_backend_essence.rs | 1 + primitives/trie/src/lib.rs | 25 +++++++++++++------ 3 files changed, 20 insertions(+), 8 deletions(-) diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index 9f65ddd77dafb..a27d6e529f277 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -47,7 +47,7 @@ pub fn generate_trie( let mut trie_db = if flag_hashed_value { let layout = sp_trie::Layout::with_inner_hashing(); let mut t = TrieDBMut::::new_with_layout(&mut trie, &mut root, layout); - t.force_layout_meta(); + t.force_layout_meta().expect("force layout meta failed"); t } else { TrieDBMut::new(&mut trie, &mut root) diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 157c8ff12097f..5d1f50e9c5a5a 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -172,6 +172,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: /// Does current trie use inner hashed value. pub fn state_hashed_value(&self) -> bool { sp_trie::state_hashed_value::, _>(self, &self.root) + .unwrap_or_default() } /// Get the value of storage at given key. 
diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 2a5811b76d09a..05a3aa701f99e 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -576,11 +576,25 @@ pub fn delta_trie_root( pub fn state_hashed_value>>( db: &DB, root: &TrieHash, -) -> bool { +) -> Option> { + struct ReadMeta { + hashed: Option>, + } + impl trie_db::Query for &mut ReadMeta { + type Item = DBValue; + fn decode(self, value: &[u8]) -> DBValue { value.to_vec() } + fn record(&mut self, _hash: &::Out, _data: &[u8], _depth: u32, meta: &L::Meta) { + debug_assert!(self.hashed.is_none()); + self.hashed = Some(meta.extract_global_meta()); + } + } + let mut read_meta: ReadMeta = ReadMeta { + hashed: None, + }; if let Ok(t) = TrieDB::::new(&*db, root) { - unimplemented!("TODO has_flag on triedb"); + let _ = t.get_with(&[], &mut read_meta); } - false + read_meta.hashed } /// Read a value from the trie. @@ -1299,7 +1313,7 @@ mod tests { iterator_works_inner(false); } fn iterator_works_inner(flag: bool) { - let mut pairs = vec![ + let pairs = vec![ (hex!("0103000000000000000464").to_vec(), hex!("0400000000").to_vec()), (hex!("0103000000000000000469").to_vec(), hex!("0401000000").to_vec()), ]; @@ -1312,9 +1326,6 @@ mod tests { let iter = trie.iter().unwrap(); let mut iter_pairs = Vec::new(); - if flag { - pairs.insert(0, (vec![], vec![])); - } for pair in iter { let (key, value) = pair.unwrap(); iter_pairs.push((key, value.to_vec())); From 30742b782e7c3906462e546c23ef5ed07d345b6d Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 25 May 2021 18:44:00 +0200 Subject: [PATCH 020/188] fix trie root hashing of root --- Cargo.lock | 16 ++++---- primitives/trie/src/lib.rs | 9 +++-- primitives/trie/src/trie_stream.rs | 59 ++++++++++++++++++++---------- 3 files changed, 54 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d1c2e7d69da81..3a3a556fda9a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2329,7 +2329,7 @@ dependencies = [ [[package]] name = 
"hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" [[package]] name = "hash256-std-hasher" @@ -2343,7 +2343,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" dependencies = [ "crunchy", ] @@ -2996,7 +2996,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", @@ -3788,7 +3788,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" dependencies = [ "hash-db", "hashbrown", @@ -10332,7 +10332,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" dependencies = [ "criterion", "hash-db", @@ -10347,7 +10347,7 @@ dependencies = [ [[package]] name = "trie-db" 
version = "0.22.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" dependencies = [ "hash-db", "hashbrown", @@ -10359,7 +10359,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" dependencies = [ "hash-db", ] @@ -10377,7 +10377,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#09571017e3ad7ee3e04a40eb8d61bcff25e255ac" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 05a3aa701f99e..8e0eb24f95200 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -1206,7 +1206,7 @@ mod tests { } fn random_should_work_inner(flag: bool) { let mut seed = ::Out::zero(); - for test_i in 0..10000 { + for test_i in 0..10_000 { if test_i % 50 == 0 { println!("{:?} of 10000 stress tests done", test_i); } @@ -1218,8 +1218,11 @@ mod tests { count: 100, }.make_with(seed.as_fixed_bytes_mut()); - let layout = Layout::default(); - // TODO implement variant for stream codec or use iter_build. 
+ let layout = if flag { + Layout::with_inner_hashing() + } else { + Layout::default() + }; let real = layout.trie_root(x.clone()); let mut memdb = MemoryDB::default(); let mut root = Default::default(); diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index dc754fbb903ec..f3c680f245c54 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -121,10 +121,28 @@ impl trie_root::TrieStream for TrieStream { } fn append_substream(&mut self, other: Self) { + let inner_value_hashing = other.inner_value_hashing; + let range = other.current_value_range.clone(); let data = other.out(); match data.len() { 0..=31 => data.encode_to(&mut self.buffer), - _ => H::hash(&data).as_ref().encode_to(&mut self.buffer), + _ => { + if inner_value_hashing + && range.as_ref().map(|r| r.end - r.start >= trie_constants::INNER_HASH_TRESHOLD) + .unwrap_or_default() { + let meta = TrieMeta { + range: range, + unused_value: false, + contain_hash: false, + do_value_hash: true, + old_hash: false, + recorded_do_value_hash: false, + }; + >>::hash(&data, &meta).as_ref().encode_to(&mut self.buffer); + } else { + H::hash(&data).as_ref().encode_to(&mut self.buffer); + } + }, } } @@ -132,28 +150,31 @@ impl trie_root::TrieStream for TrieStream { let inner_value_hashing = self.inner_value_hashing; let range = self.current_value_range; let data = self.buffer; + let meta = TrieMeta { + range: range, + unused_value: false, + contain_hash: false, + do_value_hash: inner_value_hashing, + old_hash: false, + recorded_do_value_hash: inner_value_hashing, + }; + + // Add the recorded_do_value_hash to encoded + let mut encoded = meta.write_state_meta(); + let encoded = if encoded.len() > 0 { + encoded.extend(data); + encoded + } else { + data + }; + if inner_value_hashing - && range.as_ref().map(|r| r.end - r.start >= trie_constants::INNER_HASH_TRESHOLD) + && meta.range.as_ref().map(|r| r.end - r.start >= trie_constants::INNER_HASH_TRESHOLD) 
.unwrap_or_default() { - let meta = TrieMeta { - range: range, - unused_value: false, - contain_hash: false, - do_value_hash: true, - old_hash: false, - recorded_do_value_hash: true, - }; - // Add the recorded_do_value_hash to encoded - let mut encoded = meta.write_state_meta(); - let encoded = if encoded.len() > 0 { - encoded.extend(data); - encoded - } else { - data - }; + >>::hash(&encoded, &meta) } else { - H::hash(&data) + H::hash(&encoded) } } From c620869ecb3634c0fb3650663f58e565c83f4e17 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 26 May 2021 10:03:29 +0200 Subject: [PATCH 021/188] complete basic backend. --- frame/support/test/tests/instance.rs | 2 +- primitives/state-machine/src/basic.rs | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index e698f989de45f..91363d0857e10 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -327,7 +327,7 @@ fn storage_instance_independence() { let mut storage = sp_core::storage::Storage { top: std::collections::BTreeMap::new(), children_default: std::collections::HashMap::new(), - flag_hashed_value: false, // TODO test with + flag_hashed_value: true, }; sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || { module2::Value::::put(0); diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 950b6d715b18b..054c543e4245d 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -283,8 +283,12 @@ impl Externalities for BasicExternalities { } } - // TODO set flag on layout??? 
- Layout::::default().trie_root(self.inner.top.clone()).as_ref().into() + let layout = if self.inner.flag_hashed_value { + Layout::::with_inner_hashing() + } else { + Layout::::default() + }; + layout.trie_root(self.inner.top.clone()).as_ref().into() } fn child_storage_root( From a3b10398ac97c580fdf213f262c58685dd8ada40 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 26 May 2021 11:04:04 +0200 Subject: [PATCH 022/188] Remove old_hash meta from proof that do not use inner_hashing. --- .../state-machine/src/proving_backend.rs | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index b431fa0c0e04d..996c1570bb0cc 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -113,6 +113,8 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> struct ProofRecorderInner { /// All the records that we have stored so far. records: HashMap>, + /// Is inner hash in proof. + flagged_inner_hash: bool, /// The encoded size of all recorded values. encoded_size: usize, } @@ -128,22 +130,24 @@ impl ProofRecorder { pub fn record(&self, key: Hash, mut val: Option<(DBValue, TrieMeta)>) { let mut inner = self.inner.write(); - let ProofRecorderInner { encoded_size, records } = &mut *inner; + let ProofRecorderInner { encoded_size, records, flagged_inner_hash } = &mut *inner; records.entry(key).or_insert_with(|| { if let Some(val) = val.as_mut() { + if val.1.recorded_do_value_hash { + *flagged_inner_hash = true; + } val.1.set_accessed_value(false); sp_trie::resolve_encoded_meta::(val); *encoded_size += sp_trie::estimate_entry_size(val, H::LENGTH); } val }); - } /// Record actual trie level value access. 
pub fn access_from(&self, key: &Hash, hash_len: usize) { let mut inner = self.inner.write(); - let ProofRecorderInner { encoded_size, records } = &mut *inner; + let ProofRecorderInner { encoded_size, records, .. } = &mut *inner; records.entry(key.clone()) .and_modify(|entry| { if let Some(entry) = entry.as_mut() { @@ -175,13 +179,19 @@ impl ProofRecorder { /// Convert into a [`StorageProof`]. pub fn to_storage_proof(&self) -> StorageProof { - let trie_nodes = self.inner.read() + let inner = self.inner.read(); + let trie_nodes = inner .records .iter() .filter_map(|(_k, v)| v.as_ref().map(|v| { + let mut meta = v.1.clone(); + if !inner.flagged_inner_hash { + // Remove the old hash meta. + meta.old_hash = false; + } < as sp_trie::TrieLayout>::MetaHasher as hash_db::MetaHasher - >::stored_value(v.0.as_slice(), v.1.clone()) + >::stored_value(v.0.as_slice(), meta) })) .collect(); From 693e38b0287de5a978a21ab2883c796668f4f8a1 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 26 May 2021 15:06:23 +0200 Subject: [PATCH 023/188] fix trie test for empty (force layout on empty deltas). 
--- frame/system/src/lib.rs | 2 +- primitives/io/src/lib.rs | 12 ++ primitives/state-machine/src/backend.rs | 24 --- primitives/state-machine/src/basic.rs | 2 +- primitives/trie/src/lib.rs | 189 +++++------------------- test-utils/runtime/src/system.rs | 2 +- 6 files changed, 49 insertions(+), 182 deletions(-) diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 44e7a3d5752fe..30afc77cc60b4 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1408,7 +1408,7 @@ impl Pallet { >::hashed_key().to_vec() => [69u8; 32].encode() ], children_default: map![], - flag_hashed_value: false, + flag_hashed_value: true, }) } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 03bfd73772724..31db22a98bef2 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -1461,6 +1461,18 @@ mod tests { assert_eq!(storage::get(b"hello"), None); assert_eq!(storage::get(b"foo"), Some(b"bar".to_vec())); }); + + let value = vec![7u8; 35]; + t = BasicExternalities::new(Storage { + top: map![b"foo00".to_vec() => value.clone()], + children_default: map![], + flag_hashed_value: true, + }); + + t.execute_with(|| { + assert_eq!(storage::get(b"hello"), None); + assert_eq!(storage::get(b"foo00"), Some(value.clone())); + }); } #[test] diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 771d51f274678..728e4ef9ea21f 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -376,30 +376,6 @@ impl Consolidate for sp_trie::GenericMemoryDB } } -/// Insert input pairs into memory db. -/// TODO unused remove? 
-#[cfg(test)] -pub(crate) fn insert_into_memory_db(mdb: &mut sp_trie::MemoryDB, input: I) -> Option - where - H: Hasher, - I: IntoIterator, -{ - use sp_trie::{TrieMut, trie_types::TrieDBMut}; - - let mut root = ::Out::default(); - { - let mut trie = TrieDBMut::::new(mdb, &mut root); - for (key, value) in input { - if let Err(e) = trie.insert(&key, &value) { - log::warn!(target: "trie", "Failed to write to trie: {}", e); - return None; - } - } - } - - Some(root) -} - /// Insert input pairs into memory db. #[cfg(test)] pub(crate) fn insert_into_memory_db_no_meta(mdb: &mut sp_trie::MemoryDBNoMeta, input: I) -> Option diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 054c543e4245d..c9794486b6acc 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -129,7 +129,7 @@ impl From> for BasicExternalities { inner: Storage { top: hashmap, children_default: Default::default(), - flag_hashed_value: false, + flag_hashed_value: true, }, extensions: Default::default(), } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 8e0eb24f95200..d867321ca088f 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -500,49 +500,6 @@ pub mod trie_types { pub type TrieError = trie_db::TrieError; } -/* -/// Create a proof for a subset of keys in a trie. -/// -/// The `keys` may contain any set of keys regardless of each one of them is included -/// in the `db`. -/// -/// For a key `K` that is included in the `db` a proof of inclusion is generated. -/// For a key `K` that is not included in the `db` a proof of non-inclusion is generated. -/// These can be later checked in `verify_trie_proof`. 
-pub fn generate_trie_proof<'a, L: TrieConfiguration, I, K, DB>( - db: &DB, - root: TrieHash, - keys: I, -) -> Result>, Box>> where - I: IntoIterator, - K: 'a + AsRef<[u8]>, - DB: hash_db::HashDBRef, -{ - let trie = TrieDB::::new(db, &root)?; - generate_proof(&trie, keys) -} - -/// Verify a set of key-value pairs against a trie root and a proof. -/// -/// Checks a set of keys with optional values for inclusion in the proof that was generated by -/// `generate_trie_proof`. -/// If the value in the pair is supplied (`(key, Some(value))`), this key-value pair will be -/// checked for inclusion in the proof. -/// If the value is omitted (`(key, None)`), this key will be checked for non-inclusion in the -/// proof. -pub fn verify_trie_proof<'a, L: TrieConfiguration, I, K, V>( - root: &TrieHash, - proof: &[Vec], - items: I, -) -> Result<(), VerifyError, error::Error>> where - I: IntoIterator)>, - K: 'a + AsRef<[u8]>, - V: 'a + AsRef<[u8]>, -{ - verify_proof::, _, _, _>(root, proof, items) -} -*/ - /// Determine a trie root given a hash DB and delta values. 
pub fn delta_trie_root( db: &mut DB, @@ -561,6 +518,9 @@ pub fn delta_trie_root( let mut delta = delta.into_iter().collect::>(); delta.sort_by(|l, r| l.0.borrow().cmp(r.0.borrow())); + if delta.len() == 0 { + trie.force_layout_meta()?; + } for (key, change) in delta { match change.borrow() { Some(val) => trie.insert(key.borrow(), val.borrow())?, @@ -1008,18 +968,18 @@ mod tests { >::hashed_null_node() } - fn check_equivalent(input: &Vec<(&[u8], &[u8])>) { + fn check_equivalent(input: &Vec<(&[u8], &[u8])>, layout: T) { { - // TODO test flagged - let layout = T::default(); let closed_form = layout.trie_root(input.clone()); let d = layout.trie_root_unhashed(input.clone()); println!("Data: {:#x?}, {:#x?}", d, Blake2Hasher::hash(&d[..])); let persistent = { let mut memdb = MemoryDBMeta::<_, T::MetaHasher>::default(); let mut root = Default::default(); - // TODO test flagged - let mut t = TrieDBMut::::new(&mut memdb, &mut root); + let mut t = TrieDBMut::::new_with_layout(&mut memdb, &mut root, layout); + if input.len() == 0 { + t.force_layout_meta().unwrap(); + } for (x, y) in input.iter().rev() { t.insert(x, y).unwrap(); } @@ -1029,16 +989,17 @@ mod tests { } } - fn check_iteration(input: &Vec<(&[u8], &[u8])>) { + fn check_iteration(input: &Vec<(&[u8], &[u8])>, layout: T) { let mut memdb = MemoryDBMeta::<_, T::MetaHasher>::default(); let mut root = Default::default(); { - let mut t = TrieDBMut::::new(&mut memdb, &mut root); + let mut t = TrieDBMut::::new_with_layout(&mut memdb, &mut root, layout); for (x, y) in input.clone() { t.insert(x, y).unwrap(); } } { + // Not using layout: it should be initialized from state root meta. 
let t = TrieDB::::new(&mut memdb, &root).unwrap(); assert_eq!( input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), @@ -1049,6 +1010,20 @@ mod tests { } } + fn check_input(input: &Vec<(&[u8], &[u8])>) { + + let layout = Layout::with_inner_hashing(); + check_equivalent::(input, layout.clone()); + + + let layout = Layout::default(); + check_equivalent::(input, layout.clone()); + check_iteration::(input, layout); + let layout = Layout::with_inner_hashing(); + check_equivalent::(input, layout.clone()); + check_iteration::(input, layout); + } + #[test] fn default_trie_root() { let mut db = MemoryDB::default(); @@ -1066,15 +1041,13 @@ mod tests { #[test] fn empty_is_equivalent() { let input: Vec<(&[u8], &[u8])> = vec![]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } #[test] fn leaf_is_equivalent() { let input: Vec<(&[u8], &[u8])> = vec![(&[0xaa][..], &[0xbb][..])]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } #[test] @@ -1083,8 +1056,7 @@ mod tests { (&[0xaa][..], &[0x10][..]), (&[0xba][..], &[0x11][..]), ]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } #[test] @@ -1093,8 +1065,7 @@ mod tests { (&[0xaa][..], &[0x10][..]), (&[0xab][..], &[0x11][..]), ]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } #[test] @@ -1109,8 +1080,7 @@ mod tests { let mut d = st.make(); d.sort_by(|&(ref a, _), &(ref b, _)| a.cmp(b)); let dr = d.iter().map(|v| (&v.0[..], &v.1[..])).collect(); - check_equivalent::(&dr); - check_iteration::(&dr); + check_input(&dr); } #[test] @@ -1120,8 +1090,7 @@ mod tests { (&[0xaa, 0xaa][..], &[0xaa][..]), (&[0xaa, 0xbb][..], &[0xab][..]) ]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } #[test] @@ -1134,8 +1103,7 @@ mod tests { (&[0xbb, 0xbb][..], &[0xbb][..]), (&[0xbb, 0xcc][..], &[0xbc][..]), ]; - check_equivalent::(&input); - check_iteration::(&input); + 
check_input(&input); } #[test] @@ -1144,8 +1112,7 @@ mod tests { (&[0xaa][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]), (&[0xba][..], &[0x11][..]), ]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } #[test] @@ -1154,8 +1121,7 @@ mod tests { (&[0xaa][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]), (&[0xba][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]) ]; - check_equivalent::(&input); - check_iteration::(&input); + check_input(&input); } fn populate_trie<'db, T>( @@ -1259,7 +1225,6 @@ mod tests { #[test] fn codec_trie_empty() { - // TODO test other layout states. let layout = Layout::default(); let input: Vec<(&[u8], &[u8])> = vec![]; let trie = layout.trie_root_unhashed(input); @@ -1269,7 +1234,6 @@ mod tests { #[test] fn codec_trie_single_tuple() { - // TODO switch to old layout let layout = Layout::default(); let input = vec![ (vec![0xaa], vec![0xbb]) @@ -1336,92 +1300,7 @@ mod tests { assert_eq!(pairs, iter_pairs); } -/* - #[test] - fn proof_non_inclusion_works() { - let pairs = vec![ - (hex!("0102").to_vec(), hex!("01").to_vec()), - (hex!("0203").to_vec(), hex!("0405").to_vec()), - ]; - - let mut memdb = MemoryDB::default(); - let mut root = Default::default(); - populate_trie::(&mut memdb, &mut root, &pairs); - - let non_included_key: Vec = hex!("0909").to_vec(); - let proof = generate_trie_proof::( - &memdb, - root, - &[non_included_key.clone()] - ).unwrap(); - - // Verifying that the K was not included into the trie should work. - assert!(verify_trie_proof::>( - &root, - &proof, - &[(non_included_key.clone(), None)], - ).is_ok() - ); - - // Verifying that the K was included into the trie should fail. 
- assert!(verify_trie_proof::>( - &root, - &proof, - &[(non_included_key, Some(hex!("1010").to_vec()))], - ).is_err() - ); - } - - #[test] - fn proof_inclusion_works() { - let pairs = vec![ - (hex!("0102").to_vec(), hex!("01").to_vec()), - (hex!("0203").to_vec(), hex!("0405").to_vec()), - ]; - - let mut memdb = MemoryDB::default(); - let mut root = Default::default(); - populate_trie::(&mut memdb, &mut root, &pairs); - - let proof = generate_trie_proof::( - &memdb, - root, - &[pairs[0].0.clone()] - ).unwrap(); - // Check that a K, V included into the proof are verified. - assert!(verify_trie_proof::( - &root, - &proof, - &[(pairs[0].0.clone(), Some(pairs[0].1.clone()))] - ).is_ok() - ); - - // Absence of the V is not verified with the proof that has K, V included. - assert!(verify_trie_proof::>( - &root, - &proof, - &[(pairs[0].0.clone(), None)] - ).is_err() - ); - - // K not included into the trie is not verified. - assert!(verify_trie_proof::( - &root, - &proof, - &[(hex!("4242").to_vec(), Some(pairs[0].1.clone()))] - ).is_err() - ); - - // K included into the trie but not included into the proof is not verified. - assert!(verify_trie_proof::( - &root, - &proof, - &[(pairs[1].0.clone(), Some(pairs[1].1.clone()))] - ).is_err() - ); - } -*/ #[test] fn generate_storage_root_with_proof_works_independently_from_the_delta_order() { let proof = StorageProof::decode(&mut &include_bytes!("../test-res/proof")[..]).unwrap(); diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index fbf6012d083e7..cfffb1f535028 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -385,7 +385,7 @@ mod tests { } ], children_default: map![], - flag_hashed_value: false, // TODO test with true variant + flag_hashed_value: true, }, ) } From 6e89b3afcdae80472864ebcf245bea16a582915d Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 26 May 2021 16:17:41 +0200 Subject: [PATCH 024/188] Root update fix. 
--- primitives/state-machine/src/trie_backend.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 788eb47f89588..7848663135576 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -379,12 +379,16 @@ pub mod tests { } #[test] - fn storage_root_transaction_is_empty() { - storage_root_transaction_is_empty_inner(false); - storage_root_transaction_is_empty_inner(true); - } - fn storage_root_transaction_is_empty_inner(flagged: bool) { - assert!(test_trie(flagged).storage_root(iter::empty(), false).1.drain().is_empty()); + fn storage_root_transaction_state_root_update() { + // a drop a insert of same hash: rc is 0 + assert_eq!(test_trie(false).storage_root(iter::empty(), false).1.drain() + .into_iter().filter(|v| (v.1).1 != 0).count(), 0); + // a drop a insert + assert_eq!(test_trie(false).storage_root(iter::empty(), true).1.drain() + .into_iter().filter(|v| (v.1).1 != 0).count(), 2); + // a drop a insert of same hash: rc is 0 + assert_eq!(test_trie(true).storage_root(iter::empty(), true).1.drain() + .into_iter().filter(|v| (v.1).1 != 0).count(), 0); } #[test] From f56879466330a98d996142d87ab443e837cb5b26 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 26 May 2021 17:28:10 +0200 Subject: [PATCH 025/188] debug on meta --- primitives/trie/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index d867321ca088f..aebb5c6007141 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -49,7 +49,7 @@ pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX, MetaHasher}; pub use hash_db::NoMeta; /// Meta use by trie state. -#[derive(Default, Clone)] +#[derive(Default, Clone, Debug)] pub struct TrieMeta { // range of encoded value or hashed value. 
pub range: Option>, From fb0ea035129aa965c205387464317ff6a7cb483e Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 May 2021 10:13:52 +0200 Subject: [PATCH 026/188] Use trie key iteration that do not include value in proofs. --- Cargo.lock | 16 ++++---- .../state-machine/src/trie_backend_essence.rs | 39 +++++++++++++------ primitives/trie/src/lib.rs | 3 +- 3 files changed, 38 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a9894d505cb74..5ba8be6d5afd4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2342,7 +2342,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" [[package]] name = "hash256-std-hasher" @@ -2356,7 +2356,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" dependencies = [ "crunchy", ] @@ -3001,7 +3001,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", @@ -3793,7 +3793,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" +source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" dependencies = [ "hash-db", "hashbrown", @@ -10375,7 +10375,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" dependencies = [ "criterion", "hash-db", @@ -10390,7 +10390,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" dependencies = [ "hash-db", "hashbrown", @@ -10402,7 +10402,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" dependencies = [ "hash-db", ] @@ -10420,7 +10420,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#f5b50501d4dcc7c9c3b8b31908f677291b0e2603" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 5d1f50e9c5a5a..7b2bd65c293d5 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ 
b/primitives/state-machine/src/trie_backend_essence.rs @@ -25,7 +25,8 @@ use crate::{warn, debug}; use hash_db::{self, Hasher, Prefix}; use sp_trie::{Trie, PrefixedMemoryDB, DBValue, empty_child_trie_root, read_trie_value, read_child_trie_value, - for_keys_in_child_trie, KeySpacedDB, TrieDBIterator, TrieMeta}; + for_keys_in_child_trie, KeySpacedDB, TrieDBIterator, TrieDBKeyIterator, + TrieMeta}; use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; @@ -142,7 +143,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let trie = TrieDB::::new(dyn_eph, root) .map_err(|e| format!("TrieDB creation error: {}", e))?; - let mut iter = trie.iter() + let mut iter = trie.key_iter() .map_err(|e| format!("TrieDB iteration error: {}", e))?; // The key just after the one given in input, basically `key++0`. @@ -159,7 +160,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let next_element = iter.next(); let next_key = if let Some(next_element) = next_element { - let (next_key, _) = next_element + let next_key = next_element .map_err(|e| format!("TrieDB iterator next error: {}", e))?; Some(next_key) } else { @@ -238,15 +239,15 @@ impl, H: Hasher> TrieBackendEssence where H::Out: }; let mut root = H::Out::default(); root.as_mut().copy_from_slice(&root_vec); - self.keys_values_with_prefix_inner(&root, prefix, |k, _v| f(k), Some(child_info)) + self.keys_with_prefix_inner(&root, prefix, |k| f(k), Some(child_info)) } /// Execute given closure for all keys starting with prefix. 
pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) + self.keys_with_prefix_inner(&self.root, prefix, |k| f(k), None) } - fn keys_values_with_prefix_inner( + fn keys_with_prefix_inner( &self, root: &H::Out, prefix: &[u8], @@ -256,12 +257,12 @@ impl, H: Hasher> TrieBackendEssence where H::Out: let mut iter = move |db| -> sp_std::result::Result<(), Box>> { let trie = TrieDB::::new(db, root)?; - for x in TrieDBIterator::new_prefixed(&trie, prefix)? { - let (key, value) = x?; + for x in TrieDBKeyIterator::new_prefixed(&trie, prefix)? { + let key = x?; debug_assert!(key.starts_with(prefix)); - f(&key, &value); + f(&key); } Ok(()) @@ -279,8 +280,24 @@ impl, H: Hasher> TrieBackendEssence where H::Out: } /// Execute given closure for all key and values starting with prefix. - pub fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, f, None) + pub fn for_key_values_with_prefix(&self, prefix: &[u8], mut f: F) { + let mut iter = move |db| -> sp_std::result::Result<(), Box>> { + let trie = TrieDB::::new(db, &self.root)?; + + for x in TrieDBIterator::new_prefixed(&trie, prefix)? { + let (key, value) = x?; + + debug_assert!(key.starts_with(prefix)); + + f(&key, &value); + } + + Ok(()) + }; + + if let Err(e) = iter(self) { + debug!(target: "trie", "Error while iterating by prefix: {}", e); + } } } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index aebb5c6007141..8efc7398a4ac3 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -39,7 +39,8 @@ pub use storage_proof::StorageProof; /// Various re-exports from the `trie-db` crate. 
pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, - nibble_ops, TrieDBIterator, Meta, node::{NodePlan, ValuePlan}, GlobalMeta, + nibble_ops, TrieDBIterator, TrieDBKeyIterator, Meta, node::{NodePlan, ValuePlan}, + GlobalMeta, }; /// Various re-exports from the `memory-db` crate. pub use memory_db::KeyFunction; From cd98390cb21729737ad7ca149fcf9eac82a461a0 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 May 2021 10:39:29 +0200 Subject: [PATCH 027/188] switch default test ext to use inner hash. --- client/executor/src/integration_tests/mod.rs | 29 ++++++++++++++++++++ primitives/state-machine/src/testing.rs | 7 ++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 0ea312f10a3e4..ca6ec42cb53b8 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -777,3 +777,32 @@ fn panic_in_spawned_instance_panics_on_joining_its_result(wasm_method: WasmExecu assert!(format!("{}", error_result).contains("Spawned task")); } + +test_wasm_execution!(state_hashing_update); +fn state_hashing_update(wasm_method: WasmExecutionMethod) { + // use externalities without storage flag. + let mut ext = TestExternalities::new(Default::default()); + + let mut ext = ext.ext(); + ext.set_storage(b"foo".to_vec(), vec![1u8; 1_000]); // big inner hash + ext.set_storage(b"foo2".to_vec(), vec![3u8; 16]); // no inner hash + ext.set_storage(b"foo222".to_vec(), vec![5u8; 100]); // inner hash + + let output = call_in_wasm( + "test_data_in", + &b"Hello world".to_vec().encode(), + wasm_method, + &mut ext, + ).unwrap(); + + assert_eq!(output, b"all ok!".to_vec().encode()); + + // TODO new call in wasm to flag state. + + // TODO Query check + + // TODO update values with same value (same root likely). + + + // TODO update values with same value (same root likely). 
+} diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 033590b6d1052..b79d6866edcca 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -236,7 +236,12 @@ impl Default for TestExternalities where H::Out: Ord + 'static + codec::Codec, { - fn default() -> Self { Self::new(Default::default()) } + fn default() -> Self { + // default to inner hashed. + let mut storage = Storage::default(); + storage.flag_hashed_value = true; + Self::new(storage) + } } impl From for TestExternalities From 5ab0c012532ad9cce975bff699812c5244afe613 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 May 2021 16:39:36 +0200 Subject: [PATCH 028/188] small integration test, and fix tx cache mgmt in ext. test failing --- client/executor/runtime-test/src/lib.rs | 7 +++ client/executor/src/integration_tests/mod.rs | 51 ++++++++++++++++++-- primitives/state-machine/src/ext.rs | 1 + 3 files changed, 55 insertions(+), 4 deletions(-) diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index bfba4ef039395..78b824140bcc7 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -62,6 +62,13 @@ sp_core::wasm_export_functions! { b"all ok!".to_vec() } + fn test_switch_state() { + print("switch_state"); + storage::flag_hash_value(); + print("switched!"); + } + + fn test_clear_prefix(input: Vec) -> Vec { storage::clear_prefix(&input); b"all ok!".to_vec() diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index ca6ec42cb53b8..75435b4c57041 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -797,12 +797,55 @@ fn state_hashing_update(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); - // TODO new call in wasm to flag state. + let root1 = ext.storage_root(); + // flag state. 
+ let _ = call_in_wasm( + "test_switch_state", + Default::default(), + wasm_method, + &mut ext, + ).unwrap(); + let root2 = ext.storage_root(); + + assert!(root1 != root2); + + // Same value update do not change root. + let output = call_in_wasm( + "test_data_in", + &b"Hello world".to_vec().encode(), + wasm_method, + &mut ext, + ).unwrap(); + + assert_eq!(output, b"all ok!".to_vec().encode()); + + let root3 = ext.storage_root(); + assert!(root2 == root3); - // TODO Query check + // change does + let output = call_in_wasm( + "test_data_in", + &b"Hello".to_vec().encode(), + wasm_method, + &mut ext, + ).unwrap(); + + assert_eq!(output, b"all ok!".to_vec().encode()); - // TODO update values with same value (same root likely). + let root3 = ext.storage_root(); + assert!(root2 != root3); + // restore is different from original + let output = call_in_wasm( + "test_data_in", + &b"Hello world".to_vec().encode(), + wasm_method, + &mut ext, + ).unwrap(); + + assert_eq!(output, b"all ok!".to_vec().encode()); - // TODO update values with same value (same root likely). + let root4 = ext.storage_root(); + assert!(root2 != root4); + assert!(root3 != root4); } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 972ea34fa687d..0b3e155c11594 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -739,6 +739,7 @@ where } fn flag_hash_value(&mut self) { + self.mark_dirty(); self.overlay.set_flag_hash_value() } } From 99ef85c50a80674bd18ecf5cf777ba630b37dd4e Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 May 2021 17:30:45 +0200 Subject: [PATCH 029/188] Proof scenario at state-machine level. 
--- client/executor/src/integration_tests/mod.rs | 41 +------- primitives/state-machine/src/lib.rs | 100 +++++++++++++++++++ primitives/state-machine/src/trie_backend.rs | 2 +- 3 files changed, 102 insertions(+), 41 deletions(-) diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 75435b4c57041..5076352dff40a 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -807,45 +807,6 @@ fn state_hashing_update(wasm_method: WasmExecutionMethod) { ).unwrap(); let root2 = ext.storage_root(); + // Note that in this case all the value did switch (in memory changes). assert!(root1 != root2); - - // Same value update do not change root. - let output = call_in_wasm( - "test_data_in", - &b"Hello world".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(); - - assert_eq!(output, b"all ok!".to_vec().encode()); - - let root3 = ext.storage_root(); - assert!(root2 == root3); - - // change does - let output = call_in_wasm( - "test_data_in", - &b"Hello".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(); - - assert_eq!(output, b"all ok!".to_vec().encode()); - - let root3 = ext.storage_root(); - assert!(root2 != root3); - - // restore is different from original - let output = call_in_wasm( - "test_data_in", - &b"Hello world".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(); - - assert_eq!(output, b"all ok!".to_vec().encode()); - - let root4 = ext.storage_root(); - assert!(root2 != root4); - assert!(root3 != root4); } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index cb910552e814a..726d1e51b080b 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1493,6 +1493,106 @@ mod tests { ); } + #[test] + fn inner_state_hashing_switch_proofs() { + + let (mut mdb, mut root) = trie_backend::tests::test_db(false); + { + let mut trie = TrieDBMut::from_existing_with_layout( + &mut 
mdb, + &mut root, + Layout::default(), + ).unwrap(); + trie.insert(b"foo", vec![1u8; 1_000].as_slice()) // big inner hash + .expect("insert failed"); + trie.insert(b"foo2", vec![3u8; 16].as_slice()) // no inner hash + .expect("insert failed"); + trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash + .expect("insert failed"); + } + + let check_proof = |mdb, root| -> StorageProof { + let remote_backend = TrieBackend::new(mdb, root); + let remote_root = remote_backend.storage_root(::std::iter::empty(), false).0; + let remote_proof = prove_read(remote_backend, &[b"foo222"]).unwrap(); + // check proof locally + let local_result1 = read_proof_check::( + remote_root, + remote_proof.clone(), + &[b"foo222"], + ).unwrap(); + // check that results are correct + assert_eq!( + local_result1.into_iter().collect::>(), + vec![(b"foo222".to_vec(), Some(vec![5u8; 100]))], + ); + remote_proof + }; + + let remote_proof = check_proof(mdb.clone(), root.clone()); + + // check full values in proof + assert!(remote_proof.encode().len() > 1_100); + assert!(remote_proof.encoded_size() > 1_100); + let root1 = root.clone(); + + + // trigger switch + { + let mut trie = TrieDBMut::from_existing_with_layout( + &mut mdb, + &mut root, + Layout::with_inner_hashing(), + ).unwrap(); + trie.force_layout_meta() + .expect("failed forced layout change"); + } + let root2 = root.clone(); + assert!(root1 != root2); + let remote_proof = check_proof(mdb.clone(), root.clone()); + // nodes are still with old hashing. 
+ assert!(remote_proof.encode().len() > 1_100); + assert!(remote_proof.encoded_size() > 1_100); + assert_eq!(remote_proof.encode().len(), + remote_proof.encoded_size()); + + // update with same value do not change + { + let mut trie = TrieDBMut::from_existing_with_layout( + &mut mdb, + &mut root, + Layout::default(), + ).unwrap(); + trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash + .expect("insert failed"); + } + let root3 = root.clone(); + assert!(root2 == root3); + // different value then same is enough to update + // from triedbmut persipective (do not + // work with state machine as only changes do makes + // it to payload (would require a special host function). + { + let mut trie = TrieDBMut::from_existing_with_layout( + &mut mdb, + &mut root, + Layout::default(), + ).unwrap(); + trie.insert(b"foo222", vec![4u8].as_slice()) // inner hash + .expect("insert failed"); + trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash + .expect("insert failed"); + } + let root3 = root.clone(); + assert!(root2 != root3); + let remote_proof = check_proof(mdb.clone(), root.clone()); + // nodes foo is replaced by its hashed value form. 
+ assert!(remote_proof.encode().len() < 1000); + assert!(remote_proof.encoded_size() < 1000); + assert_eq!(remote_proof.encode().len(), + remote_proof.encoded_size()); + } + #[test] fn child_storage_uuid() { diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index bb99f0dc331f3..afee980aedc6a 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -275,7 +275,7 @@ pub mod tests { const CHILD_KEY_1: &[u8] = b"sub1"; - fn test_db(hashed_value: bool) -> (PrefixedMemoryDB, H256) { + pub(crate) fn test_db(hashed_value: bool) -> (PrefixedMemoryDB, H256) { let child_info = ChildInfo::new_default(CHILD_KEY_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); From 4d6aee80f037c7e471564c4dc8421c8f893ad15e Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 May 2021 17:58:52 +0200 Subject: [PATCH 030/188] trace for db upgrade --- client/db/src/upgrade.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index f5c0992ea231f..835ff1ba3aa42 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -26,6 +26,7 @@ use sp_runtime::traits::Block as BlockT; use crate::{columns, utils::DatabaseType}; use kvdb_rocksdb::{Database, DatabaseConfig}; use codec::{Decode, Encode}; +use log::info; /// Version file name. const VERSION_FILE_NAME: &'static str = "db_version"; @@ -102,11 +103,16 @@ fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> sp_b /// Migration from version3 to version4: /// - Trie state meta for state that could be hashed internaly. 
fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> sp_blockchain::Result<()> { + + info!("Starting trie node migration."); + let start_time = std::time::Instant::now(); let db_path = db_path.to_str() .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; let db_cfg = DatabaseConfig::with_columns(V2_NUM_COLUMNS); let db = Database::open(&db_cfg, db_path).map_err(db_err)?; + let mut nb_node_prefixed = 0; + let mut nb_node_seen = 0; let batch_size = 10_000; // TODO use bigger size (need to iterate all each time). loop { let mut full_batch = false; @@ -116,8 +122,10 @@ fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> sp_b // Note that every batch will restart full iter, could use // a `iter_from` function. for entry in db.iter(columns::STATE) { + nb_node_seen += 1; if let Some(new_val) = sp_trie::tag_old_hashes::>(&entry.1) { transaction.put_vec(columns::STATE, &entry.0, new_val); + nb_node_prefixed += 1; size += 1; if size == batch_size { full_batch = true; @@ -125,11 +133,14 @@ fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> sp_b } } } + info!("Committing batch, currently processed: {} of {} read nodes", nb_node_prefixed, nb_node_seen); db.write(transaction).map_err(db_err)?; if !full_batch { break; } } + info!("Trie node migration finished in {:?} ms.", start_time.elapsed().as_millis()); + info!("{:?} nodes prefixed for {:?} node.", nb_node_prefixed, nb_node_seen); Ok(()) } From 791acae981f706672acef4e11ce878e93e6cf97a Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 27 May 2021 20:04:02 +0200 Subject: [PATCH 031/188] try different param --- client/db/src/upgrade.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 835ff1ba3aa42..2197e5b11d82e 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -113,10 +113,11 @@ fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> sp_b let mut 
nb_node_prefixed = 0; let mut nb_node_seen = 0; - let batch_size = 10_000; // TODO use bigger size (need to iterate all each time). + let batch_size = 50_000; // TODO use bigger size (need to iterate all each time). loop { let mut full_batch = false; let mut size = 0; + let mut last = vec![0u8, 0u8, 0u8, 0u8]; let mut transaction = db.transaction(); // Get all the keys we need to update. // Note that every batch will restart full iter, could use @@ -129,11 +130,19 @@ fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> sp_b size += 1; if size == batch_size { full_batch = true; + if entry.0.len() > 3 { + last.copy_from_slice(&entry.0[..4]); + } break; } } } - info!("Committing batch, currently processed: {} of {} read nodes", nb_node_prefixed, nb_node_seen); + info!( + "Committing batch, currently processed: {} of {} read nodes at {:?}", + nb_node_prefixed, + nb_node_seen, + last, + ); db.write(transaction).map_err(db_err)?; if !full_batch { break; From b51eaa4c1e8e720f14bf126a101d15daa3c622d1 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 28 May 2021 09:17:24 +0200 Subject: [PATCH 032/188] act more like iter_from. --- client/db/src/upgrade.rs | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 2197e5b11d82e..ad6ebfb2a70cc 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -117,31 +117,34 @@ fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> sp_b loop { let mut full_batch = false; let mut size = 0; - let mut last = vec![0u8, 0u8, 0u8, 0u8]; + let mut last = Vec::new(); let mut transaction = db.transaction(); // Get all the keys we need to update. - // Note that every batch will restart full iter, could use - // a `iter_from` function. 
+ // Note that every batch will restart full iter, + // if this prove to slow for archive node, this could be + // switch to a `iter_from` function but would require + // to upstream change to our rocksdb crate. for entry in db.iter(columns::STATE) { - nb_node_seen += 1; - if let Some(new_val) = sp_trie::tag_old_hashes::>(&entry.1) { - transaction.put_vec(columns::STATE, &entry.0, new_val); - nb_node_prefixed += 1; - size += 1; - if size == batch_size { - full_batch = true; - if entry.0.len() > 3 { - last.copy_from_slice(&entry.0[..4]); + if &entry.1[..] > last.as_slice() { + nb_node_seen += 1; + if let Some(new_val) = sp_trie::tag_old_hashes::>(&entry.1) { + transaction.put_vec(columns::STATE, &entry.0, new_val); + nb_node_prefixed += 1; + size += 1; + if size == batch_size { + full_batch = true; + last = entry.0.to_vec(); + break; } - break; } } } info!( - "Committing batch, currently processed: {} of {} read nodes at {:?}", + "Committing batch, currently processed: {} of {} read nodes at {:?}, {:?}", nb_node_prefixed, nb_node_seen, last, + start_time.elapsed().as_millis(), ); db.write(transaction).map_err(db_err)?; if !full_batch { From 715b1a536f2acd8e9143f593a20f482bc7bfc696 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 28 May 2021 10:07:08 +0200 Subject: [PATCH 033/188] Bigger batches. --- client/db/src/upgrade.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index ad6ebfb2a70cc..f0129251b691e 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -113,7 +113,7 @@ fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> sp_b let mut nb_node_prefixed = 0; let mut nb_node_seen = 0; - let batch_size = 50_000; // TODO use bigger size (need to iterate all each time). 
+ let batch_size = 250_000; loop { let mut full_batch = false; let mut size = 0; From 3cdb0ed5fdfdaa1f02b7a7cbb4c310e99941814f Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 28 May 2021 14:42:48 +0200 Subject: [PATCH 034/188] Update trie dependency. --- Cargo.lock | 18 +++++++++--------- primitives/state-machine/Cargo.toml | 2 +- primitives/trie/Cargo.toml | 2 +- test-utils/runtime/Cargo.toml | 2 +- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ba8be6d5afd4..24c3ff64dee71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2342,7 +2342,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" [[package]] name = "hash256-std-hasher" @@ -2356,7 +2356,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" dependencies = [ "crunchy", ] @@ -3001,7 +3001,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", @@ -3793,7 +3793,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" +source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" dependencies = [ "hash-db", "hashbrown", @@ -10375,7 +10375,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" dependencies = [ "criterion", "hash-db", @@ -10389,8 +10389,8 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.22.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" +version = "0.22.5" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" dependencies = [ "hash-db", "hashbrown", @@ -10402,7 +10402,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" dependencies = [ "hash-db", ] @@ -10420,7 +10420,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#24f114312feeb2f17510709cc04276b0843a7eb4" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 79fccef08c199..e04dfeb6b1415 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml 
@@ -18,7 +18,7 @@ log = { version = "0.4.11", optional = true } thiserror = { version = "1.0.21", optional = true } parking_lot = { version = "0.11.1", optional = true } hash-db = { version = "0.15.2", default-features = false } -trie-db = { version = "0.22.2", default-features = false } +trie-db = { version = "0.22.5", default-features = false } trie-root = { version = "0.16.0", default-features = false } sp-trie = { version = "3.0.0", path = "../trie", default-features = false } sp-core = { version = "3.0.0", path = "../core", default-features = false } diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 4396550a48a8f..bf91fff31b8b6 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -21,7 +21,7 @@ harness = false codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } sp-std = { version = "3.0.0", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } -trie-db = { version = "0.22.2", default-features = false } +trie-db = { version = "0.22.5", default-features = false } trie-root = { version = "0.16.0", default-features = false } memory-db = { version = "0.26.0", default-features = false } sp-core = { version = "3.0.0", default-features = false, path = "../core" } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 96b7efff83380..0848d0506dfc4 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -38,7 +38,7 @@ pallet-timestamp = { version = "3.0.0", default-features = false, path = "../../ sp-finality-grandpa = { version = "3.0.0", default-features = false, path = "../../primitives/finality-grandpa" } sp-trie = { version = "3.0.0", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "3.0.0", default-features = false, path = "../../primitives/transaction-pool" } -trie-db = { version = "0.22.2", default-features = false } +trie-db = { version 
= "0.22.5", default-features = false } parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } sc-service = { version = "0.9.0", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } sp-state-machine = { version = "0.9.0", default-features = false, path = "../../primitives/state-machine" } From ae1454f2b6a6956e4157245abc42d05f9f45dcef Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 8 Jun 2021 12:22:48 +0200 Subject: [PATCH 035/188] drafting codec changes and refact --- primitives/trie/src/lib.rs | 193 +++++++++++++-------------- primitives/trie/src/node_codec.rs | 84 ++++++------ primitives/trie/src/node_header.rs | 63 +++++++-- primitives/trie/src/storage_proof.rs | 1 + 4 files changed, 187 insertions(+), 154 deletions(-) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 8efc7398a4ac3..57aa05adeed04 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -52,85 +52,104 @@ pub use hash_db::NoMeta; /// Meta use by trie state. #[derive(Default, Clone, Debug)] pub struct TrieMeta { - // range of encoded value or hashed value. + /// Range of encoded value or hashed value. pub range: Option>, - // When `do_value_hash` is true, try to - // store this behavior in top node - // encoded (need to be part of state). + /// Defined in the trie layout, when used with + /// `TrieDbMut` it switch nodes to alternative hashing + /// method by setting `do_value_hash` to true. + /// TODO may be useless (indicate that previous hash is + /// not using `do_value_hash`). + pub switch_to_value_hash: bool, + /// When `do_value_hash` is true, try to + /// store this behavior in top node + /// encoded (need to be part of state). + /// TODO remove pub recorded_do_value_hash: bool, - // Does current encoded contains a hash instead of - // a value (information stored in meta for proofs). 
+ /// Does current encoded contains a hash instead of + /// a value (information stored in meta for proofs). pub contain_hash: bool, - // Flag indicating if value hash can run. - // When defined for a node it gets active - // for all children node + /// Flag indicating if alternative value hash can run. + /// This is read and written as a state meta of the node. + /// TODO replace by TrieDbMut node variant + /// TODO replace by Option being size treshold. pub do_value_hash: bool, - // Record if a value was accessed, this is - // set as accessed by defalult, but can be - // change on access explicitely: `HashDB::get_with_meta`. - // and reset on access explicitely: `HashDB::access_from`. + /// Record if a value was accessed, this is + /// set as accessed by defalult, but can be + /// change on access explicitely: `HashDB::get_with_meta`. + /// and reset on access explicitely: `HashDB::access_from`. + /// TODO!! remove from meta: only use in proof recorder context. pub unused_value: bool, - // Indicate that a node is using old hash scheme. - // Write with `do_value_hash` inactive will set this to - // true. - // In this case hash is not doing internal hashing, - // but next write with `do_value_hash` will remove switch scheme. + /// Indicate that a node is using old hash scheme. + /// TODO remove pub old_hash: bool, } impl Meta for TrieMeta { - /// Layout do not have content. + /// When true apply inner hashing of value. type GlobalMeta = bool; + // TODO remove upstraem /// When true apply inner hashing of value. 
type StateMeta = bool; - fn set_state_meta(&mut self, state_meta: Self::StateMeta) { - self.recorded_do_value_hash = state_meta; - self.do_value_hash = state_meta; + // TODO remove upstream + fn set_state_meta(&mut self, _state_meta: Self::StateMeta) { + /*if !self.do_value_hash && state_meta { + self.switch_to_value_hash = true; + self.do_value_hash = true; + }*/ } + // TODO remove upstream fn extract_global_meta(&self) -> Self::GlobalMeta { self.recorded_do_value_hash } fn set_global_meta(&mut self, global_meta: Self::GlobalMeta) { - if global_meta { - self.recorded_do_value_hash = true; + if !self.do_value_hash && state_meta { + self.switch_to_value_hash = true; self.do_value_hash = true; } } + // TODO remove upstream? fn has_state_meta(&self) -> bool { - self.recorded_do_value_hash + false + //self.do_value_hash } + // TODO consider removal upstream of this method (node type in codec) fn read_state_meta(&mut self, data: &[u8]) -> Result { - let offset = if data[0] == trie_constants::ENCODED_META_ALLOW_HASH { + unreachable!() + // TODO read directly from codec. +/* let offset = if data[0] == trie_constants::ENCODED_META_ALLOW_HASH { self.recorded_do_value_hash = true; self.do_value_hash = true; 1 } else { 0 }; - Ok(offset) + Ok(offset)*/ } + // TODO consider removal upstream of this method (node type in codec) + // `do_value_hash` method is enough function to write with codec. fn write_state_meta(&self) -> Vec { - if self.recorded_do_value_hash { - // Note that this only works with sp_trie codec that - // cannot encode node starting by this byte. + unreachable!() +/* if self.do_value_hash { + // Note that this only works with sp_trie codec. + // Acts as a boolean result. 
[trie_constants::ENCODED_META_ALLOW_HASH].to_vec() } else { Vec::new() - } + }*/ } fn meta_for_new( global: Self::GlobalMeta, ) -> Self { let mut result = Self::default(); - result.do_value_hash = global; + result.set_global_meta(global); result } @@ -140,12 +159,14 @@ impl Meta for TrieMeta { Self::meta_for_new(global) } + // TODO meta for empty is unused: can consider removal upstream. fn meta_for_empty( global: Self::GlobalMeta, ) -> Self { Self::meta_for_new(global) } + // TODO if removing all meta, the Option will replace it. fn encoded_value_callback( &mut self, value_plan: ValuePlan, @@ -158,9 +179,9 @@ impl Meta for TrieMeta { self.range = Some(range); self.contain_hash = contain_hash; - if self.do_value_hash { - // Switch value hashing. - self.old_hash = false; + if self.switch_to_value_hash { + // Switched value hashing. + self.switch_to_value_hash = false } } @@ -184,7 +205,7 @@ impl Meta for TrieMeta { } fn do_value_hash(&self) -> bool { - self.unused_value + self.do_value_hash } } @@ -224,6 +245,7 @@ impl Default for Layout { impl Layout { /// Layout with inner hashing active. /// Will flag trie for hashing. 
+ /// TODO rename inner -> alt pub fn with_inner_hashing() -> Self { Layout(true, sp_std::marker::PhantomData) } @@ -248,13 +270,19 @@ impl TrieLayout for Layout fn layout_meta(&self) -> GlobalMeta { self.0 } - fn initialize_from_root_meta(&mut self, root_meta: &Self::Meta) { - if root_meta.extract_global_meta() { + + // TODO remove upstream + fn initialize_from_root_meta(&mut self, _root_meta: &Self::Meta) { + unreachable!() + /*if root_meta.extract_global_meta() { self.0 = true; - } + }*/ } - fn set_root_meta(root_meta: &mut Self::Meta, global_meta: GlobalMeta) { - root_meta.set_global_meta(global_meta); + + // TODO remove upstream + fn set_root_meta(_root_meta: &mut Self::Meta, _global_meta: GlobalMeta) { + unreachable!() +// root_meta.set_global_meta(global_meta); } } @@ -271,7 +299,7 @@ impl MetaHasher for StateHasher fn hash(value: &[u8], meta: &Self::Meta) -> H::Out { match &meta { - TrieMeta { range: Some(range), contain_hash: false, do_value_hash, old_hash: false, .. } => { + TrieMeta { range: Some(range), contain_hash: false, do_value_hash, switch_to_value_hash: false, .. } => { if *do_value_hash && range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { let value = inner_hashed_value::(value, Some((range.start, range.end))); H::hash(value.as_slice()) @@ -289,24 +317,10 @@ impl MetaHasher for StateHasher } } + // TODO if removing meta upstream, still need to get DEAD_HEADER_META_HASHED_VALUE + // from proof. fn stored_value(value: &[u8], mut meta: Self::Meta) -> DBValue { let mut stored = Vec::with_capacity(value.len() + 1); - if meta.old_hash { - // write as old hash. - stored.push(trie_constants::OLD_HASHING); - stored.extend_from_slice(value); - return stored; - } - if !meta.do_value_hash { - if let Some(range) = meta.range.as_ref() { - if range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { - // write as old hash. 
- stored.push(trie_constants::OLD_HASHING); - stored.extend_from_slice(value); - return stored; - } - } - } if meta.contain_hash { // already contain hash, just flag it. stored.push(trie_constants::DEAD_HEADER_META_HASHED_VALUE); @@ -336,6 +350,7 @@ impl MetaHasher for StateHasher >::stored_value(value.as_slice(), meta) } + // TODO remove upstream? fn extract_value(mut stored: &[u8], global_meta: Self::GlobalMeta) -> (&[u8], Self::Meta) { let input = &mut stored; let mut contain_hash = false; @@ -344,10 +359,6 @@ impl MetaHasher for StateHasher contain_hash = true; *input = &input[1..]; } - if input.get(0) == Some(&trie_constants::OLD_HASHING) { - old_hash = true; - *input = &input[1..]; - } let mut meta = TrieMeta { range: None, unused_value: contain_hash, @@ -356,14 +367,11 @@ impl MetaHasher for StateHasher recorded_do_value_hash: false, old_hash, }; - // get recorded_do_value_hash - let _offset = meta.read_state_meta(stored) - .expect("State meta reading failure."); - //let stored = &stored[offset..]; - meta.do_value_hash = meta.recorded_do_value_hash || global_meta; + meta.set_global_meta(global_meta); (stored, meta) } + // TODO remove upstream fn extract_value_owned(mut stored: DBValue, global: Self::GlobalMeta) -> (DBValue, Self::Meta) { let len = stored.len(); let (v, meta) = >::extract_value(stored.as_slice(), global); @@ -374,6 +382,8 @@ impl MetaHasher for StateHasher /// Reimplement `NoMeta` `MetaHasher` with /// additional constraint. +/// TODO remove the MetaHasher is ignored +/// when no node have do_value_hash or layout defines it. #[derive(Clone, Copy, Debug, Eq, PartialEq)] pub struct NoMetaHasher; @@ -900,7 +910,10 @@ pub fn estimate_entry_size(entry: &(DBValue, TrieMeta), hash_len: usize) -> usiz full_encoded } -/// If needed, call to decode plan in order to record meta. +/// If needed, call to decode plan in order to update meta earlier. 
+/// TODO if removing fully meta, this will still be needed but with +/// a less generic name: read variant of node from db value and indicate +/// if can hash value. pub fn resolve_encoded_meta(entry: &mut (DBValue, TrieMeta)) { use trie_db::NodeCodec; if entry.1.do_value_hash { @@ -912,45 +925,25 @@ pub fn resolve_encoded_meta(entry: &mut (DBValue, TrieMeta)) { mod trie_constants { /// Treshold for using hash of value instead of value /// in encoded trie node when flagged. + /// TODO design would be to make it the global meta, but then + /// when serializing proof we would need to attach it (no way to + /// hash the nodes otherwhise), which would + /// break proof format. + /// TODO attaching to storage proof in a compatible way could be + /// achieve by using a escaped header in first or last element of proof + /// and write it after. pub const INNER_HASH_TRESHOLD: usize = 33; const FIRST_PREFIX: u8 = 0b_00 << 6; - pub const EMPTY_TRIE: u8 = FIRST_PREFIX | 0b_00; - pub const ENCODED_META_ALLOW_HASH: u8 = FIRST_PREFIX | 0b_01; /// In proof this header is used when only hashed value is stored. - pub const DEAD_HEADER_META_HASHED_VALUE: u8 = FIRST_PREFIX | 0b_00_10; - /// If inner hashing should apply, but state is not flagged, then set - /// this meta to avoid checking both variant of hashes. - pub const OLD_HASHING: u8 = FIRST_PREFIX | 0b_00_11; + pub const DEAD_HEADER_META_HASHED_VALUE: u8 = EMPTY_TRIE | 0b_00_01; pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize; pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6; pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6; pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6; -} - -/// Utility to tag a state without meta with old_hash internal -/// hashing. -pub fn tag_old_hashes(existing: &[u8]) -> Option> { - use trie_db::NodeCodec; - let mut meta = TrieMeta::default(); - // allows restarting a migration. - if existing.len() > 0 && existing[0] == trie_constants::OLD_HASHING { - return None; // allow restarting a migration. 
- } - let _ = as TrieLayout>::Codec::decode_plan(existing, &mut meta) - .expect("Invalid db state entry found: {:?}, entry.0.as_slice()"); - match meta.range { - Some(range) => { - if range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { - let mut res = Vec::with_capacity(existing.len() + 1); - res.push(trie_constants::OLD_HASHING); - res.extend_from_slice(existing); - Some(res) - } else { - None - } - }, - None => None, - } + pub const EMPTY_TRIE: u8 = FIRST_PREFIX | (0b_00 << 4); + pub const ALT_HASHING_LEAF_PREFIX_MASK: u8 = FIRST_PREFIX | (0b_01 << 4); + pub const ALT_HASHING_BRANCH_WITHOUT_MASK: u8 = FIRST_PREFIX | (0b_10 << 4); + pub const ALT_HASHING_BRANCH_WITH_MASK: u8 = FIRST_PREFIX | (0b_11 << 4); } #[cfg(test)] diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 0dc647f17f047..af4adef01bdf5 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -90,23 +90,18 @@ pub struct NodeCodec(PhantomData); impl NodeCodec { fn decode_plan_inner_hashed( data: &[u8], - meta: Option<&mut M>, + meta: Option<&mut M>, // TODO when remove no meta, remove option ) -> Result { - let contains_hash = meta.as_ref() - .map(|m| m.contains_hash_of_value()).unwrap_or_default(); - if data.len() < 1 { - return Err(Error::BadFormat); - } - let offset = if let Some(meta) = meta { - meta.read_state_meta(data).map_err(|_| Error::BadFormat)? - } else { - 0 - }; let mut input = ByteSliceInput::new(data); let _ = input.take(offset)?; + let contains_hash = meta.as_ref() + .map(|m| m.contains_hash_of_value()).unwrap_or_default(); + let header = NodeHeader::decode(&mut input)?; + let alt_hashing = header.alt_hashing(); match NodeHeader::decode(&mut input)? 
{ NodeHeader::Null => Ok(NodePlan::Empty), + NodeHeader::AltHashBranch(has_value, nibble_count) NodeHeader::Branch(has_value, nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) @@ -120,10 +115,10 @@ impl NodeCodec { let bitmap_range = input.take(BITMAP_LENGTH)?; let bitmap = Bitmap::decode(&data[bitmap_range])?; let value = if has_value { - let count = >::decode(&mut input)?.0 as usize; - if contains_hash { + if alt_hashing && contains_hash { ValuePlan::HashedValue(input.take(H::LENGTH)?, count) } else { + let count = >::decode(&mut input)?.0 as usize; ValuePlan::Value(input.take(count)?) } } else { @@ -149,8 +144,9 @@ impl NodeCodec { value, children, }) - } - NodeHeader::Leaf(nibble_count) => { + }, + NodeHeader::AltHashLeaf(nibble_count) + | NodeHeader::Leaf(nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { @@ -160,10 +156,10 @@ impl NodeCodec { (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / nibble_ops::NIBBLE_PER_BYTE, )?; let partial_padding = nibble_ops::number_padding(nibble_count); - let count = >::decode(&mut input)?.0 as usize; - let value = if contains_hash { - ValuePlan::HashedValue(input.take(H::LENGTH)?, count) + if alt_hashing && contains_hash { + ValuePlan::HashedValue(input.take(H::LENGTH)?, 0) } else { + let count = >::decode(&mut input)?.0 as usize; ValuePlan::Value(input.take(count)?) 
}; @@ -201,9 +197,7 @@ impl NodeCodecT for NodeCodec { } fn empty_node(meta: &mut M) -> Vec { - let mut output = meta.write_state_meta(); - output.extend_from_slice(&[trie_constants::EMPTY_TRIE]); - output + empty_node_no_meta().to_vec() } fn empty_node_no_meta() -> &'static [u8] { @@ -211,8 +205,11 @@ impl NodeCodecT for NodeCodec { } fn leaf_node(partial: Partial, value: Value, meta: &mut M) -> Vec { - let mut output = meta.write_state_meta(); - output.append(&mut partial_encode(partial, NodeKind::Leaf)); + let mut output = if meta.do_value_hash() { + partial_encode(partial, NodeKind::AltHashLeaf) + } else { + partial_encode(partial, NodeKind::Leaf) + }; match value { Value::Value(value) => { Compact(value.len() as u32).encode_to(&mut output); @@ -221,9 +218,8 @@ impl NodeCodecT for NodeCodec { let end = output.len(); meta.encoded_value_callback(ValuePlan::Value(start..end)); }, - Value::HashedValue(hash, size) => { + Value::HashedValue(hash, _size) => { debug_assert!(hash.len() == H::LENGTH); - Compact(size as u32).encode_to(&mut output); let start = output.len(); output.extend_from_slice(hash); let end = output.len(); @@ -258,20 +254,21 @@ impl NodeCodecT for NodeCodec { maybe_value: Value, meta: &mut M, ) -> Vec { - let mut output = meta.write_state_meta(); - output.append(&mut if let Value::NoValue = &maybe_value { - partial_from_iterator_encode( - partial, - number_nibble, - NodeKind::BranchNoValue, - ) - } else { - partial_from_iterator_encode( - partial, - number_nibble, - NodeKind::BranchWithValue, - ) - }); + let mut output = match (&maybe_value, meta.do_value_hash()) { + (&Value::NoValue, false) => { + partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue) + }, + (_, false) => { + partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) + }, + (&Value::NoValue, true) => { + partial_from_iterator_encode(partial, number_nibble, NodeKind::AltHashBranchWithValue) + }, + (_, true) => { + 
partial_from_iterator_encode(partial, number_nibble, NodeKind::AltHashBranchNoValue) + }, + }; + let bitmap_index = output.len(); let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH]; (0..BITMAP_LENGTH).for_each(|_|output.push(0)); @@ -283,9 +280,8 @@ impl NodeCodecT for NodeCodec { let end = output.len(); meta.encoded_value_callback(ValuePlan::Value(start..end)); }, - Value::HashedValue(hash, size) => { + Value::HashedValue(hash, _size) => { debug_assert!(hash.len() == H::LENGTH); - Compact(size as u32).encode_to(&mut output); let start = output.len(); output.extend_from_slice(hash); let end = output.len(); @@ -326,6 +322,9 @@ fn partial_from_iterator_encode>( NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), + NodeKind::AltHashLeaf => NodeHeader::AltHashLeaf(nibble_count).encode_to(&mut output), + NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(true, nibble_count).encode_to(&mut output), + NodeKind::AltHashBranchNoValue => NodeHeader::AltHashBranch(false, nibble_count).encode_to(&mut output), }; output.extend(partial); output @@ -344,6 +343,9 @@ fn partial_encode(partial: Partial, node_kind: NodeKind) -> Vec { NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), + NodeKind::AltHashLeaf => NodeHeader::AltHashLeaf(nibble_count).encode_to(&mut output), + NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(true, nibble_count).encode_to(&mut output), + NodeKind::AltHashBranchNoValue => NodeHeader::AltHashBranch(false, nibble_count).encode_to(&mut output), }; if number_nibble_encoded > 0 { output.push(nibble_ops::pad_right((partial.0).1)); 
diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index 0fdf6fefbd0bc..2e291ebd89203 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -28,6 +28,8 @@ pub(crate) enum NodeHeader { Null, Branch(bool, usize), Leaf(usize), + AltHashBranch(bool, usize), + AltHashLeaf(usize), } /// NodeHeader without content @@ -35,6 +37,9 @@ pub(crate) enum NodeKind { Leaf, BranchNoValue, BranchWithValue, + AltHashLeaf, + AltHashBranchNoValue, + AltHashBranchWithValue, } impl Encode for NodeHeader { @@ -47,10 +52,28 @@ impl Encode for NodeHeader { encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITHOUT_MASK, output), NodeHeader::Leaf(nibble_count) => encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, output), + NodeHeader::AltHashBranch(true, nibble_count) => + encode_size_and_prefix_alt(*nibble_count, trie_constants::ALT_HASIHNG_BRANCH_WITH_MASK, output), + NodeHeader::AltHashBranch(false, nibble_count) => + encode_size_and_prefix_alt(*nibble_count, trie_constants::ALT_HASHING_BRANCH_WITHOUT_MASK, output), + NodeHeader::AltHashLeaf(nibble_count) => + encode_size_and_prefix_alt(*nibble_count, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, output), } } } +impl NodeHeader { + /// Is this header using alternate hashing scheme. + pub(crate) alt_hashing() -> bool { + match self { + NodeHeader::Null + | NodeHeader::Leaf(..) + | NodeHeader::Branch(..) => false, + NodeHeader::AltHashBranch(..) + | NodeHeader::AltHashLeaf(..) 
=> true, + } + } +} impl codec::EncodeLike for NodeHeader {} impl Decode for NodeHeader { @@ -60,11 +83,16 @@ impl Decode for NodeHeader { return Ok(NodeHeader::Null); } match i & (0b11 << 6) { - trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input)?)), - trie_constants::BRANCH_WITHOUT_MASK => Ok(NodeHeader::Branch(false, decode_size(i, input)?)), - trie_constants::BRANCH_WITH_MASK => Ok(NodeHeader::Branch(true, decode_size(i, input)?)), - // do not allow any special encoding - _ => Err("Unallowed encoding".into()), + trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input, 2)?)), + trie_constants::BRANCH_WITH_MASK => Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)), + trie_constants::BRANCH_WITHOUT_MASK => Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)), + trie_constants::EMPTY_TRIE => match i & (0b1111 << 4) { + trie_constants::ALT_HASHING_LEAF_PREFIX_MASK => Ok(NodeHeader::AltHashLeaf(decode_size(i, input, 4)?)), + trie_constants::ALT_HASHING_BRANCH_WITH_MASK => Ok(NodeHeader::AltHashBranch(true, decode_size(i, input, 4)?)), + trie_constants::ALT_HASHING_BRANCH_WITHOUT_MASK => Ok(NodeHeader::AltHashBranch(false, decode_size(i, input, 4)?)), + // do not allow any special encoding + _ => Err("Unallowed encoding".into()), + }, } } } @@ -72,14 +100,15 @@ impl Decode for NodeHeader { /// Returns an iterator over encoded bytes for node header and size. /// Size encoding allows unlimited, length inefficient, representation, but /// is bounded to 16 bit maximum value to avoid possible DOS. 
-pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8) -> impl Iterator { +pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8, prefix_mask: usize) -> impl Iterator { let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size); - let l1 = sp_std::cmp::min(62, size); + let max_value = 255 >> prefix_mask; + let l1 = sp_std::cmp::min(max_value - 1, size); let (first_byte, mut rem) = if size == l1 { (once(prefix + l1 as u8), 0) } else { - (once(prefix + 63), size - l1) + (once(prefix + max_value), size - l1) }; let next_bytes = move || { if rem > 0 { @@ -98,17 +127,25 @@ pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8) -> impl Iterator first_byte.chain(sp_std::iter::from_fn(next_bytes)) } -/// Encodes size and prefix to a stream output. +/// Encodes size and prefix to a stream output (prefix on 2 first bit only). fn encode_size_and_prefix(size: usize, prefix: u8, out: &mut W) { - for b in size_and_prefix_iterator(size, prefix) { + for b in size_and_prefix_iterator(size, prefix, 2) { + out.push_byte(b) + } +} + +/// Encodes size and prefix to a stream output with prefix (prefix on 4 first bit only). +fn encode_size_and_prefix_alt(size: usize, prefix: u8, out: &mut W) { + for b in size_and_prefix_iterator(size, prefix, 4) { out.push_byte(b) } } /// Decode size only from stream input and header byte. 
-fn decode_size(first: u8, input: &mut impl Input) -> Result { - let mut result = (first & 255u8 >> 2) as usize; - if result < 63 { +fn decode_size(first: u8, input: &mut impl Input, prefix_mask: usize) -> Result { + let max_value = 255u8 >> prefix_mask; + let mut result = (first & max_value) as usize; + if result < max_value { return Ok(result); } result -= 1; diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 0f00a335ae2f1..dcb9cf08e74b7 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -113,6 +113,7 @@ impl From for crate::MemoryDB { let mut is_hashed_value = false; let mut accum = Vec::new(); for item in proof.trie_nodes.iter() { + // TODO remove this look up // Note using `default()` as global meta helps looking fro root node. let layout_meta = Default::default(); let (encoded_node, mut meta) = < From c807f44d40424ffe797f1f41b7f1a2e64b124a58 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 8 Jun 2021 16:21:55 +0200 Subject: [PATCH 036/188] before removing unused branch no value alt hashing. more work todo rename all flag var to alt_hash, and remove extrinsic replace by storage query at every storage_root call. 
--- bin/node/bench/src/generator.rs | 6 +- client/db/src/bench.rs | 10 ++- client/db/src/lib.rs | 7 +- client/db/src/storage_cache.rs | 14 ++-- client/db/src/upgrade.rs | 61 +------------- client/light/src/backend.rs | 7 +- primitives/state-machine/src/backend.rs | 12 +-- primitives/state-machine/src/basic.rs | 10 +-- primitives/state-machine/src/ext.rs | 2 +- primitives/state-machine/src/lib.rs | 28 ++----- .../state-machine/src/proving_backend.rs | 7 +- primitives/state-machine/src/testing.rs | 2 +- primitives/state-machine/src/trie_backend.rs | 34 ++++---- primitives/storage/src/lib.rs | 4 +- primitives/trie/src/lib.rs | 80 ++++++++----------- primitives/trie/src/node_codec.rs | 26 +++--- primitives/trie/src/node_header.rs | 14 ++-- primitives/trie/src/storage_proof.rs | 17 +--- primitives/trie/src/trie_stream.rs | 54 ++++++++----- 19 files changed, 159 insertions(+), 236 deletions(-) diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index a27d6e529f277..913c1ff8779f0 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -46,9 +46,7 @@ pub fn generate_trie( let mut trie_db = if flag_hashed_value { let layout = sp_trie::Layout::with_inner_hashing(); - let mut t = TrieDBMut::::new_with_layout(&mut trie, &mut root, layout); - t.force_layout_meta().expect("force layout meta failed"); - t + TrieDBMut::::new_with_layout(&mut trie, &mut root, layout) } else { TrieDBMut::new(&mut trie, &mut root) }; @@ -58,7 +56,7 @@ pub fn generate_trie( trie_db.commit(); } - ( trie.db, overlay ) + (trie.db, overlay) }; let mut transaction = db.transaction(); diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index ec53cecf1f1a4..0f111eed99189 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -401,17 +401,21 @@ impl StateBackend> for BenchmarkingState { fn storage_root<'a>( &self, delta: impl Iterator)>, - flag_hash_value: bool, + alt_hashing: bool, ) -> (B::Hash, Self::Transaction) where B::Hash: 
Ord { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.storage_root(delta, flag_hash_value)) + self.state.borrow().as_ref().map_or(Default::default(), |s| s.storage_root(delta, alt_hashing)) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, + alt_hashing: bool, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) + self.state.borrow().as_ref().map_or( + Default::default(), + |s| s.child_storage_root(child_info, delta, alt_hashing), + ) } fn pairs(&self) -> Vec<(Vec, Vec)> { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index f808df9c2675e..41a6853aebc71 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -227,17 +227,18 @@ impl StateBackend> for RefTrackingState { fn storage_root<'a>( &self, delta: impl Iterator)>, - flag_hash_value: bool, + alt_hashing: bool, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.storage_root(delta, flag_hash_value) + self.state.storage_root(delta, alt_hashing) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, + alt_hashing: bool, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.child_storage_root(child_info, delta) + self.state.child_storage_root(child_info, delta, alt_hashing) } fn pairs(&self) -> Vec<(Vec, Vec)> { diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index acf89c646a858..946f576142745 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -645,17 +645,18 @@ impl>, B: BlockT> StateBackend> for Cachin fn storage_root<'a>( &self, delta: impl Iterator)>, - flag_hash_value: bool, + alt_hashing: bool, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.storage_root(delta, flag_hash_value) + self.state.storage_root(delta, alt_hashing) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: 
impl Iterator)>, + alt_hashing: bool, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.child_storage_root(child_info, delta) + self.state.child_storage_root(child_info, delta, alt_hashing) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -832,17 +833,18 @@ impl>, B: BlockT> StateBackend> for Syncin fn storage_root<'a>( &self, delta: impl Iterator)>, - flag_hash_value: bool, + alt_hashing: bool, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.caching_state().storage_root(delta, flag_hash_value) + self.caching_state().storage_root(delta, alt_hashing) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, + alt_hashing: bool, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.caching_state().child_storage_root(child_info, delta) + self.caching_state().child_storage_root(child_info, delta, alt_hashing) } fn pairs(&self) -> Vec<(Vec, Vec)> { diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index f0129251b691e..ea91b8253e1d8 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -26,13 +26,12 @@ use sp_runtime::traits::Block as BlockT; use crate::{columns, utils::DatabaseType}; use kvdb_rocksdb::{Database, DatabaseConfig}; use codec::{Decode, Encode}; -use log::info; /// Version file name. const VERSION_FILE_NAME: &'static str = "db_version"; /// Current db version. -const CURRENT_VERSION: u32 = 4; +const CURRENT_VERSION: u32 = 3; /// Number of columns in v1. const V1_NUM_COLUMNS: u32 = 11; @@ -50,7 +49,6 @@ pub fn upgrade_db(db_path: &Path, db_type: DatabaseType) -> sp_bl migrate_2_to_3::(db_path, db_type)? 
}, 2 => migrate_2_to_3::(db_path, db_type)?, - 3 => migrate_3_to_4::(db_path, db_type)?, CURRENT_VERSION => (), _ => Err(sp_blockchain::Error::Backend(format!("Future database version: {}", db_version)))?, } @@ -100,63 +98,6 @@ fn migrate_2_to_3(db_path: &Path, _db_type: DatabaseType) -> sp_b Ok(()) } -/// Migration from version3 to version4: -/// - Trie state meta for state that could be hashed internaly. -fn migrate_3_to_4(db_path: &Path, _db_type: DatabaseType) -> sp_blockchain::Result<()> { - - info!("Starting trie node migration."); - let start_time = std::time::Instant::now(); - let db_path = db_path.to_str() - .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; - let db_cfg = DatabaseConfig::with_columns(V2_NUM_COLUMNS); - let db = Database::open(&db_cfg, db_path).map_err(db_err)?; - - let mut nb_node_prefixed = 0; - let mut nb_node_seen = 0; - let batch_size = 250_000; - loop { - let mut full_batch = false; - let mut size = 0; - let mut last = Vec::new(); - let mut transaction = db.transaction(); - // Get all the keys we need to update. - // Note that every batch will restart full iter, - // if this prove to slow for archive node, this could be - // switch to a `iter_from` function but would require - // to upstream change to our rocksdb crate. - for entry in db.iter(columns::STATE) { - if &entry.1[..] 
> last.as_slice() { - nb_node_seen += 1; - if let Some(new_val) = sp_trie::tag_old_hashes::>(&entry.1) { - transaction.put_vec(columns::STATE, &entry.0, new_val); - nb_node_prefixed += 1; - size += 1; - if size == batch_size { - full_batch = true; - last = entry.0.to_vec(); - break; - } - } - } - } - info!( - "Committing batch, currently processed: {} of {} read nodes at {:?}, {:?}", - nb_node_prefixed, - nb_node_seen, - last, - start_time.elapsed().as_millis(), - ); - db.write(transaction).map_err(db_err)?; - if !full_batch { - break; - } - } - info!("Trie node migration finished in {:?} ms.", start_time.elapsed().as_millis()); - info!("{:?} nodes prefixed for {:?} node.", nb_node_prefixed, nb_node_seen); - Ok(()) -} - - /// Reads current database version from the file at given path. /// If the file does not exist returns 0. fn current_version(path: &Path) -> sp_blockchain::Result { diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index b09b4d748b8ec..2d65c7b3347ff 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -490,11 +490,11 @@ impl StateBackend for GenesisOrUnavailableState fn storage_root<'a>( &self, delta: impl Iterator)>, - flag_hash_value: bool, + alt_hashing: bool, ) -> (H::Out, Self::Transaction) where H::Out: Ord { match *self { GenesisOrUnavailableState::Genesis(ref state) => - state.storage_root(delta, flag_hash_value), + state.storage_root(delta, alt_hashing), GenesisOrUnavailableState::Unavailable => Default::default(), } } @@ -503,10 +503,11 @@ impl StateBackend for GenesisOrUnavailableState &self, child_info: &ChildInfo, delta: impl Iterator)>, + alt_hashing: bool, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { match *self { GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(child_info, delta); + let (root, is_equal, _) = state.child_storage_root(child_info, delta, alt_hashing); (root, is_equal, Default::default()) }, 
GenesisOrUnavailableState::Unavailable => diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 0f05d08858bb1..aaa6bbf16f8d7 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -123,12 +123,13 @@ pub trait Backend: sp_std::fmt::Debug { /// Calculate the storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. - /// A flag `flag_inner_hash_value` can be set, it switches inner trie implementation. + /// `alt_hashing` indicate if trie state should apply alternate hashing + /// scheme (inner value hashed). /// Does not include child storage updates. fn storage_root<'a>( &self, delta: impl Iterator)>, - flag_inner_hash_value: bool, + alt_hashing: bool, ) -> (H::Out, Self::Transaction) where H::Out: Ord; /// Calculate the child storage root, with given delta over what is already stored in @@ -138,6 +139,7 @@ pub trait Backend: sp_std::fmt::Debug { &self, child_info: &ChildInfo, delta: impl Iterator)>, + alt_hashing: bool, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord; /// Get all key/value pairs into a Vec. 
@@ -176,14 +178,14 @@ pub trait Backend: sp_std::fmt::Debug { &'a ChildInfo, impl Iterator)>, )>, - flag_inner_hash_value: bool, + alt_hashing: bool, ) -> (H::Out, Self::Transaction) where H::Out: Ord + Encode { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first for (child_info, child_delta) in child_deltas { let (child_root, empty, child_txs) = - self.child_storage_root(&child_info, child_delta); + self.child_storage_root(&child_info, child_delta, alt_hashing); let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { @@ -199,7 +201,7 @@ pub trait Backend: sp_std::fmt::Debug { .iter() .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) ), - flag_inner_hash_value, + alt_hashing, ); txs.consolidate(parent_txs); (root, txs) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index c9794486b6acc..2cc7314392bdf 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -74,7 +74,7 @@ impl BasicExternalities { inner: Storage { top: std::mem::take(&mut storage.top), children_default: std::mem::take(&mut storage.children_default), - flag_hashed_value: storage.flag_hashed_value, + alt_hashing: storage.alt_hashing, }, extensions: Default::default(), }; @@ -129,7 +129,7 @@ impl From> for BasicExternalities { inner: Storage { top: hashmap, children_default: Default::default(), - flag_hashed_value: true, + alt_hashing: true, }, extensions: Default::default(), } @@ -283,7 +283,7 @@ impl Externalities for BasicExternalities { } } - let layout = if self.inner.flag_hashed_value { + let layout = if self.inner.alt_hashing { Layout::::with_inner_hashing() } else { Layout::::default() @@ -298,7 +298,7 @@ impl Externalities for BasicExternalities { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), 
Some(v.as_ref()))); crate::in_memory_backend::new_in_mem::() - .child_storage_root(&child.child_info, delta).0 + .child_storage_root(&child.child_info, delta, self.inner.alt_hashing).0 } else { empty_child_trie_root::>() }.encode() @@ -341,7 +341,7 @@ impl Externalities for BasicExternalities { } fn flag_hash_value(&mut self) { - self.inner.flag_hashed_value = true; + self.inner.alt_hashing = true; } } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 0b3e155c11594..e187c128345b8 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -547,7 +547,7 @@ where } else { let root = if let Some((changes, info)) = self.overlay.child_changes(storage_key) { let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); - Some(self.backend.child_storage_root(info, delta)) + Some(self.backend.child_storage_root(info, delta, self.overlay.flag_hash_value())) } else { None }; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 726d1e51b080b..709357430a40c 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1496,12 +1496,13 @@ mod tests { #[test] fn inner_state_hashing_switch_proofs() { + let mut layout = Layout::default(); let (mut mdb, mut root) = trie_backend::tests::test_db(false); { let mut trie = TrieDBMut::from_existing_with_layout( &mut mdb, &mut root, - Layout::default(), + layout.cloen(), ).unwrap(); trie.insert(b"foo", vec![1u8; 1_000].as_slice()) // big inner hash .expect("insert failed"); @@ -1537,31 +1538,14 @@ mod tests { let root1 = root.clone(); - // trigger switch - { - let mut trie = TrieDBMut::from_existing_with_layout( - &mut mdb, - &mut root, - Layout::with_inner_hashing(), - ).unwrap(); - trie.force_layout_meta() - .expect("failed forced layout change"); - } - let root2 = root.clone(); - assert!(root1 != root2); - let remote_proof = check_proof(mdb.clone(), root.clone()); - // 
nodes are still with old hashing. - assert!(remote_proof.encode().len() > 1_100); - assert!(remote_proof.encoded_size() > 1_100); - assert_eq!(remote_proof.encode().len(), - remote_proof.encoded_size()); - + // do switch + layout = Layout::with_inner_hashing(); // update with same value do not change { let mut trie = TrieDBMut::from_existing_with_layout( &mut mdb, &mut root, - Layout::default(), + layout.clone(), ).unwrap(); trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash .expect("insert failed"); @@ -1576,7 +1560,7 @@ mod tests { let mut trie = TrieDBMut::from_existing_with_layout( &mut mdb, &mut root, - Layout::default(), + layout.clone(), ).unwrap(); trie.insert(b"foo222", vec![4u8].as_slice()) // inner hash .expect("insert failed"); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 5e1a0de9ffc87..1ab9d09125648 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -353,17 +353,18 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn storage_root<'b>( &self, delta: impl Iterator)>, - flag_inner_hash_value: bool, + alt_hashing: bool, ) -> (H::Out, Self::Transaction) where H::Out: Ord { - self.0.storage_root(delta, flag_inner_hash_value) + self.0.storage_root(delta, alt_hashing) } fn child_storage_root<'b>( &self, child_info: &ChildInfo, delta: impl Iterator)>, + alt_hashing: bool, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { - self.0.child_storage_root(child_info, delta) + self.0.child_storage_root(child_info, delta, alt_hashing) } fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) { } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index b79d6866edcca..19006ccb52ac4 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -239,7 +239,7 @@ impl Default for TestExternalities fn default() 
-> Self { // default to inner hashed. let mut storage = Storage::default(); - storage.flag_hashed_value = true; + storage.alt_hashing = true; Self::new(storage) } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index afee980aedc6a..fbc1f12fd2ae1 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -171,7 +171,7 @@ impl, H: Hasher> Backend for TrieBackend where fn storage_root<'a>( &self, delta: impl Iterator)>, - flag_inner_hash_value: bool, + use_inner_hash_value: bool, ) -> (H::Out, Self::Transaction) where H::Out: Ord { let mut write_overlay = S::Overlay::default(); let mut root = *self.essence.root(); @@ -182,16 +182,12 @@ impl, H: Hasher> Backend for TrieBackend where &mut write_overlay, ); let res = || { - if flag_inner_hash_value { - let layout = sp_trie::Layout::with_inner_hashing(); - let mut t = sp_trie::trie_types::TrieDBMut::::from_existing_with_layout( - &mut eph, - &mut root, - layout, - )?; - t.force_layout_meta()?; - } - delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta) + let layout = if use_inner_hash_value { + sp_trie::Layout::with_inner_hashing() + } else { + sp_trie::Layout::default() + }; + delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta, layout) }; match res() { @@ -207,10 +203,16 @@ impl, H: Hasher> Backend for TrieBackend where &self, child_info: &ChildInfo, delta: impl Iterator)>, + use_inner_hash_value: bool, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { let default_root = match child_info.child_type() { ChildType::ParentKeyId => empty_child_trie_root::>() }; + let layout = if use_inner_hash_value { + sp_trie::Layout::with_inner_hashing() + } else { + sp_trie::Layout::default() + }; let mut write_overlay = S::Overlay::default(); let prefixed_storage_key = child_info.prefixed_storage_key(); @@ -234,6 +236,7 @@ impl, H: Hasher> Backend for TrieBackend where &mut eph, root, delta, + layout, ) { 
Ok(ret) => root = ret, Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), @@ -291,14 +294,7 @@ pub mod tests { root.encode_to(&mut sub_root); let mut trie = if hashed_value { let layout = Layout::with_inner_hashing(); - let mut t = TrieDBMut::new_with_layout( - &mut mdb, - &mut root, - layout, - ); - t.force_layout_meta() - .expect("failed forced layout change"); - t + TrieDBMut::new_with_layout(&mut mdb, &mut root, layout) } else { TrieDBMut::new(&mut mdb, &mut root) }; diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 5c8028e40c2a8..32645adecb535 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -140,8 +140,8 @@ pub struct Storage { /// trie kind, so this is exclusively for the `ChildType::ParentKeyId` /// tries. pub children_default: std::collections::HashMap, StorageChild>, - /// Flag state for using hash of values internally. - pub flag_hashed_value: bool, + /// `true` when state should hash values internally. + pub alt_hashing: bool, } /// Storage change set diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 57aa05adeed04..fc7d16b71328f 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -88,25 +88,22 @@ impl Meta for TrieMeta { /// When true apply inner hashing of value. type GlobalMeta = bool; - // TODO remove upstraem /// When true apply inner hashing of value. 
type StateMeta = bool; - // TODO remove upstream - fn set_state_meta(&mut self, _state_meta: Self::StateMeta) { - /*if !self.do_value_hash && state_meta { - self.switch_to_value_hash = true; + fn set_state_meta(&mut self, state_meta: Self::StateMeta) { + if !self.do_value_hash && state_meta { self.do_value_hash = true; - }*/ + } } // TODO remove upstream fn extract_global_meta(&self) -> Self::GlobalMeta { - self.recorded_do_value_hash + self.switch_to_value_hash || self.do_value_hash } fn set_global_meta(&mut self, global_meta: Self::GlobalMeta) { - if !self.do_value_hash && state_meta { + if !self.do_value_hash && global_meta { self.switch_to_value_hash = true; self.do_value_hash = true; } @@ -114,12 +111,11 @@ impl Meta for TrieMeta { // TODO remove upstream? fn has_state_meta(&self) -> bool { - false - //self.do_value_hash + self.do_value_hash && !self.switch_to_value_hash } // TODO consider removal upstream of this method (node type in codec) - fn read_state_meta(&mut self, data: &[u8]) -> Result { + fn read_state_meta(&mut self, _data: &[u8]) -> Result { unreachable!() // TODO read directly from codec. /* let offset = if data[0] == trie_constants::ENCODED_META_ALLOW_HASH { @@ -204,6 +200,7 @@ impl Meta for TrieMeta { self.contain_hash } + // TODO could be rename to get_state_meta fn do_value_hash(&self) -> bool { self.do_value_hash } @@ -255,12 +252,12 @@ impl TrieLayout for Layout where H: Hasher, M: MetaHasher, - M::Meta: Meta, + M::Meta: Meta, { const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; const USE_META: bool = true; - const READ_ROOT_STATE_META: bool = true; + const READ_ROOT_STATE_META: bool = false; // TODO rem type Hash = H; type Codec = NodeCodec; @@ -299,8 +296,8 @@ impl MetaHasher for StateHasher fn hash(value: &[u8], meta: &Self::Meta) -> H::Out { match &meta { - TrieMeta { range: Some(range), contain_hash: false, do_value_hash, switch_to_value_hash: false, .. 
} => { - if *do_value_hash && range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { + TrieMeta { range: Some(range), contain_hash: false, do_value_hash: true, switch_to_value_hash: false, .. } => { + if range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { let value = inner_hashed_value::(value, Some((range.start, range.end))); H::hash(value.as_slice()) } else { @@ -354,7 +351,6 @@ impl MetaHasher for StateHasher fn extract_value(mut stored: &[u8], global_meta: Self::GlobalMeta) -> (&[u8], Self::Meta) { let input = &mut stored; let mut contain_hash = false; - let mut old_hash = false; if input.get(0) == Some(&trie_constants::DEAD_HEADER_META_HASHED_VALUE) { contain_hash = true; *input = &input[1..]; @@ -365,7 +361,8 @@ impl MetaHasher for StateHasher contain_hash, do_value_hash: false, recorded_do_value_hash: false, - old_hash, + switch_to_value_hash: false, + old_hash: false, }; meta.set_global_meta(global_meta); (stored, meta) @@ -419,7 +416,7 @@ impl TrieConfiguration for Layout where H: Hasher, M: MetaHasher, - M::Meta: Meta, + M::Meta: Meta, { fn trie_root(&self, input: I) -> ::Out where I: IntoIterator, @@ -515,7 +512,8 @@ pub mod trie_types { pub fn delta_trie_root( db: &mut DB, mut root: TrieHash, - delta: I + delta: I, + layout: L, ) -> Result, Box>> where I: IntoIterator, A: Borrow<[u8]>, @@ -524,14 +522,11 @@ pub fn delta_trie_root( DB: hash_db::HashDB>, { { - let mut trie = TrieDBMut::::from_existing(db, &mut root)?; + let mut trie = TrieDBMut::::from_existing_with_layout(db, &mut root, layout)?; let mut delta = delta.into_iter().collect::>(); delta.sort_by(|l, r| l.0.borrow().cmp(r.0.borrow())); - if delta.len() == 0 { - trie.force_layout_meta()?; - } for (key, change) in delta { match change.borrow() { Some(val) => trie.insert(key.borrow(), val.borrow())?, @@ -622,6 +617,7 @@ pub fn child_delta_trie_root( db: &mut DB, root_data: RD, delta: I, + layout: L, ) -> Result<::Out, Box>> where I: IntoIterator, @@ -640,6 +636,7 @@ pub fn 
child_delta_trie_root( &mut db, root, delta, + layout, ) } @@ -971,9 +968,6 @@ mod tests { let mut memdb = MemoryDBMeta::<_, T::MetaHasher>::default(); let mut root = Default::default(); let mut t = TrieDBMut::::new_with_layout(&mut memdb, &mut root, layout); - if input.len() == 0 { - t.force_layout_meta().unwrap(); - } for (x, y) in input.iter().rev() { t.insert(x, y).unwrap(); } @@ -1005,7 +999,7 @@ mod tests { } fn check_input(input: &Vec<(&[u8], &[u8])>) { - +// TODO remove this iter let layout = Layout::with_inner_hashing(); check_equivalent::(input, layout.clone()); @@ -1122,25 +1116,12 @@ mod tests { db: &'db mut dyn HashDB>, root: &'db mut TrieHash, v: &[(Vec, Vec)], - flag_hash: bool, + layout: T, ) -> TrieDBMut<'db, T> where T: TrieConfiguration, { - let mut t = if flag_hash { - let mut root_meta = Default::default(); - T::set_root_meta(&mut root_meta, flag_hash); - - let mut layout = T::default(); - layout.initialize_from_root_meta(&root_meta); - - let mut t = TrieDBMut::::new_with_layout(db, root, layout); - t.force_layout_meta() - .expect("Could not force layout."); - t - } else { - TrieDBMut::::new(db, root) - }; + let mut t = TrieDBMut::::new_with_layout(db, root, layout); for i in 0..v.len() { let key: &[u8]= &v[i].0; let val: &[u8] = &v[i].1; @@ -1186,7 +1167,8 @@ mod tests { let real = layout.trie_root(x.clone()); let mut memdb = MemoryDB::default(); let mut root = Default::default(); - let mut memtrie = populate_trie::(&mut memdb, &mut root, &x, flag); + + let mut memtrie = populate_trie::(&mut memdb, &mut root, &x, layout.clone()); memtrie.commit(); if *memtrie.root() != real { @@ -1274,6 +1256,12 @@ mod tests { iterator_works_inner(false); } fn iterator_works_inner(flag: bool) { + let layout = if flag { + Layout::with_inner_hashing() + } else { + Layout::default() + }; + let pairs = vec![ (hex!("0103000000000000000464").to_vec(), hex!("0400000000").to_vec()), (hex!("0103000000000000000469").to_vec(), hex!("0401000000").to_vec()), @@ -1281,9 
+1269,9 @@ mod tests { let mut mdb = MemoryDB::default(); let mut root = Default::default(); - let _ = populate_trie::(&mut mdb, &mut root, &pairs, flag); + let _ = populate_trie::(&mut mdb, &mut root, &pairs, layout.clone()); - let trie = TrieDB::::new(&mdb, &root).unwrap(); + let trie = TrieDB::::new_with_layout(&mdb, &root, layout).unwrap(); let iter = trie.iter().unwrap(); let mut iter_pairs = Vec::new(); @@ -1315,11 +1303,13 @@ mod tests { &mut proof_db.clone(), storage_root, valid_delta, + Default::default(), ).unwrap(); let second_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, invalid_delta, + Default::default(), ).unwrap(); assert_eq!(first_storage_root, second_storage_root); diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index af4adef01bdf5..c239a5c07c97a 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -88,21 +88,23 @@ impl<'a> Input for ByteSliceInput<'a> { pub struct NodeCodec(PhantomData); impl NodeCodec { - fn decode_plan_inner_hashed( + fn decode_plan_inner_hashed>( data: &[u8], - meta: Option<&mut M>, // TODO when remove no meta, remove option + mut meta: Option<&mut M>, // TODO when remove no meta, remove option ) -> Result { let mut input = ByteSliceInput::new(data); - let _ = input.take(offset)?; let contains_hash = meta.as_ref() .map(|m| m.contains_hash_of_value()).unwrap_or_default(); let header = NodeHeader::decode(&mut input)?; let alt_hashing = header.alt_hashing(); - match NodeHeader::decode(&mut input)? 
{ + meta.as_mut() + .map(|m| m.set_state_meta(alt_hashing)); + + match header { NodeHeader::Null => Ok(NodePlan::Empty), NodeHeader::AltHashBranch(has_value, nibble_count) - NodeHeader::Branch(has_value, nibble_count) => { + | NodeHeader::Branch(has_value, nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { @@ -116,7 +118,7 @@ impl NodeCodec { let bitmap = Bitmap::decode(&data[bitmap_range])?; let value = if has_value { if alt_hashing && contains_hash { - ValuePlan::HashedValue(input.take(H::LENGTH)?, count) + ValuePlan::HashedValue(input.take(H::LENGTH)?, 0) } else { let count = >::decode(&mut input)?.0 as usize; ValuePlan::Value(input.take(count)?) @@ -156,7 +158,7 @@ impl NodeCodec { (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / nibble_ops::NIBBLE_PER_BYTE, )?; let partial_padding = nibble_ops::number_padding(nibble_count); - if alt_hashing && contains_hash { + let value = if alt_hashing && contains_hash { ValuePlan::HashedValue(input.take(H::LENGTH)?, 0) } else { let count = >::decode(&mut input)?.0 as usize; @@ -172,7 +174,7 @@ impl NodeCodec { } } -impl NodeCodecT for NodeCodec { +impl> NodeCodecT for NodeCodec { type Error = Error; type HashOut = H::Out; @@ -196,8 +198,8 @@ impl NodeCodecT for NodeCodec { data == >::empty_node_no_meta() } - fn empty_node(meta: &mut M) -> Vec { - empty_node_no_meta().to_vec() + fn empty_node(_meta: &mut M) -> Vec { + sp_std::vec![trie_constants::EMPTY_TRIE] } fn empty_node_no_meta() -> &'static [u8] { @@ -223,7 +225,7 @@ impl NodeCodecT for NodeCodec { let start = output.len(); output.extend_from_slice(hash); let end = output.len(); - meta.encoded_value_callback(ValuePlan::HashedValue(start..end, size)); + meta.encoded_value_callback(ValuePlan::HashedValue(start..end, 0)); }, Value::NoValue => unimplemented!("No support for incomplete nodes"), } @@ -285,7 +287,7 @@ impl NodeCodecT for 
NodeCodec { let start = output.len(); output.extend_from_slice(hash); let end = output.len(); - meta.encoded_value_callback(ValuePlan::HashedValue(start..end, size)); + meta.encoded_value_callback(ValuePlan::HashedValue(start..end, 0)); }, Value::NoValue => (), } diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index 2e291ebd89203..a1f0090ecb8a2 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -53,7 +53,7 @@ impl Encode for NodeHeader { NodeHeader::Leaf(nibble_count) => encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, output), NodeHeader::AltHashBranch(true, nibble_count) => - encode_size_and_prefix_alt(*nibble_count, trie_constants::ALT_HASIHNG_BRANCH_WITH_MASK, output), + encode_size_and_prefix_alt(*nibble_count, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, output), NodeHeader::AltHashBranch(false, nibble_count) => encode_size_and_prefix_alt(*nibble_count, trie_constants::ALT_HASHING_BRANCH_WITHOUT_MASK, output), NodeHeader::AltHashLeaf(nibble_count) => @@ -64,7 +64,7 @@ impl Encode for NodeHeader { impl NodeHeader { /// Is this header using alternate hashing scheme. - pub(crate) alt_hashing() -> bool { + pub(crate) fn alt_hashing(&self) -> bool { match self { NodeHeader::Null | NodeHeader::Leaf(..) 
@@ -74,6 +74,7 @@ impl NodeHeader { } } } + impl codec::EncodeLike for NodeHeader {} impl Decode for NodeHeader { @@ -93,6 +94,7 @@ impl Decode for NodeHeader { // do not allow any special encoding _ => Err("Unallowed encoding".into()), }, + _ => unreachable!(), } } } @@ -103,12 +105,12 @@ impl Decode for NodeHeader { pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8, prefix_mask: usize) -> impl Iterator { let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size); - let max_value = 255 >> prefix_mask; - let l1 = sp_std::cmp::min(max_value - 1, size); + let max_value = 255u8 >> prefix_mask; + let l1 = sp_std::cmp::min(max_value as usize - 1, size); let (first_byte, mut rem) = if size == l1 { (once(prefix + l1 as u8), 0) } else { - (once(prefix + max_value), size - l1) + (once(prefix + max_value as u8), size - l1) }; let next_bytes = move || { if rem > 0 { @@ -145,7 +147,7 @@ fn encode_size_and_prefix_alt(size: usize, prefix: u8, out: fn decode_size(first: u8, input: &mut impl Input, prefix_mask: usize) -> Result { let max_value = 255u8 >> prefix_mask; let mut result = (first & max_value) as usize; - if result < max_value { + if result < max_value as usize { return Ok(result); } result -= 1; diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index dcb9cf08e74b7..77c1c0f3ad22a 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -110,28 +110,15 @@ impl From for crate::MemoryDB { // only allow global definition. // Using compact proof will work directly here (read trie structure and // work directly. - let mut is_hashed_value = false; - let mut accum = Vec::new(); for item in proof.trie_nodes.iter() { - // TODO remove this look up // Note using `default()` as global meta helps looking fro root node. 
let layout_meta = Default::default(); let (encoded_node, mut meta) = < as TrieLayout>::MetaHasher as MetaHasher >::extract_value(item.as_slice(), layout_meta); - // read state meta. + // read state meta (required for value layout and AltHash node. let _ = as TrieLayout>::Codec::decode_plan(encoded_node, &mut meta); - if meta.recorded_do_value_hash { - debug_assert!(!is_hashed_value); - is_hashed_value = true; - } - accum.push((encoded_node, meta)); - } - for mut item in accum.into_iter() { - if is_hashed_value { - item.1.do_value_hash = true; - } - db.insert_with_meta(crate::EMPTY_PREFIX, item.0, item.1); + db.insert_with_meta(crate::EMPTY_PREFIX, encoded_node, meta); } db } diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index f3c680f245c54..36ccb49175783 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -25,7 +25,6 @@ use sp_std::ops::Range; use crate::{trie_constants, TrieMeta, StateHasher}; use crate::node_header::{NodeKind, size_and_prefix_iterator}; use crate::node_codec::Bitmap; -use trie_db::Meta; const BRANCH_NODE_NO_VALUE: u8 = 254; const BRANCH_NODE_WITH_VALUE: u8 = 255; @@ -59,9 +58,12 @@ fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK), - NodeKind::BranchNoValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK), - NodeKind::BranchWithValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK), + NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK, 2), + NodeKind::BranchNoValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2), + NodeKind::BranchWithValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK, 2), + NodeKind::AltHashLeaf => size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 4), + NodeKind::AltHashBranchNoValue => size_and_prefix_iterator(size, 
trie_constants::ALT_HASHING_BRANCH_WITHOUT_MASK, 4), + NodeKind::AltHashBranchWithValue => size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4), }; iter_start .chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None }) @@ -85,7 +87,12 @@ impl trie_root::TrieStream for TrieStream { } fn append_leaf(&mut self, key: &[u8], value: &[u8]) { - self.buffer.extend(fuse_nibbles_node(key, NodeKind::Leaf)); + let kind = if self.inner_value_hashing { + NodeKind::AltHashLeaf + } else { + NodeKind::Leaf + }; + self.buffer.extend(fuse_nibbles_node(key, kind)); Compact(value.len() as u32).encode_to(&mut self.buffer); self.current_value_range = Some(self.buffer.len()..self.buffer.len() + value.len()); self.buffer.extend_from_slice(value); @@ -99,9 +106,19 @@ impl trie_root::TrieStream for TrieStream { ) { if let Some(partial) = maybe_partial { if maybe_value.is_some() { - self.buffer.extend(fuse_nibbles_node(partial, NodeKind::BranchWithValue)); + let kind = if self.inner_value_hashing { + NodeKind::AltHashBranchWithValue + } else { + NodeKind::BranchWithValue + }; + self.buffer.extend(fuse_nibbles_node(partial, kind)); } else { - self.buffer.extend(fuse_nibbles_node(partial, NodeKind::BranchNoValue)); + let kind = if self.inner_value_hashing { + NodeKind::AltHashBranchNoValue + } else { + NodeKind::BranchNoValue + }; + self.buffer.extend(fuse_nibbles_node(partial, kind)); } let bm = branch_node_bit_mask(has_children); self.buffer.extend([bm.0,bm.1].iter()); @@ -134,11 +151,14 @@ impl trie_root::TrieStream for TrieStream { range: range, unused_value: false, contain_hash: false, - do_value_hash: true, old_hash: false, recorded_do_value_hash: false, + // No existing state, no need to use switch_to_value_hash + switch_to_value_hash: false, + do_value_hash: true, }; - >>::hash(&data, &meta).as_ref().encode_to(&mut self.buffer); + let hash = >>::hash(&data, &meta); + self.buffer.extend_from_slice(hash.as_ref()); } else { 
H::hash(&data).as_ref().encode_to(&mut self.buffer); } @@ -154,27 +174,19 @@ impl trie_root::TrieStream for TrieStream { range: range, unused_value: false, contain_hash: false, - do_value_hash: inner_value_hashing, old_hash: false, recorded_do_value_hash: inner_value_hashing, - }; - - // Add the recorded_do_value_hash to encoded - let mut encoded = meta.write_state_meta(); - let encoded = if encoded.len() > 0 { - encoded.extend(data); - encoded - } else { - data + switch_to_value_hash: false, + do_value_hash: inner_value_hashing, }; if inner_value_hashing && meta.range.as_ref().map(|r| r.end - r.start >= trie_constants::INNER_HASH_TRESHOLD) .unwrap_or_default() { - >>::hash(&encoded, &meta) + >>::hash(&data, &meta) } else { - H::hash(&encoded) + H::hash(&data) } } From a70ce70363b1ac7cfadd555c93788ca6d0e10f5a Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 8 Jun 2021 16:49:05 +0200 Subject: [PATCH 037/188] alt hashing only for branch with value. --- frame/system/src/lib.rs | 2 +- primitives/trie/src/lib.rs | 5 ++- primitives/trie/src/node_codec.rs | 29 +++++++++--------- primitives/trie/src/node_header.rs | 49 ++++++++++++++---------------- primitives/trie/src/trie_stream.rs | 10 ++---- 5 files changed, 42 insertions(+), 53 deletions(-) diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 55cacac640493..884a191b2e83b 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1408,7 +1408,7 @@ impl Pallet { >::hashed_key().to_vec() => [69u8; 32].encode() ], children_default: map![], - flag_hashed_value: true, + alt_hashing: true, }) } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index fc7d16b71328f..4cbe1777d3ee8 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -938,9 +938,8 @@ mod trie_constants { pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6; pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6; pub const EMPTY_TRIE: u8 = FIRST_PREFIX | (0b_00 << 4); - pub const ALT_HASHING_LEAF_PREFIX_MASK: u8 
= FIRST_PREFIX | (0b_01 << 4); - pub const ALT_HASHING_BRANCH_WITHOUT_MASK: u8 = FIRST_PREFIX | (0b_10 << 4); - pub const ALT_HASHING_BRANCH_WITH_MASK: u8 = FIRST_PREFIX | (0b_11 << 4); + pub const ALT_HASHING_LEAF_PREFIX_MASK: u8 = FIRST_PREFIX | (0b_1 << 5); + pub const ALT_HASHING_BRANCH_WITH_MASK: u8 = FIRST_PREFIX | (0b_01 << 4); } #[cfg(test)] diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index c239a5c07c97a..37d5510178d71 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -101,10 +101,16 @@ impl NodeCodec { meta.as_mut() .map(|m| m.set_state_meta(alt_hashing)); + let branch_has_value = if let NodeHeader::Branch(has_value, _) = &header { + *has_value + } else { + false + }; + match header { NodeHeader::Null => Ok(NodePlan::Empty), - NodeHeader::AltHashBranch(has_value, nibble_count) - | NodeHeader::Branch(has_value, nibble_count) => { + NodeHeader::AltHashBranch(nibble_count) + | NodeHeader::Branch(_, nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { @@ -116,7 +122,7 @@ impl NodeCodec { let partial_padding = nibble_ops::number_padding(nibble_count); let bitmap_range = input.take(BITMAP_LENGTH)?; let bitmap = Bitmap::decode(&data[bitmap_range])?; - let value = if has_value { + let value = if branch_has_value { if alt_hashing && contains_hash { ValuePlan::HashedValue(input.take(H::LENGTH)?, 0) } else { @@ -257,17 +263,14 @@ impl> NodeCodecT for NodeCodec { meta: &mut M, ) -> Vec { let mut output = match (&maybe_value, meta.do_value_hash()) { - (&Value::NoValue, false) => { - partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue) - }, - (_, false) => { + (&Value::NoValue, _) => { partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) }, - (&Value::NoValue, true) => { - 
partial_from_iterator_encode(partial, number_nibble, NodeKind::AltHashBranchWithValue) + (_, false) => { + partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue) }, (_, true) => { - partial_from_iterator_encode(partial, number_nibble, NodeKind::AltHashBranchNoValue) + partial_from_iterator_encode(partial, number_nibble, NodeKind::AltHashBranchWithValue) }, }; @@ -325,8 +328,7 @@ fn partial_from_iterator_encode>( NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), NodeKind::AltHashLeaf => NodeHeader::AltHashLeaf(nibble_count).encode_to(&mut output), - NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(true, nibble_count).encode_to(&mut output), - NodeKind::AltHashBranchNoValue => NodeHeader::AltHashBranch(false, nibble_count).encode_to(&mut output), + NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(nibble_count).encode_to(&mut output), }; output.extend(partial); output @@ -346,8 +348,7 @@ fn partial_encode(partial: Partial, node_kind: NodeKind) -> Vec { NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), NodeKind::AltHashLeaf => NodeHeader::AltHashLeaf(nibble_count).encode_to(&mut output), - NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(true, nibble_count).encode_to(&mut output), - NodeKind::AltHashBranchNoValue => NodeHeader::AltHashBranch(false, nibble_count).encode_to(&mut output), + NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(nibble_count).encode_to(&mut output), }; if number_nibble_encoded > 0 { output.push(nibble_ops::pad_right((partial.0).1)); diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index a1f0090ecb8a2..8d086d04d302e 100644 --- a/primitives/trie/src/node_header.rs +++ 
b/primitives/trie/src/node_header.rs @@ -28,7 +28,7 @@ pub(crate) enum NodeHeader { Null, Branch(bool, usize), Leaf(usize), - AltHashBranch(bool, usize), + AltHashBranch(usize), AltHashLeaf(usize), } @@ -38,7 +38,6 @@ pub(crate) enum NodeKind { BranchNoValue, BranchWithValue, AltHashLeaf, - AltHashBranchNoValue, AltHashBranchWithValue, } @@ -47,17 +46,15 @@ impl Encode for NodeHeader { match self { NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE), NodeHeader::Branch(true, nibble_count) => - encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, output), + encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, 2, output), NodeHeader::Branch(false, nibble_count) => - encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITHOUT_MASK, output), + encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITHOUT_MASK, 2, output), NodeHeader::Leaf(nibble_count) => - encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, output), - NodeHeader::AltHashBranch(true, nibble_count) => - encode_size_and_prefix_alt(*nibble_count, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, output), - NodeHeader::AltHashBranch(false, nibble_count) => - encode_size_and_prefix_alt(*nibble_count, trie_constants::ALT_HASHING_BRANCH_WITHOUT_MASK, output), - NodeHeader::AltHashLeaf(nibble_count) => - encode_size_and_prefix_alt(*nibble_count, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, output), + encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, 2, output), + NodeHeader::AltHashBranch(nibble_count) => + encode_size_and_prefix(*nibble_count, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4, output), + NodeHeader::AltHashLeaf(nibble_count) => + encode_size_and_prefix(*nibble_count, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3, output), } } } @@ -87,12 +84,15 @@ impl Decode for NodeHeader { trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input, 2)?)), 
trie_constants::BRANCH_WITH_MASK => Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)), trie_constants::BRANCH_WITHOUT_MASK => Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)), - trie_constants::EMPTY_TRIE => match i & (0b1111 << 4) { - trie_constants::ALT_HASHING_LEAF_PREFIX_MASK => Ok(NodeHeader::AltHashLeaf(decode_size(i, input, 4)?)), - trie_constants::ALT_HASHING_BRANCH_WITH_MASK => Ok(NodeHeader::AltHashBranch(true, decode_size(i, input, 4)?)), - trie_constants::ALT_HASHING_BRANCH_WITHOUT_MASK => Ok(NodeHeader::AltHashBranch(false, decode_size(i, input, 4)?)), - // do not allow any special encoding - _ => Err("Unallowed encoding".into()), + trie_constants::EMPTY_TRIE => { + if i & (0b111 << 5) == trie_constants::ALT_HASHING_LEAF_PREFIX_MASK { + Ok(NodeHeader::AltHashLeaf(decode_size(i, input, 3)?)) + } else if i & (0b1111 << 4) == trie_constants::ALT_HASHING_BRANCH_WITH_MASK { + Ok(NodeHeader::AltHashBranch(decode_size(i, input, 4)?)) + } else { + // do not allow any special encoding + Err("Unallowed encoding".into()) + } }, _ => unreachable!(), } @@ -105,7 +105,7 @@ impl Decode for NodeHeader { pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8, prefix_mask: usize) -> impl Iterator { let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size); - let max_value = 255u8 >> prefix_mask; + let max_value = 255u8 >> (8 - prefix_mask); let l1 = sp_std::cmp::min(max_value as usize - 1, size); let (first_byte, mut rem) = if size == l1 { (once(prefix + l1 as u8), 0) @@ -130,15 +130,10 @@ pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8, prefix_mask: usi } /// Encodes size and prefix to a stream output (prefix on 2 first bit only). -fn encode_size_and_prefix(size: usize, prefix: u8, out: &mut W) { - for b in size_and_prefix_iterator(size, prefix, 2) { - out.push_byte(b) - } -} - -/// Encodes size and prefix to a stream output with prefix (prefix on 4 first bit only). 
-fn encode_size_and_prefix_alt(size: usize, prefix: u8, out: &mut W) { - for b in size_and_prefix_iterator(size, prefix, 4) { +fn encode_size_and_prefix(size: usize, prefix: u8, prefix_mask: usize, out: &mut W) + where W: Output + ?Sized, +{ + for b in size_and_prefix_iterator(size, prefix, prefix_mask) { out.push_byte(b) } } diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index 36ccb49175783..d5aa95421835e 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -61,8 +61,7 @@ fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK, 2), NodeKind::BranchNoValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2), NodeKind::BranchWithValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK, 2), - NodeKind::AltHashLeaf => size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 4), - NodeKind::AltHashBranchNoValue => size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITHOUT_MASK, 4), + NodeKind::AltHashLeaf => size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3), NodeKind::AltHashBranchWithValue => size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4), }; iter_start @@ -113,12 +112,7 @@ impl trie_root::TrieStream for TrieStream { }; self.buffer.extend(fuse_nibbles_node(partial, kind)); } else { - let kind = if self.inner_value_hashing { - NodeKind::AltHashBranchNoValue - } else { - NodeKind::BranchNoValue - }; - self.buffer.extend(fuse_nibbles_node(partial, kind)); + self.buffer.extend(fuse_nibbles_node(partial, NodeKind::BranchNoValue)); } let bm = branch_node_bit_mask(has_children); self.buffer.extend([bm.0,bm.1].iter()); From 11f0d5a66d4be0a6b3dfe840129ee37c9ada9f4f Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 8 Jun 2021 17:34:06 +0200 Subject: [PATCH 038/188] fix trie tests --- 
client/api/src/in_mem.rs | 2 +- primitives/trie/src/lib.rs | 6 +++--- primitives/trie/src/node_codec.rs | 3 ++- primitives/trie/src/node_header.rs | 2 +- primitives/trie/src/trie_stream.rs | 3 +-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 3dbcd4aa897de..3030784b3c320 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -576,7 +576,7 @@ impl backend::BlockImportOperation for BlockImportOperatio let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, - storage.flag_hashed_value, + storage.alt_hashing, ); self.new_state = Some(transaction); diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 4cbe1777d3ee8..07a398090a5ca 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -980,14 +980,13 @@ mod tests { let mut memdb = MemoryDBMeta::<_, T::MetaHasher>::default(); let mut root = Default::default(); { - let mut t = TrieDBMut::::new_with_layout(&mut memdb, &mut root, layout); + let mut t = TrieDBMut::::new_with_layout(&mut memdb, &mut root, layout.clone()); for (x, y) in input.clone() { t.insert(x, y).unwrap(); } } { - // Not using layout: it should be initialized from state root meta. 
- let t = TrieDB::::new(&mut memdb, &root).unwrap(); + let t = TrieDB::::new_with_layout(&mut memdb, &root, layout).unwrap(); assert_eq!( input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), t.iter().unwrap() @@ -1000,6 +999,7 @@ mod tests { fn check_input(input: &Vec<(&[u8], &[u8])>) { // TODO remove this iter let layout = Layout::with_inner_hashing(); + check_iteration::(input, layout.clone()); check_equivalent::(input, layout.clone()); diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 37d5510178d71..d7849fcf52498 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -104,7 +104,8 @@ impl NodeCodec { let branch_has_value = if let NodeHeader::Branch(has_value, _) = &header { *has_value } else { - false + // alt_hash_branch + true }; match header { diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index 8d086d04d302e..6711c1a047127 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -105,7 +105,7 @@ impl Decode for NodeHeader { pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8, prefix_mask: usize) -> impl Iterator { let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size); - let max_value = 255u8 >> (8 - prefix_mask); + let max_value = 255u8 >> prefix_mask; let l1 = sp_std::cmp::min(max_value as usize - 1, size); let (first_byte, mut rem) = if size == l1 { (once(prefix + l1 as u8), 0) diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index d5aa95421835e..3206ff5729870 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -151,8 +151,7 @@ impl trie_root::TrieStream for TrieStream { switch_to_value_hash: false, do_value_hash: true, }; - let hash = >>::hash(&data, &meta); - self.buffer.extend_from_slice(hash.as_ref()); + >>::hash(&data, &meta).as_ref().encode_to(&mut self.buffer); } else { 
H::hash(&data).as_ref().encode_to(&mut self.buffer); } From cf0f0dfc0eb371b65e2a4571ef955e92daae1cac Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 8 Jun 2021 19:28:37 +0200 Subject: [PATCH 039/188] Hash of value include the encoded size. --- Cargo.lock | 16 +++++----- bin/node/bench/src/generator.rs | 4 +-- client/chain-spec/src/chain_spec.rs | 14 ++++----- client/db/src/bench.rs | 2 +- client/db/src/lib.rs | 30 +++++++++---------- client/executor/runtime-test/src/lib.rs | 2 +- client/executor/src/integration_tests/mod.rs | 4 +-- client/light/src/backend.rs | 3 +- .../service/src/chain_ops/export_raw_state.rs | 4 +-- frame/support/test/tests/instance.rs | 2 +- primitives/externalities/src/lib.rs | 3 +- primitives/io/src/lib.rs | 15 +++++----- primitives/state-machine/src/basic.rs | 6 ++-- primitives/state-machine/src/ext.rs | 14 ++++----- primitives/state-machine/src/lib.rs | 9 +++--- .../src/overlayed_changes/mod.rs | 14 ++++----- .../state-machine/src/proving_backend.rs | 14 ++------- primitives/state-machine/src/read_only.rs | 4 +-- primitives/state-machine/src/testing.rs | 2 +- primitives/state-machine/src/trie_backend.rs | 8 ++--- primitives/tasks/src/async_externalities.rs | 4 +-- primitives/trie/src/lib.rs | 12 ++++---- primitives/trie/src/node_codec.rs | 12 +++++--- primitives/trie/src/storage_proof.rs | 3 +- test-utils/client/src/lib.rs | 2 +- test-utils/runtime/src/genesismap.rs | 4 +-- test-utils/runtime/src/system.rs | 2 +- 27 files changed, 101 insertions(+), 108 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 24c3ff64dee71..771af600156c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2342,7 +2342,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" [[package]] name = "hash256-std-hasher" @@ 
-2356,7 +2356,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" dependencies = [ "crunchy", ] @@ -3001,7 +3001,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", @@ -3793,7 +3793,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" dependencies = [ "hash-db", "hashbrown", @@ -10375,7 +10375,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" dependencies = [ "criterion", "hash-db", @@ -10390,7 +10390,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.5" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" dependencies = [ "hash-db", "hashbrown", @@ -10402,7 
+10402,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" dependencies = [ "hash-db", ] @@ -10420,7 +10420,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#da84037309542514f9eebc853909bcbfb2838805" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index 913c1ff8779f0..f811802d357ee 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -31,7 +31,7 @@ use crate::simple_trie::SimpleTrie; pub fn generate_trie( db: Arc, key_values: impl IntoIterator, Vec)>, - flag_hashed_value: bool, + alt_hashing: bool, ) -> Hash { let mut root = Hash::default(); @@ -44,7 +44,7 @@ pub fn generate_trie( let mut trie = SimpleTrie { db, overlay: &mut overlay }; { - let mut trie_db = if flag_hashed_value { + let mut trie_db = if alt_hashing { let layout = sp_trie::Layout::with_inner_hashing(); TrieDBMut::::new_with_layout(&mut trie, &mut root, layout) } else { diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 7bf059ae8d0f4..51c3a8c677645 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -87,9 +87,9 @@ impl GenesisSource { ) .collect(); - let flag_hashed_value = storage.flag_hashed_value; + let alt_hashing = storage.alt_hashing; - Ok(Genesis::Raw(RawGenesis { top, children_default, flag_hashed_value })) + Ok(Genesis::Raw(RawGenesis { top, 
children_default, alt_hashing })) }, } } @@ -99,7 +99,7 @@ impl BuildStorage for ChainSpec { fn build_storage(&self) -> Result { match self.genesis.resolve()? { Genesis::Runtime(gc) => gc.build_storage(), - Genesis::Raw(RawGenesis { top: map, children_default: children_map, flag_hashed_value }) => Ok(Storage { + Genesis::Raw(RawGenesis { top: map, children_default: children_map, alt_hashing }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), children_default: children_map.into_iter().map(|(storage_key, child_content)| { let child_info = ChildInfo::new_default(storage_key.0.as_slice()); @@ -111,7 +111,7 @@ impl BuildStorage for ChainSpec { }, ) }).collect(), - flag_hashed_value, + alt_hashing, }), } } @@ -134,7 +134,7 @@ pub struct RawGenesis { pub top: GenesisStorage, pub children_default: HashMap, #[serde(default)] - pub flag_hashed_value: bool, + pub alt_hashing: bool, } #[derive(Serialize, Deserialize)] @@ -328,9 +328,9 @@ impl ChainSpec { .collect(), )) .collect(); - let flag_hashed_value = storage.flag_hashed_value; + let alt_hashing = storage.alt_hashing; - Genesis::Raw(RawGenesis { top, children_default, flag_hashed_value }) + Genesis::Raw(RawGenesis { top, children_default, alt_hashing }) }, (_, genesis) => genesis, }; diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 0f111eed99189..409897b831440 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -151,7 +151,7 @@ impl BenchmarkingState { state.add_whitelist_to_tracker(); state.reopen()?; - let flagged = genesis.flag_hashed_value; + let flagged = genesis.alt_hashing; let child_delta = genesis.children_default.iter().map(|(_storage_key, child_content)| ( &child_content.child_info, child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 41a6853aebc71..1eefca0c74ca4 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -782,7 +782,7 @@ impl 
sc_client_api::backend::BlockImportOperation for Bloc )); let mut changes_trie_config: Option = None; - let flag = storage.flag_hashed_value; + let flag = storage.alt_hashing; let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| { if &k[..] == well_known_keys::CHANGES_TRIE_CONFIG { @@ -2291,7 +2291,7 @@ pub(crate) mod tests { set_state_data_inner(true); set_state_data_inner(false); } - fn set_state_data_inner(flagged: bool) { + fn set_state_data_inner(alt_hashing: bool) { let db = Backend::::new_test(2, 0); let hash = { let mut op = db.begin_operation().unwrap(); @@ -2312,14 +2312,14 @@ pub(crate) mod tests { header.state_root = op.old_state.storage_root(storage .iter() .map(|(x, y)| (&x[..], Some(&y[..]))), - flagged, + alt_hashing, ).0.into(); let hash = header.hash(); op.reset_storage(Storage { top: storage.into_iter().collect(), children_default: Default::default(), - flag_hashed_value: flagged, + alt_hashing: alt_hashing, }).unwrap(); op.set_block_data( header.clone(), @@ -2358,7 +2358,7 @@ pub(crate) mod tests { let (root, overlay) = op.old_state.storage_root( storage.iter() .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))), - flagged, + alt_hashing, ); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); @@ -2385,7 +2385,7 @@ pub(crate) mod tests { fn delete_only_when_negative_rc() { sp_tracing::try_init_simple(); let key; - let flagged = false; + let alt_hashing = false; let backend = Backend::::new_test(1, 0); let hash = { @@ -2399,13 +2399,13 @@ pub(crate) mod tests { extrinsics_root: Default::default(), }; - header.state_root = op.old_state.storage_root(std::iter::empty(), flagged).0.into(); + header.state_root = op.old_state.storage_root(std::iter::empty(), alt_hashing).0.into(); let hash = header.hash(); op.reset_storage(Storage { top: Default::default(), children_default: Default::default(), - flag_hashed_value: flagged, + alt_hashing: alt_hashing, }).unwrap(); key = 
op.db_updates.insert(EMPTY_PREFIX, b"hello"); @@ -2441,7 +2441,7 @@ pub(crate) mod tests { .iter() .cloned() .map(|(x, y)| (x, Some(y))), - flagged, + alt_hashing, ).0.into(); let hash = header.hash(); @@ -2479,7 +2479,7 @@ pub(crate) mod tests { .iter() .cloned() .map(|(x, y)| (x, Some(y))), - flagged, + alt_hashing, ).0.into(); let hash = header.hash(); @@ -2517,7 +2517,7 @@ pub(crate) mod tests { .iter() .cloned() .map(|(x, y)| (x, Some(y))), - flagged, + alt_hashing, ).0.into(); op.set_block_data( @@ -2833,7 +2833,7 @@ pub(crate) mod tests { #[test] fn storage_hash_is_cached_correctly() { let backend = Backend::::new_test(10, 10); - let flagged = false; + let alt_hashing = false; let hash0 = { let mut op = backend.begin_operation().unwrap(); @@ -2851,14 +2851,14 @@ pub(crate) mod tests { header.state_root = op.old_state.storage_root(storage .iter() .map(|(x, y)| (&x[..], Some(&y[..]))), - flagged, + alt_hashing, ).0.into(); let hash = header.hash(); op.reset_storage(Storage { top: storage.into_iter().collect(), children_default: Default::default(), - flag_hashed_value: flagged, + alt_hashing: alt_hashing, }).unwrap(); op.set_block_data( header.clone(), @@ -2893,7 +2893,7 @@ pub(crate) mod tests { let (root, overlay) = op.old_state.storage_root( storage.iter() .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))), - flagged, + alt_hashing, ); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 78b824140bcc7..fc452d135da07 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -64,7 +64,7 @@ sp_core::wasm_export_functions! 
{ fn test_switch_state() { print("switch_state"); - storage::flag_hash_value(); + storage::alt_hashing(); print("switched!"); } diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 5076352dff40a..7c702cb855d6e 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -211,7 +211,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { b"baz".to_vec() => b"bar".to_vec() ], children_default: map![], - flag_hashed_value: false, + alt_hashing: false, }); assert_eq!(ext, expected); } @@ -245,7 +245,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { b"bbb".to_vec() => b"5".to_vec() ], children_default: map![], - flag_hashed_value: false, + alt_hashing: false, }); assert_eq!(expected, ext); } diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 2d65c7b3347ff..b593b8998a0ae 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -346,8 +346,7 @@ impl BlockImportOperation for ImportOperation } let storage_update = InMemoryBackend::from(storage); - let flag = input.flag_hashed_value; - let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta, flag); + let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta, input.alt_hashing); self.storage_update = Some(storage_update); Ok(storage_root) diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs index fe96994ed2605..28fad7632fd80 100644 --- a/client/service/src/chain_ops/export_raw_state.rs +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -42,7 +42,7 @@ where let empty_key = StorageKey(Vec::new()); let mut top_storage = client.storage_pairs(&block, &empty_key)?; let mut children_default = HashMap::new(); - let flag_hashed_value = client.state_hashed_value(&block)?; + let alt_hashing = client.state_hashed_value(&block)?; 
// Remove all default child storage roots from the top storage and collect the child storage // pairs. @@ -70,5 +70,5 @@ where } let top = top_storage.into_iter().map(|(k, v)| (k.0, v.0)).collect(); - Ok(Storage { top, children_default, flag_hashed_value }) + Ok(Storage { top, children_default, alt_hashing }) } diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 91363d0857e10..1a07b7174e2ed 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -327,7 +327,7 @@ fn storage_instance_independence() { let mut storage = sp_core::storage::Storage { top: std::collections::BTreeMap::new(), children_default: std::collections::HashMap::new(), - flag_hashed_value: true, + alt_hashing: true, }; sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || { module2::Value::::put(0); diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 7d315812b2178..54003d9886311 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -293,7 +293,8 @@ pub trait Externalities: ExtensionStore { } /// Set flag in inner state to activate hashing of values. - fn flag_hash_value(&mut self); + /// TODO remove + fn alt_hashing(&mut self); } /// Extension for the [`Externalities`] trait. diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 22e55a9b78a68..a94e0d624be4a 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -214,10 +214,9 @@ pub trait Storage { .expect("No open transaction that can be committed."); } - /// Set flag to switch storage state - /// to internally hash its values. - fn flag_hash_value(&mut self) { - self.flag_hash_value(); + /// Swith state to alternate hashing. 
+ fn alt_hashing(&mut self) { + self.alt_hashing(); } } @@ -1465,7 +1464,7 @@ mod tests { t = BasicExternalities::new(Storage { top: map![b"foo".to_vec() => b"bar".to_vec()], children_default: map![], - flag_hashed_value: false, + alt_hashing: false, }); t.execute_with(|| { @@ -1477,7 +1476,7 @@ mod tests { t = BasicExternalities::new(Storage { top: map![b"foo00".to_vec() => value.clone()], children_default: map![], - flag_hashed_value: true, + alt_hashing: true, }); t.execute_with(|| { @@ -1492,7 +1491,7 @@ mod tests { let mut t = BasicExternalities::new(Storage { top: map![b":test".to_vec() => value.clone()], children_default: map![], - flag_hashed_value: false, + alt_hashing: false, }); t.execute_with(|| { @@ -1515,7 +1514,7 @@ mod tests { b":abdd".to_vec() => b"\x0b\0\0\0Hello world".to_vec() ], children_default: map![], - flag_hashed_value: false, + alt_hashing: false, }); t.execute_with(|| { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 2cc7314392bdf..f4ddc6f36f80d 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -340,7 +340,7 @@ impl Externalities for BasicExternalities { unimplemented!("set_whitelist is not supported in Basic") } - fn flag_hash_value(&mut self) { + fn alt_hashing(&mut self) { self.inner.alt_hashing = true; } } @@ -408,7 +408,7 @@ mod tests { child_info: child_info.to_owned(), } ], - flag_hashed_value: false, + alt_hashing: false, }); assert_eq!(ext.child_storage(child_info, b"doe"), Some(b"reindeer".to_vec())); @@ -439,7 +439,7 @@ mod tests { child_info: child_info.to_owned(), } ], - flag_hashed_value: false, + alt_hashing: false, }); let res = ext.kill_child_storage(child_info, None); diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index e187c128345b8..dd6e0f519ef35 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -547,7 +547,7 @@ where } else { let root = if 
let Some((changes, info)) = self.overlay.child_changes(storage_key) { let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); - Some(self.backend.child_storage_root(info, delta, self.overlay.flag_hash_value())) + Some(self.backend.child_storage_root(info, delta, self.overlay.alt_hashing())) } else { None }; @@ -738,9 +738,9 @@ where self.backend.proof_size() } - fn flag_hash_value(&mut self) { + fn alt_hashing(&mut self) { self.mark_dirty(); - self.overlay.set_flag_hash_value() + self.overlay.set_alt_hashing() } } @@ -952,7 +952,7 @@ mod tests { vec![40] => vec![40] ], children_default: map![], - flag_hashed_value: false, + alt_hashing: false, }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -998,7 +998,7 @@ mod tests { child_info: child_info.to_owned(), } ], - flag_hashed_value: false, + alt_hashing: false, }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1043,7 +1043,7 @@ mod tests { child_info: child_info.to_owned(), } ], - flag_hashed_value: false, + alt_hashing: false, }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1083,7 +1083,7 @@ mod tests { child_info: child_info.to_owned(), } ], - flag_hashed_value: false, + alt_hashing: false, }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 709357430a40c..19ab6b994d63c 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1502,7 +1502,7 @@ mod tests { let mut trie = TrieDBMut::from_existing_with_layout( &mut mdb, &mut root, - layout.cloen(), + layout.clone(), ).unwrap(); trie.insert(b"foo", vec![1u8; 1_000].as_slice()) // big inner hash .expect("insert failed"); @@ -1527,11 +1527,12 @@ mod tests { local_result1.into_iter().collect::>(), vec![(b"foo222".to_vec(), Some(vec![5u8; 100]))], ); + println!("a{:?}", 
remote_proof.encode().len()); + println!("b{:?}", remote_proof.encoded_size()); remote_proof }; let remote_proof = check_proof(mdb.clone(), root.clone()); - // check full values in proof assert!(remote_proof.encode().len() > 1_100); assert!(remote_proof.encoded_size() > 1_100); @@ -1551,7 +1552,7 @@ mod tests { .expect("insert failed"); } let root3 = root.clone(); - assert!(root2 == root3); + assert!(root1 == root3); // different value then same is enough to update // from triedbmut persipective (do not // work with state machine as only changes do makes @@ -1568,7 +1569,7 @@ mod tests { .expect("insert failed"); } let root3 = root.clone(); - assert!(root2 != root3); + assert!(root1 != root3); let remote_proof = check_proof(mdb.clone(), root.clone()); // nodes foo is replaced by its hashed value form. assert!(remote_proof.encode().len() < 1000); diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 5386192255ad3..5df20ba1f233b 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -108,7 +108,7 @@ pub struct OverlayedChanges { /// True if extrinsics stats must be collected. collect_extrinsics: bool, /// True if we flag inner state to store hash of values. - flag_hash_value: bool, + alt_hashing: bool, /// Collect statistic on this execution. stats: StateMachineStats, } @@ -263,13 +263,13 @@ impl OverlayedChanges { } /// Ask to switch state to use inner hash. - pub fn set_flag_hash_value(&mut self) { - self.flag_hash_value = true; + pub fn set_alt_hashing(&mut self) { + self.alt_hashing = true; } - /// Is `flag_hash_value` flag set. - pub fn flag_hash_value(&self) -> bool { - self.flag_hash_value + /// Is `alt_hashing` flag set. + pub fn alt_hashing(&self) -> bool { + self.alt_hashing } /// Returns a double-Option: None if the key is unknown (i.e. 
and the query should be referred @@ -643,7 +643,7 @@ impl OverlayedChanges { |(k, v)| (&k[..], v.value().map(|v| &v[..])) ))); - let (root, transaction) = backend.full_storage_root(delta, child_delta, self.flag_hash_value); + let (root, transaction) = backend.full_storage_root(delta, child_delta, self.alt_hashing); cache.transaction = Some(transaction); cache.transaction_storage_root = Some(root); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 1ab9d09125648..c1eaf7068c7ac 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -113,8 +113,6 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> struct ProofRecorderInner { /// All the records that we have stored so far. records: HashMap>, - /// Is inner hash in proof. - flagged_inner_hash: bool, /// The encoded size of all recorded values. encoded_size: usize, } @@ -130,12 +128,9 @@ impl ProofRecorder { pub fn record(&self, key: Hash, mut val: Option<(DBValue, TrieMeta)>) { let mut inner = self.inner.write(); - let ProofRecorderInner { encoded_size, records, flagged_inner_hash } = &mut *inner; + let ProofRecorderInner { encoded_size, records } = &mut *inner; records.entry(key).or_insert_with(|| { if let Some(val) = val.as_mut() { - if val.1.recorded_do_value_hash { - *flagged_inner_hash = true; - } val.1.set_accessed_value(false); sp_trie::resolve_encoded_meta::(val); *encoded_size += sp_trie::estimate_entry_size(val, H::LENGTH); @@ -184,14 +179,9 @@ impl ProofRecorder { .records .iter() .filter_map(|(_k, v)| v.as_ref().map(|v| { - let mut meta = v.1.clone(); - if !inner.flagged_inner_hash { - // Remove the old hash meta. 
- meta.old_hash = false; - } < as sp_trie::TrieLayout>::MetaHasher as hash_db::MetaHasher - >::stored_value(v.0.as_slice(), meta) + >::stored_value(v.0.as_slice(), v.1.clone()) })) .collect(); diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index a54b6eaba6848..b2e32726fc802 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -203,8 +203,8 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("set_whitelist is not supported in ReadOnlyExternalities") } - fn flag_hash_value(&mut self) { - unimplemented!("flag_hash_value is not supported by ReadOnlyExternalities"); + fn alt_hashing(&mut self) { + unimplemented!("alt_hashing is not supported by ReadOnlyExternalities"); } } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 19006ccb52ac4..49e963c7f4a99 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -171,7 +171,7 @@ where )) } - self.backend.update(transaction, self.overlay.flag_hash_value()) + self.backend.update(transaction, self.overlay.alt_hashing()) } /// Commit all pending changes to the underlying backend. 
diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index fbc1f12fd2ae1..62666134445d8 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -379,17 +379,17 @@ pub mod tests { // a drop a insert of same hash: rc is 0 assert_eq!(test_trie(false).storage_root(iter::empty(), false).1.drain() .into_iter().filter(|v| (v.1).1 != 0).count(), 0); - // a drop a insert + // Unchanged assert_eq!(test_trie(false).storage_root(iter::empty(), true).1.drain() - .into_iter().filter(|v| (v.1).1 != 0).count(), 2); + .into_iter().filter(|v| (v.1).1 != 0).count(), 0); // a drop a insert of same hash: rc is 0 assert_eq!(test_trie(true).storage_root(iter::empty(), true).1.drain() .into_iter().filter(|v| (v.1).1 != 0).count(), 0); } #[test] - fn storage_root_flagged_is_not_empty() { - assert!(!test_trie(false).storage_root(iter::empty(), true).1.drain().is_empty()); + fn storage_root_flagged_is_empty() { + assert!(test_trie(false).storage_root(iter::empty(), true).1.drain().is_empty()); } #[test] diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index ff21e74889d06..6451caf11b7eb 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -190,8 +190,8 @@ impl Externalities for AsyncExternalities { unimplemented!("set_whitelist is not supported in AsyncExternalities") } - fn flag_hash_value(&mut self) { - unimplemented!("flag_hash_value is not supported in AsyncExternalities") + fn alt_hashing(&mut self) { + unimplemented!("alt_hashing is not supported in AsyncExternalities") } } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 07a398090a5ca..e8bd4e783fc3e 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -168,7 +168,7 @@ impl Meta for TrieMeta { value_plan: ValuePlan, ) { let (contain_hash, range) = match value_plan { - 
ValuePlan::Value(range) => (false, range), + ValuePlan::Value(range, with_len) => (false, with_len..range.end), ValuePlan::HashedValue(range, _size) => (true, range), ValuePlan::NoValue => return, }; @@ -186,7 +186,7 @@ impl Meta for TrieMeta { node_plan: &NodePlan, ) { let (contain_hash, range) = match node_plan.value_plan() { - Some(ValuePlan::Value(range)) => (false, range.clone()), + Some(ValuePlan::Value(range, with_len)) => (false, *with_len..range.end), Some(ValuePlan::HashedValue(range, _size)) => (true, range.clone()), Some(ValuePlan::NoValue) => return, None => return, @@ -324,7 +324,7 @@ impl MetaHasher for StateHasher stored.extend_from_slice(value); return stored; } - if meta.unused_value { + if meta.unused_value && meta.do_value_hash && !meta.switch_to_value_hash { if let Some(range) = meta.range.as_ref() { if range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { // Waring this assume that encoded value does not start by this, so it is tightly coupled @@ -893,7 +893,7 @@ fn inner_hashed_value(x: &[u8], range: Option<(usize, usize)>) -> Vec pub fn estimate_entry_size(entry: &(DBValue, TrieMeta), hash_len: usize) -> usize { use codec::Encode; let mut full_encoded = entry.0.encoded_size(); - if entry.1.unused_value { + if entry.1.unused_value && entry.1.do_value_hash { if let Some(range) = entry.1.range.as_ref() { let value_size = range.end - range.start; if range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { @@ -913,9 +913,7 @@ pub fn estimate_entry_size(entry: &(DBValue, TrieMeta), hash_len: usize) -> usiz /// if can hash value. pub fn resolve_encoded_meta(entry: &mut (DBValue, TrieMeta)) { use trie_db::NodeCodec; - if entry.1.do_value_hash { - let _ = as TrieLayout>::Codec::decode_plan(entry.0.as_slice(), &mut entry.1); - } + let _ = as TrieLayout>::Codec::decode_plan(entry.0.as_slice(), &mut entry.1); } /// Constants used into trie simplification codec. 
diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index d7849fcf52498..f4f79f8394dfc 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -127,8 +127,9 @@ impl NodeCodec { if alt_hashing && contains_hash { ValuePlan::HashedValue(input.take(H::LENGTH)?, 0) } else { + let with_len = input.offset; let count = >::decode(&mut input)?.0 as usize; - ValuePlan::Value(input.take(count)?) + ValuePlan::Value(input.take(count)?, with_len) } } else { ValuePlan::NoValue @@ -168,8 +169,9 @@ impl NodeCodec { let value = if alt_hashing && contains_hash { ValuePlan::HashedValue(input.take(H::LENGTH)?, 0) } else { + let with_len = input.offset; let count = >::decode(&mut input)?.0 as usize; - ValuePlan::Value(input.take(count)?) + ValuePlan::Value(input.take(count)?, with_len) }; Ok(NodePlan::Leaf { @@ -221,11 +223,12 @@ impl> NodeCodecT for NodeCodec { }; match value { Value::Value(value) => { + let with_len = output.len(); Compact(value.len() as u32).encode_to(&mut output); let start = output.len(); output.extend_from_slice(value); let end = output.len(); - meta.encoded_value_callback(ValuePlan::Value(start..end)); + meta.encoded_value_callback(ValuePlan::Value(start..end, with_len)); }, Value::HashedValue(hash, _size) => { debug_assert!(hash.len() == H::LENGTH); @@ -280,11 +283,12 @@ impl> NodeCodecT for NodeCodec { (0..BITMAP_LENGTH).for_each(|_|output.push(0)); match maybe_value { Value::Value(value) => { + let with_len = output.len(); Compact(value.len() as u32).encode_to(&mut output); let start = output.len(); output.extend_from_slice(value); let end = output.len(); - meta.encoded_value_callback(ValuePlan::Value(start..end)); + meta.encoded_value_callback(ValuePlan::Value(start..end, with_len)); }, Value::HashedValue(hash, _size) => { debug_assert!(hash.len() == H::LENGTH); diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 77c1c0f3ad22a..51522a92c756e 100644 --- 
a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -111,7 +111,8 @@ impl From for crate::MemoryDB { // Using compact proof will work directly here (read trie structure and // work directly. for item in proof.trie_nodes.iter() { - // Note using `default()` as global meta helps looking fro root node. + // Note using `default()` to build proof is fine, do_value being in header + // and no switch needed. let layout_meta = Default::default(); let (encoded_node, mut meta) = < as TrieLayout>::MetaHasher as MetaHasher diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index c087994e6c4b5..467b802f65cef 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -211,7 +211,7 @@ impl TestClientBuilder Date: Wed, 9 Jun 2021 09:34:20 +0200 Subject: [PATCH 040/188] removing fields(broken) --- primitives/storage/src/lib.rs | 5 ++++- primitives/trie/src/lib.rs | 19 ++++++------------- primitives/trie/src/trie_stream.rs | 4 ---- 3 files changed, 10 insertions(+), 18 deletions(-) diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 32645adecb535..ef1ba64f256d9 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -170,6 +170,10 @@ pub mod well_known_keys { /// Current extrinsic index (u32) is stored under this key. pub const EXTRINSIC_INDEX: &'static [u8] = b":extrinsic_index"; + /// Configuration for trie internal hashing of value is stored + /// under this key. + pub const TRIE_HASHING_CONFIG: &'static [u8] = b":trie_hashing_conf"; + /// Changes trie configuration is stored under this key. pub const CHANGES_TRIE_CONFIG: &'static [u8] = b":changes_trie"; @@ -196,7 +200,6 @@ pub mod well_known_keys { CHILD_STORAGE_KEY_PREFIX.starts_with(key) } } - } /// Information related to a child state. 
diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index e8bd4e783fc3e..2a811461ac38a 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -57,14 +57,9 @@ pub struct TrieMeta { /// Defined in the trie layout, when used with /// `TrieDbMut` it switch nodes to alternative hashing /// method by setting `do_value_hash` to true. - /// TODO may be useless (indicate that previous hash is - /// not using `do_value_hash`). + /// TODO consider defining it without do_value + /// and set do_value on encoding only. pub switch_to_value_hash: bool, - /// When `do_value_hash` is true, try to - /// store this behavior in top node - /// encoded (need to be part of state). - /// TODO remove - pub recorded_do_value_hash: bool, /// Does current encoded contains a hash instead of /// a value (information stored in meta for proofs). pub contain_hash: bool, @@ -79,9 +74,6 @@ pub struct TrieMeta { /// and reset on access explicitely: `HashDB::access_from`. /// TODO!! remove from meta: only use in proof recorder context. pub unused_value: bool, - /// Indicate that a node is using old hash scheme. - /// TODO remove - pub old_hash: bool, } impl Meta for TrieMeta { @@ -201,8 +193,11 @@ impl Meta for TrieMeta { } // TODO could be rename to get_state_meta + // the type of node depend on it. 
fn do_value_hash(&self) -> bool { - self.do_value_hash + self.do_value_hash/* && self.range.as_ref().map(|range| + range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD + ).unwrap_or(false)*/ } } @@ -360,9 +355,7 @@ impl MetaHasher for StateHasher unused_value: contain_hash, contain_hash, do_value_hash: false, - recorded_do_value_hash: false, switch_to_value_hash: false, - old_hash: false, }; meta.set_global_meta(global_meta); (stored, meta) diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index 3206ff5729870..b3cd7d00c00e7 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -145,8 +145,6 @@ impl trie_root::TrieStream for TrieStream { range: range, unused_value: false, contain_hash: false, - old_hash: false, - recorded_do_value_hash: false, // No existing state, no need to use switch_to_value_hash switch_to_value_hash: false, do_value_hash: true, @@ -167,8 +165,6 @@ impl trie_root::TrieStream for TrieStream { range: range, unused_value: false, contain_hash: false, - old_hash: false, - recorded_do_value_hash: inner_value_hashing, switch_to_value_hash: false, do_value_hash: inner_value_hashing, }; From 2f8360f5203914b2ec2a5c70da4ca21cd1cb5b99 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 9 Jun 2021 09:58:11 +0200 Subject: [PATCH 041/188] fix trie_stream to also include value length in inner hash. 
--- primitives/trie/src/lib.rs | 2 +- primitives/trie/src/trie_stream.rs | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 2a811461ac38a..57c585c787dce 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -990,8 +990,8 @@ mod tests { fn check_input(input: &Vec<(&[u8], &[u8])>) { // TODO remove this iter let layout = Layout::with_inner_hashing(); - check_iteration::(input, layout.clone()); check_equivalent::(input, layout.clone()); + check_iteration::(input, layout.clone()); let layout = Layout::default(); diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index b3cd7d00c00e7..ffa9f6c6c8885 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -92,9 +92,10 @@ impl trie_root::TrieStream for TrieStream { NodeKind::Leaf }; self.buffer.extend(fuse_nibbles_node(key, kind)); + let start = self.buffer.len(); Compact(value.len() as u32).encode_to(&mut self.buffer); - self.current_value_range = Some(self.buffer.len()..self.buffer.len() + value.len()); self.buffer.extend_from_slice(value); + self.current_value_range = Some(start..self.buffer.len()); } fn begin_branch( @@ -121,9 +122,10 @@ impl trie_root::TrieStream for TrieStream { self.buffer.extend(&branch_node(maybe_value.is_some(), has_children)); } if let Some(value) = maybe_value { + let start = self.buffer.len(); Compact(value.len() as u32).encode_to(&mut self.buffer); - self.current_value_range = Some(self.buffer.len()..self.buffer.len() + value.len()); self.buffer.extend_from_slice(value); + self.current_value_range = Some(start..self.buffer.len()); } } From 76b6d7a96b656f358c425587efd11a232048818e Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 9 Jun 2021 10:18:36 +0200 Subject: [PATCH 042/188] triedbmut only using alt type if inner hashing. 
--- primitives/trie/src/lib.rs | 6 +++--- primitives/trie/src/node_codec.rs | 16 ++++++++++++---- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 57c585c787dce..edc72443cf692 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -53,6 +53,7 @@ pub use hash_db::NoMeta; #[derive(Default, Clone, Debug)] pub struct TrieMeta { /// Range of encoded value or hashed value. + /// When encoded value, it includes the length of the value. pub range: Option>, /// Defined in the trie layout, when used with /// `TrieDbMut` it switch nodes to alternative hashing @@ -194,10 +195,9 @@ impl Meta for TrieMeta { // TODO could be rename to get_state_meta // the type of node depend on it. + // Note that it is after encoding state meta here!! fn do_value_hash(&self) -> bool { - self.do_value_hash/* && self.range.as_ref().map(|range| - range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD - ).unwrap_or(false)*/ + self.do_value_hash } } diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index f4f79f8394dfc..e125a1d50ee11 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -216,7 +216,7 @@ impl> NodeCodecT for NodeCodec { } fn leaf_node(partial: Partial, value: Value, meta: &mut M) -> Vec { - let mut output = if meta.do_value_hash() { + let mut output = if meta.do_value_hash() && value_do_hash(&value) { partial_encode(partial, NodeKind::AltHashLeaf) } else { partial_encode(partial, NodeKind::Leaf) @@ -263,10 +263,10 @@ impl> NodeCodecT for NodeCodec { partial: impl Iterator, number_nibble: usize, children: impl Iterator::Out>>>>, - maybe_value: Value, + value: Value, meta: &mut M, ) -> Vec { - let mut output = match (&maybe_value, meta.do_value_hash()) { + let mut output = match (&value, meta.do_value_hash() && value_do_hash(&value)) { (&Value::NoValue, _) => { partial_from_iterator_encode(partial, number_nibble, 
NodeKind::BranchNoValue) }, @@ -281,7 +281,7 @@ impl> NodeCodecT for NodeCodec { let bitmap_index = output.len(); let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH]; (0..BITMAP_LENGTH).for_each(|_|output.push(0)); - match maybe_value { + match value { Value::Value(value) => { let with_len = output.len(); Compact(value.len() as u32).encode_to(&mut output); @@ -318,6 +318,14 @@ impl> NodeCodecT for NodeCodec { // utils +fn value_do_hash(val: &Value) -> bool { + if let Value::Value(val) = val { + val.encoded_size() >= trie_constants::INNER_HASH_TRESHOLD + } else { + false + } +} + /// Encode and allocate node type header (type and size), and partial value. /// It uses an iterator over encoded partial bytes as input. fn partial_from_iterator_encode>( From cfa0ecd87f079288749de72f289a2246aa8c5a84 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 9 Jun 2021 10:42:20 +0200 Subject: [PATCH 043/188] trie_stream to also only use alt hashing type when actually alt hashing. --- primitives/trie/src/lib.rs | 4 +++- primitives/trie/src/node_codec.rs | 5 +++++ primitives/trie/src/trie_stream.rs | 20 ++++++++++++++++---- 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index edc72443cf692..33e25c5baafb2 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -59,7 +59,7 @@ pub struct TrieMeta { /// `TrieDbMut` it switch nodes to alternative hashing /// method by setting `do_value_hash` to true. /// TODO consider defining it without do_value - /// and set do_value on encoding only. + /// and set do_value on encoding only. TODO try_do_value_hash pub switch_to_value_hash: bool, /// Does current encoded contains a hash instead of /// a value (information stored in meta for proofs). @@ -68,6 +68,7 @@ pub struct TrieMeta { /// This is read and written as a state meta of the node. /// TODO replace by TrieDbMut node variant /// TODO replace by Option being size treshold. 
+ /// TODO apply_do_value_hash (and remove size testing) pub do_value_hash: bool, /// Record if a value was accessed, this is /// set as accessed by defalult, but can be @@ -920,6 +921,7 @@ mod trie_constants { /// TODO attaching to storage proof in a compatible way could be /// achieve by using a escaped header in first or last element of proof /// and write it after. + /// TODO 33 is not good switch to 32 + 1 + 1: 34 (avoid hashing stored hash for a 1 byte gain). pub const INNER_HASH_TRESHOLD: usize = 33; const FIRST_PREFIX: u8 = 0b_00 << 6; /// In proof this header is used when only hashed value is stored. diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index e125a1d50ee11..318f621383185 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -216,6 +216,11 @@ impl> NodeCodecT for NodeCodec { } fn leaf_node(partial: Partial, value: Value, meta: &mut M) -> Vec { + // Note that we use AltHash type only if inner hashing will occur, + // this way we allow changing hash threshold. + // With fix inner hashing alt hash can be use with all node, but + // that is not better (encoding can use an additional nibble byte + // sometime). let mut output = if meta.do_value_hash() && value_do_hash(&value) { partial_encode(partial, NodeKind::AltHashLeaf) } else { diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index ffa9f6c6c8885..a6d9b08786886 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -32,8 +32,13 @@ const BRANCH_NODE_WITH_VALUE: u8 = 255; #[derive(Default, Clone)] /// Codec-flavored TrieStream. pub struct TrieStream { + /// Current node buffer. buffer: Vec, + /// Global trie alt hashing activation. inner_value_hashing: bool, + /// For current node, do we use alt hashing. + apply_inner_hashing: bool, + /// Keep trace of position of encoded value. 
current_value_range: Option>, } @@ -77,6 +82,7 @@ impl trie_root::TrieStream for TrieStream { Self { buffer: Vec::new(), inner_value_hashing: meta, + apply_inner_hashing: false, current_value_range: None, } } @@ -86,7 +92,8 @@ impl trie_root::TrieStream for TrieStream { } fn append_leaf(&mut self, key: &[u8], value: &[u8]) { - let kind = if self.inner_value_hashing { + self.apply_inner_hashing = self.inner_value_hashing && value_do_hash(value); + let kind = if self.apply_inner_hashing { NodeKind::AltHashLeaf } else { NodeKind::Leaf @@ -105,8 +112,9 @@ impl trie_root::TrieStream for TrieStream { has_children: impl Iterator, ) { if let Some(partial) = maybe_partial { - if maybe_value.is_some() { - let kind = if self.inner_value_hashing { + if let Some(value) = maybe_value { + self.apply_inner_hashing = self.inner_value_hashing && value_do_hash(value); + let kind = if self.apply_inner_hashing { NodeKind::AltHashBranchWithValue } else { NodeKind::BranchWithValue @@ -134,7 +142,7 @@ impl trie_root::TrieStream for TrieStream { } fn append_substream(&mut self, other: Self) { - let inner_value_hashing = other.inner_value_hashing; + let inner_value_hashing = other.apply_inner_hashing; let range = other.current_value_range.clone(); let data = other.out(); match data.len() { @@ -202,3 +210,7 @@ fn branch_node_buffered(has_value: bool, has_children: I, output: &mut[u8]) output[0] = first; Bitmap::encode(has_children, &mut output[1..]); } + +fn value_do_hash(val: &[u8]) -> bool { + val.encoded_size() >= trie_constants::INNER_HASH_TRESHOLD +} From 60f70125eb002dd9aa37273f33e0ed7ecd90ed57 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 9 Jun 2021 11:16:06 +0200 Subject: [PATCH 044/188] Refactor meta state, logic should work with change of trie treshold. 
--- primitives/trie/src/lib.rs | 87 +++++++++++------------------- primitives/trie/src/node_codec.rs | 10 ++-- primitives/trie/src/trie_stream.rs | 24 ++++----- 3 files changed, 49 insertions(+), 72 deletions(-) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 33e25c5baafb2..59d51b9c84910 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -58,18 +58,12 @@ pub struct TrieMeta { /// Defined in the trie layout, when used with /// `TrieDbMut` it switch nodes to alternative hashing /// method by setting `do_value_hash` to true. - /// TODO consider defining it without do_value - /// and set do_value on encoding only. TODO try_do_value_hash - pub switch_to_value_hash: bool, + pub try_inner_hashing: bool, /// Does current encoded contains a hash instead of /// a value (information stored in meta for proofs). pub contain_hash: bool, - /// Flag indicating if alternative value hash can run. - /// This is read and written as a state meta of the node. - /// TODO replace by TrieDbMut node variant - /// TODO replace by Option being size treshold. - /// TODO apply_do_value_hash (and remove size testing) - pub do_value_hash: bool, + /// Flag indicating alternative value hash will be use. + pub apply_inner_hashing: bool, /// Record if a value was accessed, this is /// set as accessed by defalult, but can be /// change on access explicitely: `HashDB::get_with_meta`. 
@@ -86,26 +80,21 @@ impl Meta for TrieMeta { type StateMeta = bool; fn set_state_meta(&mut self, state_meta: Self::StateMeta) { - if !self.do_value_hash && state_meta { - self.do_value_hash = true; - } + self.apply_inner_hashing = state_meta; } - // TODO remove upstream + // TODO rename upstream as read_global_meta fn extract_global_meta(&self) -> Self::GlobalMeta { - self.switch_to_value_hash || self.do_value_hash + self.try_inner_hashing } fn set_global_meta(&mut self, global_meta: Self::GlobalMeta) { - if !self.do_value_hash && global_meta { - self.switch_to_value_hash = true; - self.do_value_hash = true; - } + self.try_inner_hashing = global_meta; } // TODO remove upstream? fn has_state_meta(&self) -> bool { - self.do_value_hash && !self.switch_to_value_hash + self.apply_inner_hashing } // TODO consider removal upstream of this method (node type in codec) @@ -167,12 +156,10 @@ impl Meta for TrieMeta { ValuePlan::NoValue => return, }; + self.apply_inner_hashing = self.try_inner_hashing + && range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD; self.range = Some(range); self.contain_hash = contain_hash; - if self.switch_to_value_hash { - // Switched value hashing. - self.switch_to_value_hash = false - } } fn decoded_callback( @@ -194,11 +181,9 @@ impl Meta for TrieMeta { self.contain_hash } - // TODO could be rename to get_state_meta - // the type of node depend on it. - // Note that it is after encoding state meta here!! + // TODO remove upstream fn do_value_hash(&self) -> bool { - self.do_value_hash + self.apply_inner_hashing } } @@ -292,13 +277,9 @@ impl MetaHasher for StateHasher fn hash(value: &[u8], meta: &Self::Meta) -> H::Out { match &meta { - TrieMeta { range: Some(range), contain_hash: false, do_value_hash: true, switch_to_value_hash: false, .. 
} => { - if range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { - let value = inner_hashed_value::(value, Some((range.start, range.end))); - H::hash(value.as_slice()) - } else { - H::hash(value) - } + TrieMeta { range: Some(range), contain_hash: false, apply_inner_hashing: true, .. } => { + let value = inner_hashed_value::(value, Some((range.start, range.end))); + H::hash(value.as_slice()) }, TrieMeta { range: Some(_range), contain_hash: true, .. } => { // value contains a hash of data (already inner_hashed_value). @@ -320,19 +301,17 @@ impl MetaHasher for StateHasher stored.extend_from_slice(value); return stored; } - if meta.unused_value && meta.do_value_hash && !meta.switch_to_value_hash { - if let Some(range) = meta.range.as_ref() { - if range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { - // Waring this assume that encoded value does not start by this, so it is tightly coupled - // with the header type of the codec: only for optimization. - stored.push(trie_constants::DEAD_HEADER_META_HASHED_VALUE); - let range = meta.range.as_ref().expect("Tested in condition"); - meta.contain_hash = true; // useless but could be with meta as &mut - // store hash instead of value. - let value = inner_hashed_value::(value, Some((range.start, range.end))); - stored.extend_from_slice(value.as_slice()); - return stored; - } + if meta.unused_value && meta.apply_inner_hashing { + if meta.range.is_some() { + // Waring this assume that encoded value does not start by this, so it is tightly coupled + // with the header type of the codec: only for optimization. + stored.push(trie_constants::DEAD_HEADER_META_HASHED_VALUE); + let range = meta.range.as_ref().expect("Tested in condition"); + meta.contain_hash = true; // useless but could be with meta as &mut + // store hash instead of value. 
+ let value = inner_hashed_value::(value, Some((range.start, range.end))); + stored.extend_from_slice(value.as_slice()); + return stored; } } stored.extend_from_slice(value); @@ -355,8 +334,8 @@ impl MetaHasher for StateHasher range: None, unused_value: contain_hash, contain_hash, - do_value_hash: false, - switch_to_value_hash: false, + apply_inner_hashing: false, + try_inner_hashing: false, }; meta.set_global_meta(global_meta); (stored, meta) @@ -887,14 +866,12 @@ fn inner_hashed_value(x: &[u8], range: Option<(usize, usize)>) -> Vec pub fn estimate_entry_size(entry: &(DBValue, TrieMeta), hash_len: usize) -> usize { use codec::Encode; let mut full_encoded = entry.0.encoded_size(); - if entry.1.unused_value && entry.1.do_value_hash { + if entry.1.unused_value && entry.1.apply_inner_hashing { if let Some(range) = entry.1.range.as_ref() { let value_size = range.end - range.start; - if range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD { - full_encoded -= value_size; - full_encoded += hash_len; - full_encoded += 1; - } + full_encoded -= value_size; + full_encoded += hash_len; + full_encoded += 1; } } diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 318f621383185..61bcf789d8817 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -183,7 +183,11 @@ impl NodeCodec { } } -impl> NodeCodecT for NodeCodec { +impl NodeCodecT for NodeCodec + where + H: Hasher, + M: Meta, +{ type Error = Error; type HashOut = H::Out; @@ -221,7 +225,7 @@ impl> NodeCodecT for NodeCodec { // With fix inner hashing alt hash can be use with all node, but // that is not better (encoding can use an additional nibble byte // sometime). 
- let mut output = if meta.do_value_hash() && value_do_hash(&value) { + let mut output = if meta.extract_global_meta() && value_do_hash(&value) { partial_encode(partial, NodeKind::AltHashLeaf) } else { partial_encode(partial, NodeKind::Leaf) @@ -271,7 +275,7 @@ impl> NodeCodecT for NodeCodec { value: Value, meta: &mut M, ) -> Vec { - let mut output = match (&value, meta.do_value_hash() && value_do_hash(&value)) { + let mut output = match (&value, meta.extract_global_meta() && value_do_hash(&value)) { (&Value::NoValue, _) => { partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) }, diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index a6d9b08786886..4e383c9d5763c 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -142,22 +142,21 @@ impl trie_root::TrieStream for TrieStream { } fn append_substream(&mut self, other: Self) { - let inner_value_hashing = other.apply_inner_hashing; + let apply_inner_hashing = other.apply_inner_hashing; let range = other.current_value_range.clone(); let data = other.out(); match data.len() { 0..=31 => data.encode_to(&mut self.buffer), _ => { - if inner_value_hashing - && range.as_ref().map(|r| r.end - r.start >= trie_constants::INNER_HASH_TRESHOLD) - .unwrap_or_default() { + if apply_inner_hashing { let meta = TrieMeta { range: range, unused_value: false, contain_hash: false, - // No existing state, no need to use switch_to_value_hash - switch_to_value_hash: false, - do_value_hash: true, + // Using `inner_value_hashing` instead to check this. + // And unused in hasher. 
+ try_inner_hashing: false, + apply_inner_hashing: true, }; >>::hash(&data, &meta).as_ref().encode_to(&mut self.buffer); } else { @@ -168,21 +167,18 @@ impl trie_root::TrieStream for TrieStream { } fn hash_root(self) -> H::Out { - let inner_value_hashing = self.inner_value_hashing; + let apply_inner_hashing = self.apply_inner_hashing; let range = self.current_value_range; let data = self.buffer; let meta = TrieMeta { range: range, unused_value: false, contain_hash: false, - switch_to_value_hash: false, - do_value_hash: inner_value_hashing, + try_inner_hashing: false, + apply_inner_hashing: true, }; - if inner_value_hashing - && meta.range.as_ref().map(|r| r.end - r.start >= trie_constants::INNER_HASH_TRESHOLD) - .unwrap_or_default() { - + if apply_inner_hashing { >>::hash(&data, &meta) } else { H::hash(&data) From 40ec2f715c7d332485b2f25a8592190df5548843 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 9 Jun 2021 12:05:13 +0200 Subject: [PATCH 045/188] Remove NoMeta variant. --- Cargo.lock | 16 +++--- client/api/src/cht.rs | 2 +- client/db/src/changes_tries_storage.rs | 2 +- client/db/src/lib.rs | 10 ++-- client/light/src/fetcher.rs | 2 +- frame/session/src/historical/mod.rs | 2 +- primitives/state-machine/src/backend.rs | 7 +-- .../state-machine/src/changes_trie/mod.rs | 2 +- .../state-machine/src/changes_trie/prune.rs | 4 +- .../state-machine/src/changes_trie/storage.rs | 4 +- primitives/state-machine/src/lib.rs | 4 +- primitives/trie/src/lib.rs | 56 ++----------------- primitives/trie/src/storage_proof.rs | 20 +++---- 13 files changed, 41 insertions(+), 90 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 771af600156c7..299383a5775b6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2342,7 +2342,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" +source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" [[package]] name = "hash256-std-hasher" @@ -2356,7 +2356,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" dependencies = [ "crunchy", ] @@ -3001,7 +3001,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", @@ -3793,7 +3793,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" dependencies = [ "hash-db", "hashbrown", @@ -10375,7 +10375,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" dependencies = [ "criterion", "hash-db", @@ -10390,7 +10390,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.5" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" +source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" dependencies = [ "hash-db", "hashbrown", @@ -10402,7 +10402,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" dependencies = [ "hash-db", ] @@ -10420,7 +10420,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#0f04db6e1d4781c41684df1539e3fd52e9aa85ae" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 5c8dadcd6825f..9739e71a9ab2c 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -32,7 +32,7 @@ use sp_trie; use sp_core::{H256, convert_hash}; use sp_runtime::traits::{Header as HeaderT, AtLeast32Bit, Zero, One}; use sp_state_machine::{ - MemoryDBNoMeta as MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, + MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend_generic as read_proof_check_on_proving_backend, }; diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 3f7e19cacfc24..860ca41730518 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -24,7 +24,7 @@ use hash_db::Prefix; use codec::{Decode, Encode}; use parking_lot::RwLock; use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use 
sp_trie::MemoryDBNoMeta as MemoryDB; +use sp_trie::MemoryDB; use sc_client_api::backend::PrunableStateChangesTrieStorage; use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache, HeaderMetadataCache}; use sp_core::{ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash}; diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 1eefca0c74ca4..9e59513f6724a 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -65,7 +65,7 @@ use sp_blockchain::{ }; use codec::{Decode, Encode}; use hash_db::Prefix; -use sp_trie::{MemoryDB, MemoryDBNoMeta, PrefixedMemoryDB, prefixed_key, StateHasher, +use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key, StateHasher, TrieMeta, MetaHasher}; use sp_database::Transaction; use sp_core::{Hasher, ChangesTrieConfiguration}; @@ -692,7 +692,7 @@ pub struct BlockImportOperation { storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, offchain_storage_updates: OffchainChangesCollection, - changes_trie_updates: MemoryDBNoMeta>, + changes_trie_updates: MemoryDB>, changes_trie_build_cache_update: Option>>, changes_trie_config_update: Option>, pending_block: Option>, @@ -1733,7 +1733,7 @@ impl sc_client_api::backend::Backend for Backend { child_storage_updates: Default::default(), offchain_storage_updates: Default::default(), changes_trie_config_update: None, - changes_trie_updates: MemoryDBNoMeta::default(), + changes_trie_updates: MemoryDB::default(), changes_trie_build_cache_update: None, aux_ops: Vec::new(), finalized_blocks: Vec::new(), @@ -2160,9 +2160,9 @@ pub(crate) mod tests { pub(crate) type Block = RawBlock>; - pub fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDBNoMeta) { + pub fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { let mut changes_root = H256::default(); - let mut changes_trie_update = MemoryDBNoMeta::::default(); + let mut changes_trie_update = MemoryDB::::default(); { let mut trie = TrieDBMut::::new( &mut 
changes_trie_update, diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index 117c2d6970bd9..ada0f6695038e 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -161,7 +161,7 @@ impl> LightDataChecker { H::Out: Ord + codec::Codec, { // all the checks are sharing the same storage - let storage: sp_state_machine::MemoryDBNoMeta = remote_roots_proof.into(); + let storage: sp_state_machine::MemoryDB = remote_roots_proof.into_memory_db_no_meta(); // remote_roots.keys() are sorted => we can use this to group changes tries roots // that are belongs to the same CHT diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 0564d159e1e3f..8902ebe551f6c 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -35,7 +35,7 @@ use frame_support::{ decl_module, decl_storage, Parameter, print, traits::{ValidatorSet, ValidatorSetWithIdentification}, }; -use sp_trie::{MemoryDBNoMeta as MemoryDB, Trie, TrieMut, Recorder, EMPTY_PREFIX}; +use sp_trie::{MemoryDB, Trie, TrieMut, Recorder, EMPTY_PREFIX}; use sp_trie::trie_types::{TrieDBMut, TrieDB}; use super::{SessionIndex, Module as SessionModule}; diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index aaa6bbf16f8d7..fa44acf9dde8c 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -295,16 +295,16 @@ impl Consolidate for sp_trie::GenericMemoryDB /// Insert input pairs into memory db. 
#[cfg(test)] -pub(crate) fn insert_into_memory_db_no_meta(mdb: &mut sp_trie::MemoryDBNoMeta, input: I) -> Option +pub(crate) fn insert_into_memory_db(mdb: &mut sp_trie::MemoryDB, input: I) -> Option where H: Hasher, I: IntoIterator, { - use sp_trie::{TrieMut, trie_types::TrieDBMutNoMeta}; + use sp_trie::{TrieMut, trie_types::TrieDBMut}; let mut root = ::Out::default(); { - let mut trie = TrieDBMutNoMeta::::new(mdb, &mut root); + let mut trie = TrieDBMut::::new(mdb, &mut root); for (key, value) in input { if let Err(e) = trie.insert(&key, &value) { log::warn!(target: "trie", "Failed to write to trie: {}", e); @@ -316,7 +316,6 @@ pub(crate) fn insert_into_memory_db_no_meta(mdb: &mut sp_trie::MemoryDBNoM Some(root) } - /// Wrapper to create a [`RuntimeCode`] from a type that implements [`Backend`]. #[cfg(feature = "std")] pub struct BackendRuntimeCode<'a, B, H> { diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 404353fc308b4..f2bbf371fb50e 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -73,7 +73,7 @@ use num_traits::{One, Zero}; use codec::{Decode, Encode}; use sp_core; use sp_core::storage::PrefixedStorageKey; -use sp_trie::{MemoryDBNoMeta as MemoryDB, DBValue, TrieMut}; +use sp_trie::{MemoryDB, DBValue, TrieMut}; use sp_trie::trie_types::TrieDBMut; use crate::{ StorageKey, diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 987fb4ff63d08..6f00e9b6a8e0d 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -114,9 +114,9 @@ fn prune_trie( #[cfg(test)] mod tests { use std::collections::HashSet; - use sp_trie::MemoryDBNoMeta as MemoryDB; + use sp_trie::MemoryDB; use sp_core::H256; - use crate::backend::insert_into_memory_db_no_meta as insert_into_memory_db; + use 
crate::backend::insert_into_memory_db; use crate::changes_trie::storage::InMemoryStorage; use codec::Encode; use sp_runtime::traits::BlakeTwo256; diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 5414d3f8c2f6a..115cc5461e1a9 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -21,7 +21,7 @@ use std::collections::{BTreeMap, HashSet, HashMap}; use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; use sp_core::storage::PrefixedStorageKey; use sp_trie::DBValue; -use sp_trie::MemoryDBNoMeta as MemoryDB; +use sp_trie::MemoryDB; use parking_lot::RwLock; use crate::{ StorageKey, @@ -30,7 +30,7 @@ use crate::{ }; #[cfg(test)] -use crate::backend::insert_into_memory_db_no_meta as insert_into_memory_db; +use crate::backend::insert_into_memory_db; #[cfg(test)] use crate::changes_trie::input::{InputPair, ChildIndex}; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 19ab6b994d63c..d7d6cbc1da731 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -144,7 +144,7 @@ mod changes_trie { #[cfg(feature = "std")] mod std_reexport { pub use sp_trie::{trie_types::{Layout, TrieDBMut}, StorageProof, TrieMut, - DBValue, MemoryDB, MemoryDBNoMeta}; + DBValue, MemoryDB}; pub use crate::testing::TestExternalities; pub use crate::basic::BasicExternalities; pub use crate::read_only::{ReadOnlyExternalities, InspectState}; @@ -195,7 +195,7 @@ mod execution { /// Type of changes trie transaction. 
pub type ChangesTrieTransaction = ( - MemoryDBNoMeta, + MemoryDB, ChangesTrieCacheAction<::Out, N>, ); diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 59d51b9c84910..de5d751aba038 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -350,41 +350,6 @@ impl MetaHasher for StateHasher } } -/// Reimplement `NoMeta` `MetaHasher` with -/// additional constraint. -/// TODO remove the MetaHasher is ignored -/// when no node have do_value_hash or layout defines it. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct NoMetaHasher; - -impl MetaHasher for NoMetaHasher - where - H: Hasher, -{ - type Meta = TrieMeta; - type GlobalMeta = bool; - - fn hash(value: &[u8], _meta: &Self::Meta) -> H::Out { - H::hash(value) - } - - fn stored_value(value: &[u8], _meta: Self::Meta) -> DBValue { - value.to_vec() - } - - fn stored_value_owned(value: DBValue, _meta: Self::Meta) -> DBValue { - value - } - - fn extract_value(stored: &[u8], _meta: Self::GlobalMeta) -> (&[u8], Self::Meta) { - (stored, Default::default()) - } - - fn extract_value_owned(stored: DBValue, _meta: Self::GlobalMeta) -> (DBValue, Self::Meta) { - (stored, Default::default()) - } -} - impl TrieConfiguration for Layout where H: Hasher, @@ -436,17 +401,6 @@ pub type PrefixedMemoryDB = memory_db::MemoryDB< pub type MemoryDB = memory_db::MemoryDB< H, memory_db::HashKey, trie_db::DBValue, StateHasher, MemTracker, >; -/// Reexport from `hash_db`, with genericity set for `Hasher` trait. -/// This uses a noops `KeyFunction` (key addressing must be hashed or using -/// an encoding scheme that avoid key conflict). -pub type MemoryDBNoMeta = memory_db::MemoryDB< - H, memory_db::HashKey, trie_db::DBValue, NoMetaHasher, MemTracker, ->; -/// MemoryDB with specific meta hasher. -pub type MemoryDBMeta = memory_db::MemoryDB< - H, memory_db::HashKey, trie_db::DBValue, M, MemTracker, ->; - /// Reexport from `hash_db`, with genericity set for `Hasher` trait. 
pub type GenericMemoryDB = memory_db::MemoryDB< H, KF, trie_db::DBValue, MH, MemTracker, @@ -465,16 +419,10 @@ pub type TrieHash = <::Hash as Hasher>::Out; pub mod trie_types { /// State layout. pub type Layout = super::Layout; - /// Old state layout definition, do not use meta, do not - /// do internal value hashing. - pub type LayoutNoMeta = super::Layout; /// Persistent trie database read-access interface for the a given hasher. pub type TrieDB<'a, H> = super::TrieDB<'a, Layout>; /// Persistent trie database write-access interface for the a given hasher. pub type TrieDBMut<'a, H> = super::TrieDBMut<'a, Layout>; - /// Persistent trie database write-access interface for the a given hasher, - /// old layout. - pub type TrieDBMutNoMeta<'a, H> = super::TrieDBMut<'a, LayoutNoMeta>; /// Querying interface, as in `trie_db` but less generic. pub type Lookup<'a, H, Q> = trie_db::Lookup<'a, Layout, Q>; /// As in `trie_db`, but less generic, error type for the crate. @@ -924,6 +872,10 @@ mod tests { type Layout = super::trie_types::Layout; + type MemoryDBMeta = memory_db::MemoryDB< + H, memory_db::HashKey, trie_db::DBValue, M, MemTracker, + >; + fn hashed_null_node() -> TrieHash { >::hashed_null_node() } diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 51522a92c756e..a1b634df90b62 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -63,6 +63,16 @@ impl StorageProof { self.into() } + /// Creates a `MemoryDB` from `Self`. In case we do not need + /// to check meta (using alt hashing will always be disabled). + pub fn into_memory_db_no_meta(self) -> crate::MemoryDB { + let mut db = crate::MemoryDB::default(); + for item in self.iter_nodes() { + db.insert(crate::EMPTY_PREFIX, &item); + } + db + } + /// Merges multiple storage proofs covering potentially different sets of keys into one proof /// covering all keys. 
The merged proof output may be smaller than the aggregate size of the input /// proofs due to deduplication of trie nodes. @@ -124,13 +134,3 @@ impl From for crate::MemoryDB { db } } - -impl From for crate::MemoryDBNoMeta { - fn from(proof: StorageProof) -> Self { - let mut db = crate::MemoryDBNoMeta::default(); - for item in proof.iter_nodes() { - db.insert(crate::EMPTY_PREFIX, &item); - } - db - } -} From 204f51f5ca8877da052a40923d265c323597d50b Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 9 Jun 2021 22:06:11 +0200 Subject: [PATCH 046/188] Remove state_hashed trigger specific functions. --- Cargo.lock | 1 + client/api/src/backend.rs | 4 -- client/api/src/cht.rs | 2 +- client/api/src/in_mem.rs | 1 - client/block-builder/src/lib.rs | 2 +- client/chain-spec/src/chain_spec.rs | 12 ++--- client/db/src/bench.rs | 6 --- client/db/src/lib.rs | 20 ++++---- client/db/src/storage_cache.rs | 11 +--- client/executor/runtime-test/Cargo.toml | 1 + client/executor/runtime-test/src/lib.rs | 5 +- client/executor/src/integration_tests/mod.rs | 19 ++++--- client/light/src/backend.rs | 9 +--- client/network/test/src/lib.rs | 4 +- .../service/src/chain_ops/export_raw_state.rs | 3 +- client/service/src/client/client.rs | 4 -- client/service/test/src/client/mod.rs | 24 +++++---- frame/support/test/tests/instance.rs | 8 +-- frame/system/src/lib.rs | 9 ++-- primitives/externalities/src/lib.rs | 29 +++++++++-- primitives/io/src/lib.rs | 17 +++---- primitives/state-machine/src/backend.rs | 10 ++-- primitives/state-machine/src/basic.rs | 16 ++---- primitives/state-machine/src/ext.rs | 16 ++---- .../state-machine/src/in_memory_backend.rs | 16 ++---- .../src/overlayed_changes/mod.rs | 14 +----- .../state-machine/src/proving_backend.rs | 50 +++++++++++++------ primitives/state-machine/src/read_only.rs | 4 -- primitives/state-machine/src/testing.rs | 11 ++-- primitives/state-machine/src/trie_backend.rs | 12 +++-- .../state-machine/src/trie_backend_essence.rs | 6 --- 
primitives/storage/src/lib.rs | 40 ++++++++++++++- primitives/tasks/src/async_externalities.rs | 4 -- primitives/trie/src/lib.rs | 26 +--------- test-utils/client/src/lib.rs | 4 +- test-utils/runtime/src/genesismap.rs | 4 +- test-utils/runtime/src/system.rs | 8 ++- 37 files changed, 215 insertions(+), 217 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 299383a5775b6..a6721d91dd025 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7859,6 +7859,7 @@ dependencies = [ "sp-runtime", "sp-sandbox", "sp-std", + "sp-storage", "sp-tasks", "substrate-wasm-builder", ] diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index bd5e15c0222fd..09e9e0cb2e173 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -385,10 +385,6 @@ pub trait StorageProvider> { storage_key: Option<&PrefixedStorageKey>, key: &StorageKey ) -> sp_blockchain::Result, u32)>>; - - /// Returns true when state allow hashing value and therefore - /// removing unaccess value from proofs. - fn state_hashed_value(&self, id: &BlockId) -> sp_blockchain::Result; } /// Client backend. 
diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 9739e71a9ab2c..d0ad9facd73c2 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -117,7 +117,7 @@ pub fn build_proof( .into_iter() .map(|(k, v)| (k, Some(v))) .collect::>(); - let mut storage = InMemoryBackend::::default().update(vec![(None, transaction)], false); + let mut storage = InMemoryBackend::::default().update(vec![(None, transaction)]); let trie_storage = storage.as_trie_backend() .expect("InMemoryState::as_trie_backend always returns Some; qed"); prove_read_on_trie_backend( diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 3030784b3c320..d756e1cc0bbc4 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -576,7 +576,6 @@ impl backend::BlockImportOperation for BlockImportOperatio let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, - storage.alt_hashing, ); self.new_state = Some(transaction); diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 7d391f8fb85b3..c264fc10e6a35 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -301,7 +301,7 @@ mod tests { #[test] fn block_building_storage_proof_does_not_include_runtime_by_default() { - let builder = substrate_test_runtime_client::TestClientBuilder::new(); + let builder = substrate_test_runtime_client::TestClientBuilder::new().state_hashed_value(); let backend = builder.backend(); let client = builder.build(); diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 51c3a8c677645..59b55707e182b 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -87,9 +87,7 @@ impl GenesisSource { ) .collect(); - let alt_hashing = storage.alt_hashing; - - Ok(Genesis::Raw(RawGenesis { top, children_default, alt_hashing })) + Ok(Genesis::Raw(RawGenesis { top, children_default })) }, } } 
@@ -99,7 +97,7 @@ impl BuildStorage for ChainSpec { fn build_storage(&self) -> Result { match self.genesis.resolve()? { Genesis::Runtime(gc) => gc.build_storage(), - Genesis::Raw(RawGenesis { top: map, children_default: children_map, alt_hashing }) => Ok(Storage { + Genesis::Raw(RawGenesis { top: map, children_default: children_map }) => Ok(Storage { top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), children_default: children_map.into_iter().map(|(storage_key, child_content)| { let child_info = ChildInfo::new_default(storage_key.0.as_slice()); @@ -111,7 +109,6 @@ impl BuildStorage for ChainSpec { }, ) }).collect(), - alt_hashing, }), } } @@ -133,8 +130,6 @@ pub type GenesisStorage = HashMap; pub struct RawGenesis { pub top: GenesisStorage, pub children_default: HashMap, - #[serde(default)] - pub alt_hashing: bool, } #[derive(Serialize, Deserialize)] @@ -328,9 +323,8 @@ impl ChainSpec { .collect(), )) .collect(); - let alt_hashing = storage.alt_hashing; - Genesis::Raw(RawGenesis { top, children_default, alt_hashing }) + Genesis::Raw(RawGenesis { top, children_default }) }, (_, genesis) => genesis, }; diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 409897b831440..312f576052e87 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -151,7 +151,6 @@ impl BenchmarkingState { state.add_whitelist_to_tracker(); state.reopen()?; - let flagged = genesis.alt_hashing; let child_delta = genesis.children_default.iter().map(|(_storage_key, child_content)| ( &child_content.child_info, child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), @@ -159,7 +158,6 @@ impl BenchmarkingState { let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, - flagged, ); state.genesis = transaction.clone().drain(); state.genesis_root = root.clone(); @@ -532,10 +530,6 @@ impl StateBackend> for BenchmarkingState { fn 
proof_size(&self) -> Option { self.proof_recorder.as_ref().map(|recorder| recorder.estimate_encoded_size() as u32) } - - fn state_hashed_value(&self) -> bool { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.state_hashed_value()) - } } impl std::fmt::Debug for BenchmarkingState { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 9e59513f6724a..dc0d5465ecaf7 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -270,10 +270,6 @@ impl StateBackend> for RefTrackingState { fn usage_info(&self) -> StateUsageInfo { self.state.usage_info() } - - fn state_hashed_value(&self) -> bool { - self.state.state_hashed_value() - } } /// Database settings. @@ -782,7 +778,6 @@ impl sc_client_api::backend::BlockImportOperation for Bloc )); let mut changes_trie_config: Option = None; - let flag = storage.alt_hashing; let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| { if &k[..] == well_known_keys::CHANGES_TRIE_CONFIG { @@ -794,7 +789,6 @@ impl sc_client_api::backend::BlockImportOperation for Bloc (&k[..], Some(&v[..])) }), child_delta, - flag, ); self.db_updates = transaction; @@ -2304,11 +2298,20 @@ pub(crate) mod tests { extrinsics_root: Default::default(), }; - let storage = vec![ + let mut storage = vec![ (vec![1, 3, 5], vec![2, 4, 6]), (vec![1, 2, 3], vec![9, 9, 9]), ]; + if alt_hashing { + storage.push(( + sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), + sp_core::storage::trie_threshold_encode( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, + ), + )); + } + header.state_root = op.old_state.storage_root(storage .iter() .map(|(x, y)| (&x[..], Some(&y[..]))), @@ -2319,7 +2322,6 @@ pub(crate) mod tests { op.reset_storage(Storage { top: storage.into_iter().collect(), children_default: Default::default(), - alt_hashing: alt_hashing, }).unwrap(); op.set_block_data( header.clone(), @@ -2405,7 +2407,6 @@ pub(crate) mod tests { op.reset_storage(Storage { top: Default::default(), 
children_default: Default::default(), - alt_hashing: alt_hashing, }).unwrap(); key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); @@ -2858,7 +2859,6 @@ pub(crate) mod tests { op.reset_storage(Storage { top: storage.into_iter().collect(), children_default: Default::default(), - alt_hashing: alt_hashing, }).unwrap(); op.set_block_data( header.clone(), diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 946f576142745..29dceb6b80828 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -688,10 +688,6 @@ impl>, B: BlockT> StateBackend> for Cachin info.include_state_machine_states(&self.overlay_stats); info } - - fn state_hashed_value(&self) -> bool { - self.state.state_hashed_value() - } } /// Extended [`CachingState`] that will sync the caches on drop. @@ -877,10 +873,6 @@ impl>, B: BlockT> StateBackend> for Syncin fn usage_info(&self) -> sp_state_machine::UsageInfo { self.caching_state().usage_info() } - - fn state_hashed_value(&self) -> bool { - self.caching_state().state_hashed_value() - } } impl Drop for SyncingCachingState { @@ -1208,8 +1200,7 @@ mod tests { let shared = new_shared_cache::(256*1024, (0,1)); let mut backend = InMemoryBackend::::default(); - let flagged = false; - backend.insert(std::iter::once((None, vec![(key.clone(), Some(vec![1]))])), flagged); + backend.insert(std::iter::once((None, vec![(key.clone(), Some(vec![1]))]))); let mut s = CachingState::new( backend.clone(), diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index 93ad463be16c3..a9be5c650625e 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -20,6 +20,7 @@ sp-runtime = { version = "3.0.0", default-features = false, path = "../../../pri sp-sandbox = { version = "0.9.0", default-features = false, path = "../../../primitives/sandbox" } sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } 
sp-tasks = { version = "3.0.0", default-features = false, path = "../../../primitives/tasks" } +sp-storage = { version = "3.0.0", default-features = false, path = "../../../primitives/storage" } [build-dependencies] substrate-wasm-builder = { version = "4.0.0", path = "../../../utils/wasm-builder" } diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index fc452d135da07..2e595760815ab 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -64,7 +64,10 @@ sp_core::wasm_export_functions! { fn test_switch_state() { print("switch_state"); - storage::alt_hashing(); + storage::set( + sp_storage::well_known_keys::TRIE_HASHING_CONFIG, + sp_storage::trie_threshold_encode(sp_storage::TEST_DEFAULT_ALT_HASH_THRESHOLD).as_slice(), + ); print("switched!"); } diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 7c702cb855d6e..164c576540809 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -204,15 +204,18 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); } - let expected = TestExternalities::new(sp_core::storage::Storage { + let mut storage = sp_core::storage::Storage { top: map![ b"input".to_vec() => b"Hello world".to_vec(), b"foo".to_vec() => b"bar".to_vec(), b"baz".to_vec() => b"bar".to_vec() ], children_default: map![], - alt_hashing: false, - }); + }; + storage.modify_trie_alt_hashing_threshold(Some( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD + )); + let expected = TestExternalities::new(storage); assert_eq!(ext, expected); } @@ -238,15 +241,19 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); } - let expected = TestExternalities::new(sp_core::storage::Storage { + let mut storage = sp_core::storage::Storage { top: map![ 
b"aaa".to_vec() => b"1".to_vec(), b"aab".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"5".to_vec() ], children_default: map![], - alt_hashing: false, - }); + }; + storage.modify_trie_alt_hashing_threshold(Some( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD + )); + + let expected = TestExternalities::new(storage); assert_eq!(expected, ext); } diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index b593b8998a0ae..d68507f549320 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -346,7 +346,7 @@ impl BlockImportOperation for ImportOperation } let storage_update = InMemoryBackend::from(storage); - let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta, input.alt_hashing); + let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); self.storage_update = Some(storage_update); Ok(storage_root) @@ -540,11 +540,4 @@ impl StateBackend for GenesisOrUnavailableState GenesisOrUnavailableState::Unavailable => None, } } - - fn state_hashed_value(&self) -> bool { - match self { - GenesisOrUnavailableState::Genesis(state) => state.state_hashed_value(), - GenesisOrUnavailableState::Unavailable => false, - } - } } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 47582607f8bb7..e1e02a884f1cb 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -700,8 +700,8 @@ pub trait TestNetFactory: Sized where >: /// Add a full peer. 
fn add_full_peer_with_config(&mut self, config: FullPeerConfig) { let test_client_builder = match config.keep_blocks { - Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks), - None => TestClientBuilder::with_default_backend(), + Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks).state_hashed_value(), + None => TestClientBuilder::with_default_backend().state_hashed_value(), }; let backend = test_client_builder.backend(); let (c, longest_chain) = test_client_builder.build_with_longest_chain(); diff --git a/client/service/src/chain_ops/export_raw_state.rs b/client/service/src/chain_ops/export_raw_state.rs index 28fad7632fd80..71822cf6275f8 100644 --- a/client/service/src/chain_ops/export_raw_state.rs +++ b/client/service/src/chain_ops/export_raw_state.rs @@ -42,7 +42,6 @@ where let empty_key = StorageKey(Vec::new()); let mut top_storage = client.storage_pairs(&block, &empty_key)?; let mut children_default = HashMap::new(); - let alt_hashing = client.state_hashed_value(&block)?; // Remove all default child storage roots from the top storage and collect the child storage // pairs. 
@@ -70,5 +69,5 @@ where } let top = top_storage.into_iter().map(|(k, v)| (k.0, v.0)).collect(); - Ok(Storage { top, children_default, alt_hashing }) + Ok(Storage { top, children_default }) } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index fd306c536fadd..b294be2268997 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1539,10 +1539,6 @@ impl StorageProvider for Client wher Ok(result) } - - fn state_hashed_value(&self, id: &BlockId) -> sp_blockchain::Result { - Ok(self.state_at(id)?.state_hashed_value()) - } } impl HeaderMetadata for Client where diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index a4fb06a8d5e79..1fccd918be7c9 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1751,11 +1751,7 @@ fn imports_blocks_with_changes_tries_config_change() { #[test] fn storage_keys_iter_prefix_and_start_key_works() { - storage_keys_iter_prefix_and_start_key_works_inner(true); - storage_keys_iter_prefix_and_start_key_works_inner(false); -} -fn storage_keys_iter_prefix_and_start_key_works_inner(hashed_value: bool) { - let client = substrate_test_runtime_client::new(hashed_value); + let client = substrate_test_runtime_client::new(false); let prefix = StorageKey(hex!("3a").to_vec()); @@ -1800,11 +1796,19 @@ fn storage_keys_iter_works_inner(hashed_value: bool) { .take(3) .map(|x| x.0) .collect(); - assert_eq!(res, [ - hex!("3a686561707061676573").to_vec(), - hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), - hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), - ]); + if hashed_value { + assert_eq!(res, [ + hex!("3a686561707061676573").to_vec(), + sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), + hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), + ]); + } else { + assert_eq!(res, [ + 
hex!("3a686561707061676573").to_vec(), + hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), + hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), + ]); + } let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec()))) .unwrap() diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 1a07b7174e2ed..3171020182f33 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -327,8 +327,10 @@ fn storage_instance_independence() { let mut storage = sp_core::storage::Storage { top: std::collections::BTreeMap::new(), children_default: std::collections::HashMap::new(), - alt_hashing: true, }; + storage.modify_trie_alt_hashing_threshold(Some( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, + )); sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || { module2::Value::::put(0); module2::Value::::put(0); @@ -343,8 +345,8 @@ fn storage_instance_independence() { module2::DoubleMap::::insert(&0, &0, &0); module2::DoubleMap::::insert(&0, &0, &0); }); - // 12 storage values. - assert_eq!(storage.top.len(), 12); + // 12 storage values and threshold. + assert_eq!(storage.top.len(), 13); } #[test] diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 884a191b2e83b..98f6a2bfd6fd0 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1401,15 +1401,18 @@ impl Pallet { /// Get the basic externalities for this pallet, useful for tests. 
#[cfg(any(feature = "std", test))] pub fn externalities() -> TestExternalities { - TestExternalities::new(sp_core::storage::Storage { + let mut storage = sp_core::storage::Storage { top: map![ >::hashed_key_for(T::BlockNumber::zero()) => [69u8; 32].encode(), >::hashed_key().to_vec() => T::BlockNumber::one().encode(), >::hashed_key().to_vec() => [69u8; 32].encode() ], children_default: map![], - alt_hashing: true, - }) + }; + storage.modify_trie_alt_hashing_threshold( + Some(sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD), + ); + TestExternalities::new(storage) } /// Set the block number to something in particular. Can be used as an alternative to diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 54003d9886311..351d50bd90e60 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -291,10 +291,6 @@ pub trait Externalities: ExtensionStore { fn proof_size(&self) -> Option { None } - - /// Set flag in inner state to activate hashing of values. - /// TODO remove - fn alt_hashing(&mut self); } /// Extension for the [`Externalities`] trait. @@ -327,3 +323,28 @@ impl ExternalitiesExt for &mut dyn Externalities { self.deregister_extension_by_type_id(TypeId::of::()) } } + +/// Helpers method for the [`Externalities`] trait. +pub trait ExternalitiesHelpers: Externalities { + /// Utility function to get trie inner value hash threshold from + /// backend state or pending changes. + fn get_trie_alt_hashing_threshold(&self) -> Option { + self.storage(sp_storage::well_known_keys::TRIE_HASHING_CONFIG) + .and_then(|encoded| sp_storage::trie_threshold_decode(&mut encoded.as_slice())) + } + + /// Utility function to modify trie inner value hash threshold. 
+ fn modify_trie_alt_hashing_threshold(&mut self, threshold: Option) { + match threshold { + Some(threshold) => { + let encoded = sp_storage::trie_threshold_encode(threshold); + self.set_storage(sp_storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), encoded); + }, + None => { + self.clear_storage(sp_storage::well_known_keys::TRIE_HASHING_CONFIG); + }, + } + } +} + +impl ExternalitiesHelpers for E { } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index a94e0d624be4a..8d93f3de774ee 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -213,11 +213,6 @@ pub trait Storage { self.storage_commit_transaction() .expect("No open transaction that can be committed."); } - - /// Swith state to alternate hashing. - fn alt_hashing(&mut self) { - self.alt_hashing(); - } } /// Interface for accessing the child storage for default child trie, @@ -1464,7 +1459,6 @@ mod tests { t = BasicExternalities::new(Storage { top: map![b"foo".to_vec() => b"bar".to_vec()], children_default: map![], - alt_hashing: false, }); t.execute_with(|| { @@ -1473,11 +1467,14 @@ mod tests { }); let value = vec![7u8; 35]; - t = BasicExternalities::new(Storage { + let mut storage = Storage { top: map![b"foo00".to_vec() => value.clone()], children_default: map![], - alt_hashing: true, - }); + }; + storage.modify_trie_alt_hashing_threshold(Some( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD + )); + t = BasicExternalities::new(storage); t.execute_with(|| { assert_eq!(storage::get(b"hello"), None); @@ -1491,7 +1488,6 @@ mod tests { let mut t = BasicExternalities::new(Storage { top: map![b":test".to_vec() => value.clone()], children_default: map![], - alt_hashing: false, }); t.execute_with(|| { @@ -1514,7 +1510,6 @@ mod tests { b":abdd".to_vec() => b"\x0b\0\0\0Hello world".to_vec() ], children_default: map![], - alt_hashing: false, }); t.execute_with(|| { diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 
fa44acf9dde8c..65d0469100af1 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -178,8 +178,8 @@ pub trait Backend: sp_std::fmt::Debug { &'a ChildInfo, impl Iterator)>, )>, - alt_hashing: bool, ) -> (H::Out, Self::Transaction) where H::Out: Ord + Encode { + let alt_hashing = self.get_trie_alt_hashing_threshold().is_some(); let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first @@ -257,8 +257,12 @@ pub trait Backend: sp_std::fmt::Debug { unimplemented!() } - /// Does trie state allow hashing of value. - fn state_hashed_value(&self) -> bool; + /// Read current trie hashing threshold. + /// Please do not reimplement. + fn get_trie_alt_hashing_threshold(&self) -> Option { + self.storage(sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG).ok().flatten() + .and_then(|encoded| sp_core::storage::trie_threshold_decode(&mut encoded.as_slice())) + } } /// Trait that allows consolidate two transactions together. diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index f4ddc6f36f80d..57158d938ccd0 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -33,7 +33,7 @@ use sp_core::{ }; use log::warn; use codec::Encode; -use sp_externalities::{Extensions, Extension}; +use sp_externalities::{Extensions, Extension, ExternalitiesHelpers}; /// Simple Map-based Externalities impl. 
#[derive(Debug)] @@ -74,7 +74,6 @@ impl BasicExternalities { inner: Storage { top: std::mem::take(&mut storage.top), children_default: std::mem::take(&mut storage.children_default), - alt_hashing: storage.alt_hashing, }, extensions: Default::default(), }; @@ -129,7 +128,6 @@ impl From> for BasicExternalities { inner: Storage { top: hashmap, children_default: Default::default(), - alt_hashing: true, }, extensions: Default::default(), } @@ -283,7 +281,8 @@ impl Externalities for BasicExternalities { } } - let layout = if self.inner.alt_hashing { + let alt_hashing = self.get_trie_alt_hashing_threshold().is_some(); + let layout = if alt_hashing { Layout::::with_inner_hashing() } else { Layout::::default() @@ -297,8 +296,9 @@ impl Externalities for BasicExternalities { ) -> Vec { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); + let alt_hashing = self.get_trie_alt_hashing_threshold().is_some(); crate::in_memory_backend::new_in_mem::() - .child_storage_root(&child.child_info, delta, self.inner.alt_hashing).0 + .child_storage_root(&child.child_info, delta, alt_hashing).0 } else { empty_child_trie_root::>() }.encode() @@ -339,10 +339,6 @@ impl Externalities for BasicExternalities { fn set_whitelist(&mut self, _: Vec) { unimplemented!("set_whitelist is not supported in Basic") } - - fn alt_hashing(&mut self) { - self.inner.alt_hashing = true; - } } impl sp_externalities::ExtensionStore for BasicExternalities { @@ -408,7 +404,6 @@ mod tests { child_info: child_info.to_owned(), } ], - alt_hashing: false, }); assert_eq!(ext.child_storage(child_info, b"doe"), Some(b"reindeer".to_vec())); @@ -439,7 +434,6 @@ mod tests { child_info: child_info.to_owned(), } ], - alt_hashing: false, }); let res = ext.kill_child_storage(child_info, None); diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index dd6e0f519ef35..4ecbe17fc3346 100644 --- 
a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -28,7 +28,7 @@ use sp_core::{ }; use sp_trie::{trie_types::Layout, empty_child_trie_root}; use sp_externalities::{ - Externalities, Extensions, Extension, ExtensionStore, + Externalities, Extensions, Extension, ExtensionStore, ExternalitiesHelpers, }; use codec::{Decode, Encode, EncodeAppend}; @@ -547,7 +547,8 @@ where } else { let root = if let Some((changes, info)) = self.overlay.child_changes(storage_key) { let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); - Some(self.backend.child_storage_root(info, delta, self.overlay.alt_hashing())) + let alt_hashing = self.get_trie_alt_hashing_threshold().is_some(); + Some(self.backend.child_storage_root(info, delta, alt_hashing)) } else { None }; @@ -737,11 +738,6 @@ where fn proof_size(&self) -> Option { self.backend.proof_size() } - - fn alt_hashing(&mut self) { - self.mark_dirty(); - self.overlay.set_alt_hashing() - } } /// Implement `Encode` by forwarding the stored raw vec. 
@@ -951,8 +947,7 @@ mod tests { vec![20] => vec![20], vec![40] => vec![40] ], - children_default: map![], - alt_hashing: false, + children_default: map![] }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -998,7 +993,6 @@ mod tests { child_info: child_info.to_owned(), } ], - alt_hashing: false, }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1043,7 +1037,6 @@ mod tests { child_info: child_info.to_owned(), } ], - alt_hashing: false, }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1083,7 +1076,6 @@ mod tests { child_info: child_info.to_owned(), } ], - alt_hashing: false, }.into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index d2a4bf6ae8e98..c5dc881caae91 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -45,10 +45,9 @@ where >( &self, changes: T, - flag_inner_hash_value: bool, ) -> Self { let mut clone = self.clone(); - clone.insert(changes, flag_inner_hash_value); + clone.insert(changes); clone } @@ -58,8 +57,8 @@ where >( &mut self, changes: T, - flag_inner_hash_value: bool, ) { + // Note that in case the threshold is changed, it will not be apply immediately. 
let (top, child) = changes.into_iter().partition::, _>(|v| v.0.is_none()); let (root, transaction) = self.full_storage_root( top.iter().map(|(_, v)| v).flatten().map(|(k, v)| (&k[..], v.as_deref())), @@ -67,7 +66,6 @@ where .filter_map(|v| v.0.as_ref().map(|c| (c, v.1.iter().map(|(k, v)| (&k[..], v.as_deref())))) ), - flag_inner_hash_value, ); self.apply_transaction(root, transaction); @@ -119,7 +117,6 @@ where let mut backend = new_in_mem(); backend.insert( inner.into_iter().map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect())), - false, ); backend } @@ -182,13 +179,11 @@ mod tests { let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; - let flagged = false; let mut storage = storage.update( vec![( Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))] - )], - flagged, + )] ); let trie_backend = storage.as_trie_backend().unwrap(); assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), @@ -201,10 +196,9 @@ mod tests { fn insert_multiple_times_child_data_works() { let mut storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); - let flagged = false; - storage.insert(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], flagged); - storage.insert(vec![(Some(child_info.clone()), vec![(b"1".to_vec(), Some(b"3".to_vec()))])], flagged); + storage.insert(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])]); + storage.insert(vec![(Some(child_info.clone()), vec![(b"1".to_vec(), Some(b"3".to_vec()))])]); assert_eq!(storage.child_storage(&child_info, &b"2"[..]), Ok(Some(b"3".to_vec()))); assert_eq!(storage.child_storage(&child_info, &b"1"[..]), Ok(Some(b"3".to_vec()))); diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 5df20ba1f233b..1d3cbb59ba0c1 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ 
b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -107,8 +107,6 @@ pub struct OverlayedChanges { transaction_index_ops: Vec, /// True if extrinsics stats must be collected. collect_extrinsics: bool, - /// True if we flag inner state to store hash of values. - alt_hashing: bool, /// Collect statistic on this execution. stats: StateMachineStats, } @@ -262,16 +260,6 @@ impl OverlayedChanges { self.collect_extrinsics = collect_extrinsics; } - /// Ask to switch state to use inner hash. - pub fn set_alt_hashing(&mut self) { - self.alt_hashing = true; - } - - /// Is `alt_hashing` flag set. - pub fn alt_hashing(&self) -> bool { - self.alt_hashing - } - /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose /// value has been set. @@ -643,7 +631,7 @@ impl OverlayedChanges { |(k, v)| (&k[..], v.value().map(|v| &v[..])) ))); - let (root, transaction) = backend.full_storage_root(delta, child_delta, self.alt_hashing); + let (root, transaction) = backend.full_storage_root(delta, child_delta); cache.transaction = Some(transaction); cache.transaction_storage_root = Some(root); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index c1eaf7068c7ac..d4a1ec66523dd 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -362,10 +362,6 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn usage_info(&self) -> crate::stats::UsageInfo { self.0.usage_info() } - - fn state_hashed_value(&self) -> bool { - self.0.state_hashed_value() - } } /// Create proof check backend. @@ -456,11 +452,19 @@ mod tests { proof_recorded_and_checked_inner(false); } fn proof_recorded_and_checked_inner(flagged: bool) { - let size_content = 33; // above hashable value treshold. + let size_content = 34; // above hashable value treshold. 
let value_range = 0..64; - let contents = value_range.clone().map(|i| (vec![i], Some(vec![i; size_content]))).collect::>(); + let mut contents = value_range.clone().map(|i| (vec![i], Some(vec![i; size_content]))).collect::>(); + if flagged { + contents.push(( + sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), + Some(sp_core::storage::trie_threshold_encode( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, + )), + )); + } let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(vec![(None, contents)], flagged); + let mut in_memory = in_memory.update(vec![(None, contents)]); let in_memory_root = in_memory.storage_root(std::iter::empty(), flagged).0; value_range.clone().for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); @@ -481,16 +485,23 @@ mod tests { #[test] fn proof_recorded_and_checked_old_hash() { // test proof starting with old hash content and flagging in between. - let size_content = 33; // above hashable value treshold. + // TODO not that usefull (we do run with direct update). -> replace by change of threshold + // test. + let size_content = 34; // above hashable value treshold. 
let value_range = 0..64; let contents = value_range.clone().map(|i| (vec![i], Some(vec![i; size_content]))).collect::>(); let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(vec![(None, contents)], false); + let mut in_memory = in_memory.update(vec![(None, contents)]); value_range.clone().for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); - in_memory = in_memory.update(vec![], true); - let in_memory_root = in_memory.storage_root(std::iter::empty(), false).0; + in_memory = in_memory.update(vec![(None, vec![( + sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), + Some(sp_core::storage::trie_threshold_encode( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, + )), + )])]); + let in_memory_root = in_memory.storage_root(std::iter::empty(), true).0; let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(std::iter::empty(), false).0; + let trie_root = trie.storage_root(std::iter::empty(), true).0; assert_eq!(in_memory_root, trie_root); value_range.clone().for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); @@ -513,20 +524,27 @@ mod tests { let child_info_2 = ChildInfo::new_default(b"sub2"); let child_info_1 = &child_info_1; let child_info_2 = &child_info_2; - let contents = vec![ - (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), + let mut contents = vec![ + (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>()), (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; + if flagged { + contents[0].1.push(( + sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), + Some(sp_core::storage::trie_threshold_encode( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, + )), + )); + } let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(contents, 
flagged); + let mut in_memory = in_memory.update(contents); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory.full_storage_root( std::iter::empty(), child_storage_keys.iter().map(|k|(k, std::iter::empty())), - flagged, ).0; (0..64).for_each(|i| assert_eq!( in_memory.storage(&[i]).unwrap().unwrap(), diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index b2e32726fc802..296520900c952 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -202,10 +202,6 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< fn set_whitelist(&mut self, _: Vec) { unimplemented!("set_whitelist is not supported in ReadOnlyExternalities") } - - fn alt_hashing(&mut self) { - unimplemented!("alt_hashing is not supported by ReadOnlyExternalities"); - } } impl<'a, H: Hasher, B: 'a + Backend> sp_externalities::ExtensionStore for ReadOnlyExternalities<'a, H, B> { diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 49e963c7f4a99..3327a78d016ea 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -139,7 +139,7 @@ where /// Insert key/value into backend pub fn insert(&mut self, k: StorageKey, v: StorageValue) { - self.backend.insert(vec![(None, vec![(k, Some(v))])], false); + self.backend.insert(vec![(None, vec![(k, Some(v))])]); } /// Registers the given extension for this instance. @@ -171,7 +171,7 @@ where )) } - self.backend.update(transaction, self.overlay.alt_hashing()) + self.backend.update(transaction) } /// Commit all pending changes to the underlying backend. @@ -239,7 +239,9 @@ impl Default for TestExternalities fn default() -> Self { // default to inner hashed. 
let mut storage = Storage::default(); - storage.alt_hashing = true; + storage.modify_trie_alt_hashing_threshold( + Some(sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD), + ); Self::new(storage) } } @@ -308,7 +310,8 @@ mod tests { #[test] fn commit_should_work() { - let mut ext = TestExternalities::::default(); + let storage = Storage::default(); // avoid adding the trie threshold. + let mut ext = TestExternalities::::from(storage); let mut ext = ext.ext(); ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 62666134445d8..52cadac871aa0 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -261,10 +261,6 @@ impl, H: Hasher> Backend for TrieBackend where fn wipe(&self) -> Result<(), Self::Error> { Ok(()) } - - fn state_hashed_value(&self) -> bool { - self.essence.state_hashed_value() - } } #[cfg(test)] @@ -305,6 +301,14 @@ pub mod tests { trie.insert(b"value1", &[42]).expect("insert failed"); trie.insert(b"value2", &[24]).expect("insert failed"); trie.insert(b":code", b"return 42").expect("insert failed"); + if hashed_value { + trie.insert( + sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG, + sp_core::storage::trie_threshold_encode( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, + ).as_slice(), + ).unwrap(); + } for i in 128u8..255u8 { trie.insert(&[i], &[i]).unwrap(); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 7b2bd65c293d5..e7d41a11eb64b 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -170,12 +170,6 @@ impl, H: Hasher> TrieBackendEssence where H::Out: Ok(next_key) } - /// Does current trie use inner hashed value. 
- pub fn state_hashed_value(&self) -> bool { - sp_trie::state_hashed_value::, _>(self, &self.root) - .unwrap_or_default() - } - /// Get the value of storage at given key. pub fn storage(&self, key: &[u8]) -> Result> { let map_e = |e| format!("Trie lookup error: {}", e); diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index ef1ba64f256d9..f63323bbaa17f 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -140,8 +140,6 @@ pub struct Storage { /// trie kind, so this is exclusively for the `ChildType::ParentKeyId` /// tries. pub children_default: std::collections::HashMap, StorageChild>, - /// `true` when state should hash values internally. - pub alt_hashing: bool, } /// Storage change set @@ -202,6 +200,44 @@ pub mod well_known_keys { } } +/// Configuration value for a given threshold. +pub fn trie_threshold_encode(threshold: u32) -> Vec { + codec::Compact(threshold).encode() +} + +/// Configuration threshold from encoded, invalid encoded +/// is same as no threshold. +pub fn trie_threshold_decode(mut encoded: &[u8]) -> Option { + codec::Compact::::decode(&mut encoded).ok() + .map(|compact| compact.0) +} + +/// Default value to use as a threshold for testing. +pub const TEST_DEFAULT_ALT_HASH_THRESHOLD: u32 = 34; + +#[cfg(feature = "std")] +impl Storage { + /// Utility function to get trie inner value hash threshold from + /// backend state or pending changes. + pub fn get_trie_alt_hashing_threshold(&self) -> Option { + self.top.get(well_known_keys::TRIE_HASHING_CONFIG) + .and_then(|encoded| trie_threshold_decode(&mut encoded.as_slice())) + } + + /// Utility function to modify trie inner value hash threshold. 
+ pub fn modify_trie_alt_hashing_threshold(&mut self, threshold: Option) { + match threshold { + Some(threshold) => { + let encoded = trie_threshold_encode(threshold); + self.top.insert(well_known_keys::TRIE_HASHING_CONFIG.to_vec(), encoded); + }, + None => { + self.top.remove(well_known_keys::TRIE_HASHING_CONFIG); + }, + } + } +} + /// Information related to a child state. #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index 6451caf11b7eb..5d99ca4368d0b 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -189,10 +189,6 @@ impl Externalities for AsyncExternalities { fn set_whitelist(&mut self, _: Vec) { unimplemented!("set_whitelist is not supported in AsyncExternalities") } - - fn alt_hashing(&mut self) { - unimplemented!("alt_hashing is not supported in AsyncExternalities") - } } impl sp_externalities::ExtensionStore for AsyncExternalities { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index de5d751aba038..283147602e31e 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -459,31 +459,6 @@ pub fn delta_trie_root( Ok(root) } -/// Resolve if inner hashing of value is active. -pub fn state_hashed_value>>( - db: &DB, - root: &TrieHash, -) -> Option> { - struct ReadMeta { - hashed: Option>, - } - impl trie_db::Query for &mut ReadMeta { - type Item = DBValue; - fn decode(self, value: &[u8]) -> DBValue { value.to_vec() } - fn record(&mut self, _hash: &::Out, _data: &[u8], _depth: u32, meta: &L::Meta) { - debug_assert!(self.hashed.is_none()); - self.hashed = Some(meta.extract_global_meta()); - } - } - let mut read_meta: ReadMeta = ReadMeta { - hashed: None, - }; - if let Ok(t) = TrieDB::::new(&*db, root) { - let _ = t.get_with(&[], &mut read_meta); - } - read_meta.hashed -} - /// Read a value from the trie. 
pub fn read_trie_value>>( db: &DB, @@ -847,6 +822,7 @@ mod trie_constants { /// achieve by using a escaped header in first or last element of proof /// and write it after. /// TODO 33 is not good switch to 32 + 1 + 1: 34 (avoid hashing stored hash for a 1 byte gain). + /// TODO replace with sp_storage test one. pub const INNER_HASH_TRESHOLD: usize = 33; const FIRST_PREFIX: u8 = 0b_00 << 6; /// In proof this header is used when only hashed value is stored. diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 467b802f65cef..0603f7dd7dfa8 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -211,7 +211,9 @@ impl TestClientBuilder authorities.encode(), blake2_256(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => { vec![111u8, 0, 0, 0, 0, 0, 0, 0] - } + }, + sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec() => + sp_core::storage::trie_threshold_encode( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, + ), ], children_default: map![], - alt_hashing: true, }, ) } From b62cdf5948b81a432ad420993bb9f6f7dcea6516 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 10 Jun 2021 13:22:55 +0200 Subject: [PATCH 047/188] pending switching to using threshold, new storage root api does not make much sense. 
--- bin/node/bench/src/generator.rs | 6 +- bin/node/bench/src/simple_trie.rs | 10 ++-- bin/node/bench/src/trie.rs | 11 +++- client/api/src/in_mem.rs | 4 +- client/db/src/bench.rs | 25 +++++--- client/db/src/lib.rs | 35 +++++++---- client/db/src/storage_cache.rs | 24 ++++---- client/light/src/backend.rs | 15 ++--- primitives/state-machine/src/backend.rs | 55 ++++++++++++++--- primitives/state-machine/src/basic.rs | 10 ++-- .../state-machine/src/changes_trie/mod.rs | 7 ++- .../state-machine/src/changes_trie/storage.rs | 7 ++- primitives/state-machine/src/ext.rs | 9 ++- .../state-machine/src/in_memory_backend.rs | 2 +- primitives/state-machine/src/lib.rs | 2 +- .../src/overlayed_changes/mod.rs | 18 +++++- .../state-machine/src/proving_backend.rs | 14 ++--- primitives/state-machine/src/trie_backend.rs | 16 ++--- .../state-machine/src/trie_backend_essence.rs | 60 ++++++++++++------- primitives/trie/src/lib.rs | 58 +++++++----------- primitives/trie/src/node_codec.rs | 14 +++-- primitives/trie/src/trie_stream.rs | 22 ++++--- 22 files changed, 268 insertions(+), 156 deletions(-) diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index f811802d357ee..6589bdcd0b277 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -31,7 +31,7 @@ use crate::simple_trie::SimpleTrie; pub fn generate_trie( db: Arc, key_values: impl IntoIterator, Vec)>, - alt_hashing: bool, + alt_hashing: Option, ) -> Hash { let mut root = Hash::default(); @@ -44,8 +44,8 @@ pub fn generate_trie( let mut trie = SimpleTrie { db, overlay: &mut overlay }; { - let mut trie_db = if alt_hashing { - let layout = sp_trie::Layout::with_inner_hashing(); + let mut trie_db = if let Some(threshold) = alt_hashing { + let layout = sp_trie::Layout::with_inner_hashing(threshold); TrieDBMut::::new_with_layout(&mut trie, &mut root, layout) } else { TrieDBMut::new(&mut trie, &mut root) diff --git a/bin/node/bench/src/simple_trie.rs b/bin/node/bench/src/simple_trie.rs 
index 4d8e76ae3a7e1..fea106ed196fe 100644 --- a/bin/node/bench/src/simple_trie.rs +++ b/bin/node/bench/src/simple_trie.rs @@ -31,15 +31,15 @@ pub struct SimpleTrie<'a> { pub overlay: &'a mut HashMap, Option>>, } -impl<'a> AsHashDB for SimpleTrie<'a> { - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } +impl<'a> AsHashDB> for SimpleTrie<'a> { + fn as_hash_db(&self) -> &dyn hash_db::HashDB> { &*self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB> + 'b) { &mut *self } } -impl<'a> HashDB for SimpleTrie<'a> { +impl<'a> HashDB> for SimpleTrie<'a> { fn get(&self, key: &Hash, prefix: Prefix) -> Option { let key = sp_trie::prefixed_key::(key, prefix); if let Some(value) = self.overlay.get(&key) { @@ -48,7 +48,7 @@ impl<'a> HashDB for SimpleTrie<'a> { self.db.get(0, &key).expect("Database backend error") } - fn get_with_meta(&self, key: &Hash, prefix: Prefix, global: bool) -> Option<(DBValue, TrieMeta)> { + fn get_with_meta(&self, key: &Hash, prefix: Prefix, global: Option) -> Option<(DBValue, TrieMeta)> { let result = self.get(key, prefix); result.map(|value| >::extract_value_owned(value, global)) } diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index 735403f95c87c..f1bcd3b2239c6 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -145,7 +145,7 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { let root = generate_trie( database.open(self.database_type), key_values, - true, + Some(sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD), ); Box::new(TrieReadBenchmark { @@ -170,7 +170,12 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { struct Storage(Arc); impl sp_state_machine::Storage for Storage { - fn get(&self, key: &Hash, prefix: Prefix, global: bool) -> Result, TrieMeta)>, String> { + fn get( + &self, + key: &Hash, + prefix: Prefix, + global: Option, + ) -> Result, TrieMeta)>, String> { let key = 
sp_trie::prefixed_key::(key, prefix); self.0.get(0, &key).map_err(|e| format!("Database backend error: {:?}", e)) .map(|result| result @@ -263,7 +268,7 @@ impl core::BenchmarkDescription for TrieWriteBenchmarkDescription { let root = generate_trie( database.open(self.database_type), key_values, - true, + Some(sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD), ); Box::new(TrieWriteBenchmark { diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index d756e1cc0bbc4..93ac41b3c9dce 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -573,9 +573,11 @@ impl backend::BlockImportOperation for BlockImportOperatio ) ); - let (root, transaction) = self.old_state.full_storage_root( + let alt_hashing = storage.get_trie_alt_hashing_threshold(); + let (root, transaction) = self.old_state.full_storage_root_with_alt_hashing( storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, + alt_hashing, ); self.new_state = Some(transaction); diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 312f576052e87..0201cd0b450a0 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -49,7 +49,12 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix, global: bool) -> Result, String> { + fn get( + &self, + key: &Block::Hash, + prefix: Prefix, + global: Option, + ) -> Result, String> { let prefixed_key = prefixed_key::>(key, prefix); if let Some(recorder) = &self.proof_recorder { if let Some(v) = recorder.get(&key) { @@ -155,9 +160,12 @@ impl BenchmarkingState { &child_content.child_info, child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), )); - let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( + let alt_hashing = genesis.get_trie_alt_hashing_threshold(); + let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap() + .full_storage_root_with_alt_hashing( 
genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, + alt_hashing, ); state.genesis = transaction.clone().drain(); state.genesis_root = root.clone(); @@ -396,23 +404,24 @@ impl StateBackend> for BenchmarkingState { } } - fn storage_root<'a>( + fn storage_root_with_alt_hashing<'a>( &self, delta: impl Iterator)>, - alt_hashing: bool, + alt_hashing: Option, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.storage_root(delta, alt_hashing)) + self.state.borrow().as_ref() + .map_or(Default::default(), |s| s.storage_root_with_alt_hashing(delta, alt_hashing)) } - fn child_storage_root<'a>( + fn child_storage_root_with_alt_hashing<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: bool, + alt_hashing: Option, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { self.state.borrow().as_ref().map_or( Default::default(), - |s| s.child_storage_root(child_info, delta, alt_hashing), + |s| s.child_storage_root_with_alt_hashing(child_info, delta, alt_hashing), ) } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index dc0d5465ecaf7..9b3d525ef9eba 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -224,21 +224,21 @@ impl StateBackend> for RefTrackingState { self.state.for_child_keys_with_prefix(child_info, prefix, f) } - fn storage_root<'a>( + fn storage_root_with_alt_hashing<'a>( &self, delta: impl Iterator)>, - alt_hashing: bool, + alt_hashing: Option, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.storage_root(delta, alt_hashing) + self.state.storage_root_with_alt_hashing(delta, alt_hashing) } - fn child_storage_root<'a>( + fn child_storage_root_with_alt_hashing<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: bool, + alt_hashing: Option, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.child_storage_root(child_info, delta, alt_hashing) + 
self.state.child_storage_root_with_alt_hashing(child_info, delta, alt_hashing) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -778,7 +778,8 @@ impl sc_client_api::backend::BlockImportOperation for Bloc )); let mut changes_trie_config: Option = None; - let (root, transaction) = self.old_state.full_storage_root( + let alt_hashing = storage.get_trie_alt_hashing_threshold(); + let (root, transaction) = self.old_state.full_storage_root_with_alt_hashing( storage.top.iter().map(|(k, v)| { if &k[..] == well_known_keys::CHANGES_TRIE_CONFIG { changes_trie_config = Some( @@ -789,6 +790,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc (&k[..], Some(&v[..])) }), child_delta, + alt_hashing, ); self.db_updates = transaction; @@ -859,15 +861,21 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix, global: bool) -> Result, String> { + fn get( + &self, + key: &Block::Hash, + prefix: Prefix, + global: Option, + ) -> Result, String> { if self.prefix_keys { let key = prefixed_key::>(key, prefix); self.state_db.get(&key, self) } else { self.state_db.get(key.as_ref(), self) } - .map(|result| result.map(|value| , _>>::extract_value_owned(value, global))) - .map_err(|e| format!("Database backend error: {:?}", e)) + .map(|result| result.map(|value| + , _>>::extract_value_owned(value, global) + )).map_err(|e| format!("Database backend error: {:?}", e)) } fn access_from(&self, _key: &Block::Hash) { @@ -895,7 +903,12 @@ impl DbGenesisStorage { } impl sp_state_machine::Storage> for DbGenesisStorage { - fn get(&self, _key: &Block::Hash, _prefix: Prefix, _global: bool) -> Result, String> { + fn get( + &self, + _key: &Block::Hash, + _prefix: Prefix, + _global: Option, + ) -> Result, String> { Ok(None) } fn access_from(&self, _key: &Block::Hash) { diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 29dceb6b80828..bf929732caa2e 100644 --- a/client/db/src/storage_cache.rs +++ 
b/client/db/src/storage_cache.rs @@ -642,21 +642,21 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.for_child_keys_with_prefix(child_info, prefix, f) } - fn storage_root<'a>( + fn storage_root_with_alt_hashing<'a>( &self, delta: impl Iterator)>, - alt_hashing: bool, + alt_hashing: Option, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.storage_root(delta, alt_hashing) + self.state.storage_root_with_alt_hashing(delta, alt_hashing) } - fn child_storage_root<'a>( + fn child_storage_root_with_alt_hashing<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: bool, + alt_hashing: Option, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.child_storage_root(child_info, delta, alt_hashing) + self.state.child_storage_root_with_alt_hashing(child_info, delta, alt_hashing) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -826,21 +826,21 @@ impl>, B: BlockT> StateBackend> for Syncin self.caching_state().for_child_keys_with_prefix(child_info, prefix, f) } - fn storage_root<'a>( + fn storage_root_with_alt_hashing<'a>( &self, delta: impl Iterator)>, - alt_hashing: bool, + alt_hashing: Option, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.caching_state().storage_root(delta, alt_hashing) + self.caching_state().storage_root_with_alt_hashing(delta, alt_hashing) } - fn child_storage_root<'a>( + fn child_storage_root_with_alt_hashing<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: bool, + alt_hashing: Option, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.caching_state().child_storage_root(child_info, delta, alt_hashing) + self.caching_state().child_storage_root_with_alt_hashing(child_info, delta, alt_hashing) } fn pairs(&self) -> Vec<(Vec, Vec)> { diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index d68507f549320..d7ec2df029cf5 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -486,27 +486,28 @@ impl 
StateBackend for GenesisOrUnavailableState } } - fn storage_root<'a>( + fn storage_root_with_alt_hashing<'a>( &self, delta: impl Iterator)>, - alt_hashing: bool, + alt_hashing: Option, ) -> (H::Out, Self::Transaction) where H::Out: Ord { match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.storage_root(delta, alt_hashing), + GenesisOrUnavailableState::Genesis(ref state) => state + .storage_root_with_alt_hashing(delta, alt_hashing), GenesisOrUnavailableState::Unavailable => Default::default(), } } - fn child_storage_root<'a>( + fn child_storage_root_with_alt_hashing<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: bool, + alt_hashing: Option, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { match *self { GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(child_info, delta, alt_hashing); + let (root, is_equal, _) = state + .child_storage_root_with_alt_hashing(child_info, delta, alt_hashing); (root, is_equal, Default::default()) }, GenesisOrUnavailableState::Unavailable => diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 65d0469100af1..913ad4cd7431b 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -129,8 +129,10 @@ pub trait Backend: sp_std::fmt::Debug { fn storage_root<'a>( &self, delta: impl Iterator)>, - alt_hashing: bool, - ) -> (H::Out, Self::Transaction) where H::Out: Ord; + ) -> (H::Out, Self::Transaction) where H::Out: Ord { + let alt_hashing = self.get_trie_alt_hashing_threshold(); + self.storage_root_with_alt_hashing(delta, alt_hashing) + } /// Calculate the child storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. 
The second argument @@ -139,9 +141,29 @@ pub trait Backend: sp_std::fmt::Debug { &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: bool, + ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { + let alt_hashing = self.get_trie_alt_hashing_threshold(); + self.child_storage_root_with_alt_hashing(child_info, delta, alt_hashing) + } + + /// Helpers function to avoid on query for full storage root. + /// See `storage_root`. + fn storage_root_with_alt_hashing<'a>( + &self, + delta: impl Iterator)>, + alt_hashing: Option, + ) -> (H::Out, Self::Transaction) where H::Out: Ord; + + /// Helpers function to avoid on query for full storage root. + /// See `child_storage_root`. + fn child_storage_root_with_alt_hashing<'a>( + &self, + child_info: &ChildInfo, + delta: impl Iterator)>, + alt_hashing: Option, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord; + /// Get all key/value pairs into a Vec. fn pairs(&self) -> Vec<(StorageKey, StorageValue)>; @@ -167,10 +189,10 @@ pub trait Backend: sp_std::fmt::Debug { fn as_trie_backend(&mut self) -> Option<&TrieBackend> { None } - /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. /// Does include child storage updates. + /// TODO remove (getting alt_hashing from delta is required) fn full_storage_root<'a>( &self, delta: impl Iterator)>, @@ -179,13 +201,30 @@ pub trait Backend: sp_std::fmt::Debug { impl Iterator)>, )>, ) -> (H::Out, Self::Transaction) where H::Out: Ord + Encode { - let alt_hashing = self.get_trie_alt_hashing_threshold().is_some(); + let alt_hashing = self.get_trie_alt_hashing_threshold(); + self.full_storage_root_with_alt_hashing(delta, child_deltas, alt_hashing) + } + + /// Helpers function to avoid on query for full storage root. + /// See `storage_root`. 
+ fn full_storage_root_with_alt_hashing<'a>( + &self, + delta: impl Iterator)>, + child_deltas: impl Iterator)>, + )>, + alt_hashing: Option, + ) -> (H::Out, Self::Transaction) where H::Out: Ord + Encode { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first for (child_info, child_delta) in child_deltas { - let (child_root, empty, child_txs) = - self.child_storage_root(&child_info, child_delta, alt_hashing); + let (child_root, empty, child_txs) = self.child_storage_root_with_alt_hashing( + &child_info, + child_delta, + alt_hashing, + ); let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { @@ -194,7 +233,7 @@ pub trait Backend: sp_std::fmt::Debug { child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode()))); } } - let (root, parent_txs) = self.storage_root(delta + let (root, parent_txs) = self.storage_root_with_alt_hashing(delta .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) .chain( child_roots diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 57158d938ccd0..dcc4aa2cafdd9 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -281,9 +281,9 @@ impl Externalities for BasicExternalities { } } - let alt_hashing = self.get_trie_alt_hashing_threshold().is_some(); - let layout = if alt_hashing { - Layout::::with_inner_hashing() + let alt_hashing = self.get_trie_alt_hashing_threshold(); + let layout = if let Some(threshold) = alt_hashing { + Layout::::with_inner_hashing(threshold) } else { Layout::::default() }; @@ -296,9 +296,9 @@ impl Externalities for BasicExternalities { ) -> Vec { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); - let alt_hashing = self.get_trie_alt_hashing_threshold().is_some(); + let alt_hashing = 
self.get_trie_alt_hashing_threshold(); crate::in_memory_backend::new_in_mem::() - .child_storage_root(&child.child_info, delta, alt_hashing).0 + .child_storage_root_with_alt_hashing(&child.child_info, delta, alt_hashing).0 } else { empty_child_trie_root::>() }.encode() diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index f2bbf371fb50e..5a2e206956ac9 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -167,7 +167,12 @@ pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { type Overlay = sp_trie::MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix, _global: bool) -> Result, String> { + fn get( + &self, + key: &H::Out, + prefix: Prefix, + _global: Option, + ) -> Result, String> { match self.0.get(key, prefix) { // change trie do not use meta. Ok(Some(v)) => Ok(Some((v, Default::default()))), diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 115cc5461e1a9..0cea30f0d809c 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -207,7 +207,12 @@ impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> { type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix, _global: bool) -> Result, String> { + fn get( + &self, + key: &H::Out, + prefix: Prefix, + _global: Option, + ) -> Result, String> { match self.storage.get(key, prefix) { // change trie do not use meta. 
Ok(Some(v)) => Ok(Some((v, Default::default()))), diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 4ecbe17fc3346..01c3eb1e56c27 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -519,7 +519,10 @@ where return root.encode(); } - let root = self.overlay.storage_root(self.backend, self.storage_transaction_cache); + let root = self.overlay.storage_root( + self.backend, + self.storage_transaction_cache, + ); trace!(target: "state", "{:04x}: Root {}", self.id, HexDisplay::from(&root.as_ref())); root.encode() } @@ -547,8 +550,8 @@ where } else { let root = if let Some((changes, info)) = self.overlay.child_changes(storage_key) { let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); - let alt_hashing = self.get_trie_alt_hashing_threshold().is_some(); - Some(self.backend.child_storage_root(info, delta, alt_hashing)) + let alt_hashing = self.get_trie_alt_hashing_threshold(); + Some(self.backend.child_storage_root_with_alt_hashing(info, delta, alt_hashing)) } else { None }; diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index c5dc881caae91..045558d9f8440 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -58,8 +58,8 @@ where &mut self, changes: T, ) { - // Note that in case the threshold is changed, it will not be apply immediately. 
let (top, child) = changes.into_iter().partition::, _>(|v| v.0.is_none()); + unimplemented!("get alt hashing changes here"); let (root, transaction) = self.full_storage_root( top.iter().map(|(_, v)| v).flatten().map(|(k, v)| (&k[..], v.as_deref())), child.iter() diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index d7d6cbc1da731..20efdb1236a1a 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -855,7 +855,7 @@ mod execution { where H: Hasher, H::Out: Ord + Codec, - MH: sp_trie::MetaHasher, + MH: sp_trie::MetaHasher>, KF: sp_trie::KeyFunction + Send + Sync, { proving_backend.storage(key).map_err(|e| Box::new(e) as Box) diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 1d3cbb59ba0c1..6d28184501a5f 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -614,6 +614,16 @@ impl OverlayedChanges { } } + /// Utility function to get trie inner value hash threshold from + /// backend state or pending changes. + fn get_trie_alt_hashing_threshold>(&self, backend: &B) -> Option { + match self.storage(sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG) { + Some(Some(mut encoded)) => sp_core::storage::trie_threshold_decode(&mut encoded), + Some(None) => None, + None => backend.get_trie_alt_hashing_threshold(), + } + } + /// Generate the storage root using `backend` and all changes /// as seen by the current transaction. 
/// @@ -625,13 +635,19 @@ impl OverlayedChanges { ) -> H::Out where H::Out: Ord + Encode, { + let alt_hashing = self.get_trie_alt_hashing_threshold(backend); + let delta = self.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); let child_delta = self.children() .map(|(changes, info)| (info, changes.map( |(k, v)| (&k[..], v.value().map(|v| &v[..])) ))); - let (root, transaction) = backend.full_storage_root(delta, child_delta); + let (root, transaction) = backend.full_storage_root_with_alt_hashing( + delta, + child_delta, + alt_hashing, + ); cache.transaction = Some(transaction); cache.transaction_storage_root = Some(root); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index d4a1ec66523dd..9b3eb9cc5be58 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -242,7 +242,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage { type Overlay = S::Overlay; - fn get(&self, key: &H::Out, prefix: Prefix, global: bool) -> Result, String> { + fn get(&self, key: &H::Out, prefix: Prefix, global: Option) -> Result, String> { if let Some(v) = self.proof_recorder.get(key) { return Ok(v); } @@ -340,21 +340,21 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.0.child_keys(child_info, prefix) } - fn storage_root<'b>( + fn storage_root_with_alt_hashing<'b>( &self, delta: impl Iterator)>, - alt_hashing: bool, + alt_hashing: Option, ) -> (H::Out, Self::Transaction) where H::Out: Ord { - self.0.storage_root(delta, alt_hashing) + self.0.storage_root_with_alt_hashing(delta, alt_hashing) } - fn child_storage_root<'b>( + fn child_storage_root_with_alt_hashing<'b>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: bool, + alt_hashing: Option, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { - self.0.child_storage_root(child_info, delta, alt_hashing) + 
self.0.child_storage_root_with_alt_hashing(child_info, delta, alt_hashing) } fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) { } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 52cadac871aa0..4f7001800cbd0 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -168,10 +168,10 @@ impl, H: Hasher> Backend for TrieBackend where collect_all().map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() } - fn storage_root<'a>( + fn storage_root_with_alt_hashing<'a>( &self, delta: impl Iterator)>, - use_inner_hash_value: bool, + use_inner_hash_value: Option, ) -> (H::Out, Self::Transaction) where H::Out: Ord { let mut write_overlay = S::Overlay::default(); let mut root = *self.essence.root(); @@ -182,8 +182,8 @@ impl, H: Hasher> Backend for TrieBackend where &mut write_overlay, ); let res = || { - let layout = if use_inner_hash_value { - sp_trie::Layout::with_inner_hashing() + let layout = if let Some(threshold) = use_inner_hash_value { + sp_trie::Layout::with_inner_hashing(threshold) } else { sp_trie::Layout::default() }; @@ -199,17 +199,17 @@ impl, H: Hasher> Backend for TrieBackend where (root, write_overlay) } - fn child_storage_root<'a>( + fn child_storage_root_with_alt_hashing<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - use_inner_hash_value: bool, + use_inner_hash_value: Option, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { let default_root = match child_info.child_type() { ChildType::ParentKeyId => empty_child_trie_root::>() }; - let layout = if use_inner_hash_value { - sp_trie::Layout::with_inner_hashing() + let layout = if let Some(threshold) = use_inner_hash_value { + sp_trie::Layout::with_inner_hashing(threshold) } else { sp_trie::Layout::default() }; diff --git a/primitives/state-machine/src/trie_backend_essence.rs 
b/primitives/state-machine/src/trie_backend_essence.rs index e7d41a11eb64b..63b1ccc2d9698 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -44,7 +44,12 @@ type Result = sp_std::result::Result; /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix, inner_hash: bool) -> Result>; + fn get( + &self, + key: &H::Out, + prefix: Prefix, + alt_hashing: Option, + ) -> Result>; /// Call back when value get accessed in trie. fn access_from(&self, key: &H::Out); } @@ -300,11 +305,11 @@ pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { overlay: &'a mut S::Overlay, } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB> for Ephemeral<'a, S, H> { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB> + 'b) { self } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB> + 'b) { self } } impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { @@ -316,14 +321,14 @@ impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB> for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { Self::get_with_meta(self, key, prefix, Default::default()).map(|r| r.0) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: bool) -> Option<(DBValue, TrieMeta)> { + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: Option) -> Option<(DBValue, TrieMeta)> { if let Some(val) = hash_db::HashDB::get_with_meta(self.overlay, key, prefix, global) { Some(val) } else { 
@@ -369,14 +374,14 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB, H: Hasher> hash_db::HashDBRef +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef> for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { hash_db::HashDB::get(self, key, prefix) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: bool) -> Option<(DBValue, TrieMeta)> { + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: Option) -> Option<(DBValue, TrieMeta)> { hash_db::HashDB::get_with_meta(self, key, prefix, global) } @@ -392,9 +397,9 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef: Send + Sync { /// Type of in-memory overlay. - type Overlay: hash_db::HashDB + Default + Consolidate; + type Overlay: hash_db::HashDB> + Default + Consolidate; /// Get the value stored at key. - fn get(&self, key: &H::Out, prefix: Prefix, global: bool) -> Result>; + fn get(&self, key: &H::Out, prefix: Prefix, global: Option) -> Result>; /// Call back when value get accessed in trie. 
fn access_from(&self, key: &H::Out); } @@ -404,7 +409,12 @@ pub trait TrieBackendStorage: Send + Sync { impl TrieBackendStorage for Arc> { type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix, global: bool) -> Result> { + fn get( + &self, + key: &H::Out, + prefix: Prefix, + global: Option, + ) -> Result> { Storage::::get(self.deref(), key, prefix, global) } @@ -416,12 +426,17 @@ impl TrieBackendStorage for Arc> { impl TrieBackendStorage for sp_trie::GenericMemoryDB where H: Hasher, - MH: sp_trie::MetaHasher, + MH: sp_trie::MetaHasher>, KF: sp_trie::KeyFunction + Send + Sync, { type Overlay = Self; - fn get(&self, key: &H::Out, prefix: Prefix, global: bool) -> Result> { + fn get( + &self, + key: &H::Out, + prefix: Prefix, + global: Option, + ) -> Result> { Ok(hash_db::HashDB::get_with_meta(self, key, prefix, global)) } @@ -430,21 +445,26 @@ impl TrieBackendStorage for sp_trie::GenericMemoryDB } } -impl, H: Hasher> hash_db::AsHashDB +impl, H: Hasher> hash_db::AsHashDB> for TrieBackendEssence { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB> + 'b) { self } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB> + 'b) { self } } -impl, H: Hasher> hash_db::HashDB +impl, H: Hasher> hash_db::HashDB> for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { self.get_with_meta(key, prefix, Default::default()).map(|r| r.0) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: bool) -> Option<(DBValue, TrieMeta)> { + fn get_with_meta( + &self, + key: &H::Out, + prefix: Prefix, + global: Option, + ) -> Option<(DBValue, TrieMeta)> { if *key == self.empty { return Some(([0u8].to_vec(), ::meta_for_empty(global))) } @@ -484,14 +504,14 @@ impl, H: Hasher> hash_db::HashDB, H: Hasher> hash_db::HashDBRef +impl, H: Hasher> hash_db::HashDBRef> for 
TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { hash_db::HashDB::get(self, key, prefix) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: bool) -> Option<(DBValue, TrieMeta)> { + fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: Option) -> Option<(DBValue, TrieMeta)> { hash_db::HashDB::get_with_meta(self, key, prefix, global) } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 283147602e31e..1b8acc9a796a3 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -57,24 +57,27 @@ pub struct TrieMeta { pub range: Option>, /// Defined in the trie layout, when used with /// `TrieDbMut` it switch nodes to alternative hashing - /// method by setting `do_value_hash` to true. - pub try_inner_hashing: bool, + /// method by defining the threshold to use with alternative + /// hashing. + pub try_inner_hashing: Option, + /// Flag indicating alternative value hash is currently use + /// or will be use. + pub apply_inner_hashing: bool, /// Does current encoded contains a hash instead of /// a value (information stored in meta for proofs). pub contain_hash: bool, - /// Flag indicating alternative value hash will be use. - pub apply_inner_hashing: bool, /// Record if a value was accessed, this is /// set as accessed by defalult, but can be /// change on access explicitely: `HashDB::get_with_meta`. /// and reset on access explicitely: `HashDB::access_from`. - /// TODO!! remove from meta: only use in proof recorder context. + /// TODO!! could be remove from meta: only use in proof recorder context. + /// But does not add memory usage here. pub unused_value: bool, } impl Meta for TrieMeta { /// When true apply inner hashing of value. - type GlobalMeta = bool; + type GlobalMeta = Option; /// When true apply inner hashing of value. 
type StateMeta = bool; @@ -156,8 +159,9 @@ impl Meta for TrieMeta { ValuePlan::NoValue => return, }; - self.apply_inner_hashing = self.try_inner_hashing - && range.end - range.start >= trie_constants::INNER_HASH_TRESHOLD; + self.apply_inner_hashing = self.try_inner_hashing.as_ref().map(|threshold| + range.end - range.start >= *threshold as usize + ).unwrap_or(false); self.range = Some(range); self.contain_hash = contain_hash; } @@ -201,7 +205,7 @@ impl TrieMeta { } /// substrate trie layout -pub struct Layout(bool, sp_std::marker::PhantomData<(H, M)>); +pub struct Layout(Option, sp_std::marker::PhantomData<(H, M)>); impl fmt::Debug for Layout { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -217,23 +221,23 @@ impl Clone for Layout { impl Default for Layout { fn default() -> Self { - Layout(false, sp_std::marker::PhantomData) + Layout(None, sp_std::marker::PhantomData) } } impl Layout { /// Layout with inner hashing active. /// Will flag trie for hashing. /// TODO rename inner -> alt - pub fn with_inner_hashing() -> Self { - Layout(true, sp_std::marker::PhantomData) + pub fn with_inner_hashing(threshold: u32) -> Self { + Layout(Some(threshold), sp_std::marker::PhantomData) } } impl TrieLayout for Layout where H: Hasher, - M: MetaHasher, - M::Meta: Meta, + M: MetaHasher>, + M::Meta: Meta, StateMeta = bool>, { const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; @@ -273,7 +277,7 @@ impl MetaHasher for StateHasher H: Hasher, { type Meta = TrieMeta; - type GlobalMeta = bool; + type GlobalMeta = Option; fn hash(value: &[u8], meta: &Self::Meta) -> H::Out { match &meta { @@ -335,7 +339,7 @@ impl MetaHasher for StateHasher unused_value: contain_hash, contain_hash, apply_inner_hashing: false, - try_inner_hashing: false, + try_inner_hashing: None, }; meta.set_global_meta(global_meta); (stored, meta) @@ -353,8 +357,8 @@ impl MetaHasher for StateHasher impl TrieConfiguration for Layout where H: Hasher, - M: MetaHasher, - M::Meta: Meta, + M: MetaHasher>, 
+ M::Meta: Meta, StateMeta = bool>, { fn trie_root(&self, input: I) -> ::Out where I: IntoIterator, @@ -812,18 +816,6 @@ pub fn resolve_encoded_meta(entry: &mut (DBValue, TrieMeta)) { /// Constants used into trie simplification codec. mod trie_constants { - /// Treshold for using hash of value instead of value - /// in encoded trie node when flagged. - /// TODO design would be to make it the global meta, but then - /// when serializing proof we would need to attach it (no way to - /// hash the nodes otherwhise), which would - /// break proof format. - /// TODO attaching to storage proof in a compatible way could be - /// achieve by using a escaped header in first or last element of proof - /// and write it after. - /// TODO 33 is not good switch to 32 + 1 + 1: 34 (avoid hashing stored hash for a 1 byte gain). - /// TODO replace with sp_storage test one. - pub const INNER_HASH_TRESHOLD: usize = 33; const FIRST_PREFIX: u8 = 0b_00 << 6; /// In proof this header is used when only hashed value is stored. pub const DEAD_HEADER_META_HASHED_VALUE: u8 = EMPTY_TRIE | 0b_00_01; @@ -895,12 +887,6 @@ mod tests { } fn check_input(input: &Vec<(&[u8], &[u8])>) { -// TODO remove this iter - let layout = Layout::with_inner_hashing(); - check_equivalent::(input, layout.clone()); - check_iteration::(input, layout.clone()); - - let layout = Layout::default(); check_equivalent::(input, layout.clone()); check_iteration::(input, layout); diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 61bcf789d8817..9e2931486cba0 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -186,7 +186,7 @@ impl NodeCodec { impl NodeCodecT for NodeCodec where H: Hasher, - M: Meta, + M: Meta>, { type Error = Error; type HashOut = H::Out; @@ -225,7 +225,9 @@ impl NodeCodecT for NodeCodec // With fix inner hashing alt hash can be use with all node, but // that is not better (encoding can use an additional nibble byte // sometime). 
- let mut output = if meta.extract_global_meta() && value_do_hash(&value) { + let mut output = if meta.extract_global_meta().as_ref().map(|threshold| + value_do_hash(&value, threshold) + ).unwrap_or(false) { partial_encode(partial, NodeKind::AltHashLeaf) } else { partial_encode(partial, NodeKind::Leaf) @@ -275,7 +277,9 @@ impl NodeCodecT for NodeCodec value: Value, meta: &mut M, ) -> Vec { - let mut output = match (&value, meta.extract_global_meta() && value_do_hash(&value)) { + let mut output = match (&value, meta.extract_global_meta().as_ref().map(|threshold| + value_do_hash(&value, threshold) + ).unwrap_or(false)) { (&Value::NoValue, _) => { partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) }, @@ -327,9 +331,9 @@ impl NodeCodecT for NodeCodec // utils -fn value_do_hash(val: &Value) -> bool { +fn value_do_hash(val: &Value, threshold: &u32) -> bool { if let Value::Value(val) = val { - val.encoded_size() >= trie_constants::INNER_HASH_TRESHOLD + val.encoded_size() >= *threshold as usize } else { false } diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index 4e383c9d5763c..2be98fad76ab6 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -35,7 +35,7 @@ pub struct TrieStream { /// Current node buffer. buffer: Vec, /// Global trie alt hashing activation. - inner_value_hashing: bool, + inner_value_hashing: Option, /// For current node, do we use alt hashing. apply_inner_hashing: bool, /// Keep trace of position of encoded value. 
@@ -76,9 +76,9 @@ fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator; - fn new(meta: bool) -> Self { + fn new(meta: Option) -> Self { Self { buffer: Vec::new(), inner_value_hashing: meta, @@ -92,7 +92,9 @@ impl trie_root::TrieStream for TrieStream { } fn append_leaf(&mut self, key: &[u8], value: &[u8]) { - self.apply_inner_hashing = self.inner_value_hashing && value_do_hash(value); + self.apply_inner_hashing = self.inner_value_hashing.as_ref().map(|threshold| + value_do_hash(value, threshold) + ).unwrap_or(false); let kind = if self.apply_inner_hashing { NodeKind::AltHashLeaf } else { @@ -113,7 +115,9 @@ impl trie_root::TrieStream for TrieStream { ) { if let Some(partial) = maybe_partial { if let Some(value) = maybe_value { - self.apply_inner_hashing = self.inner_value_hashing && value_do_hash(value); + self.apply_inner_hashing = self.inner_value_hashing.as_ref().map(|threshold| + value_do_hash(value, threshold) + ).unwrap_or(false); let kind = if self.apply_inner_hashing { NodeKind::AltHashBranchWithValue } else { @@ -155,7 +159,7 @@ impl trie_root::TrieStream for TrieStream { contain_hash: false, // Using `inner_value_hashing` instead to check this. // And unused in hasher. 
- try_inner_hashing: false, + try_inner_hashing: None, apply_inner_hashing: true, }; >>::hash(&data, &meta).as_ref().encode_to(&mut self.buffer); @@ -174,7 +178,7 @@ impl trie_root::TrieStream for TrieStream { range: range, unused_value: false, contain_hash: false, - try_inner_hashing: false, + try_inner_hashing: None, apply_inner_hashing: true, }; @@ -207,6 +211,6 @@ fn branch_node_buffered(has_value: bool, has_children: I, output: &mut[u8]) Bitmap::encode(has_children, &mut output[1..]); } -fn value_do_hash(val: &[u8]) -> bool { - val.encoded_size() >= trie_constants::INNER_HASH_TRESHOLD +fn value_do_hash(val: &[u8], threshold: &u32) -> bool { + val.encoded_size() >= *threshold as usize } From 74fb9c8eab613fb21964a153218a07f8501cb979 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 10 Jun 2021 14:51:17 +0200 Subject: [PATCH 048/188] refactoring to use state from backend (not possible payload changes). --- client/api/src/in_mem.rs | 2 +- client/db/src/bench.rs | 10 ++-- client/db/src/lib.rs | 10 ++-- client/db/src/storage_cache.rs | 16 +++---- client/light/src/backend.rs | 11 +++-- primitives/externalities/src/lib.rs | 2 + primitives/state-machine/src/backend.rs | 48 +++---------------- primitives/state-machine/src/basic.rs | 20 ++++---- primitives/state-machine/src/ext.rs | 6 +-- .../state-machine/src/in_memory_backend.rs | 15 ++++-- .../src/overlayed_changes/mod.rs | 14 +----- .../state-machine/src/proving_backend.rs | 8 ++-- primitives/state-machine/src/trie_backend.rs | 20 +++++++- primitives/storage/src/lib.rs | 16 ++++++- 14 files changed, 97 insertions(+), 101 deletions(-) diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 93ac41b3c9dce..899fdd5e66690 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -574,7 +574,7 @@ impl backend::BlockImportOperation for BlockImportOperatio ); let alt_hashing = storage.get_trie_alt_hashing_threshold(); - let (root, transaction) = self.old_state.full_storage_root_with_alt_hashing( + 
let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, alt_hashing, diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 0201cd0b450a0..af626d3af7c2f 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -162,7 +162,7 @@ impl BenchmarkingState { )); let alt_hashing = genesis.get_trie_alt_hashing_threshold(); let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap() - .full_storage_root_with_alt_hashing( + .full_storage_root( genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, alt_hashing, @@ -404,16 +404,16 @@ impl StateBackend> for BenchmarkingState { } } - fn storage_root_with_alt_hashing<'a>( + fn storage_root<'a>( &self, delta: impl Iterator)>, alt_hashing: Option, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { self.state.borrow().as_ref() - .map_or(Default::default(), |s| s.storage_root_with_alt_hashing(delta, alt_hashing)) + .map_or(Default::default(), |s| s.storage_root(delta, alt_hashing)) } - fn child_storage_root_with_alt_hashing<'a>( + fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, @@ -421,7 +421,7 @@ impl StateBackend> for BenchmarkingState { ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { self.state.borrow().as_ref().map_or( Default::default(), - |s| s.child_storage_root_with_alt_hashing(child_info, delta, alt_hashing), + |s| s.child_storage_root(child_info, delta, alt_hashing), ) } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 9b3d525ef9eba..5fa44f8c2e1c3 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -224,21 +224,21 @@ impl StateBackend> for RefTrackingState { self.state.for_child_keys_with_prefix(child_info, prefix, f) } - fn storage_root_with_alt_hashing<'a>( + fn storage_root<'a>( &self, delta: impl Iterator)>, alt_hashing: Option, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - 
self.state.storage_root_with_alt_hashing(delta, alt_hashing) + self.state.storage_root(delta, alt_hashing) } - fn child_storage_root_with_alt_hashing<'a>( + fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, alt_hashing: Option, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.child_storage_root_with_alt_hashing(child_info, delta, alt_hashing) + self.state.child_storage_root(child_info, delta, alt_hashing) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -779,7 +779,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc let mut changes_trie_config: Option = None; let alt_hashing = storage.get_trie_alt_hashing_threshold(); - let (root, transaction) = self.old_state.full_storage_root_with_alt_hashing( + let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| { if &k[..] == well_known_keys::CHANGES_TRIE_CONFIG { changes_trie_config = Some( diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index bf929732caa2e..b249ea14748ac 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -642,21 +642,21 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.for_child_keys_with_prefix(child_info, prefix, f) } - fn storage_root_with_alt_hashing<'a>( + fn storage_root<'a>( &self, delta: impl Iterator)>, alt_hashing: Option, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.storage_root_with_alt_hashing(delta, alt_hashing) + self.state.storage_root(delta, alt_hashing) } - fn child_storage_root_with_alt_hashing<'a>( + fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, alt_hashing: Option, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.child_storage_root_with_alt_hashing(child_info, delta, alt_hashing) + self.state.child_storage_root(child_info, delta, alt_hashing) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -826,21 +826,21 @@ impl>, B: BlockT> StateBackend> for 
Syncin self.caching_state().for_child_keys_with_prefix(child_info, prefix, f) } - fn storage_root_with_alt_hashing<'a>( + fn storage_root<'a>( &self, delta: impl Iterator)>, alt_hashing: Option, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.caching_state().storage_root_with_alt_hashing(delta, alt_hashing) + self.caching_state().storage_root(delta, alt_hashing) } - fn child_storage_root_with_alt_hashing<'a>( + fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, alt_hashing: Option, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.caching_state().child_storage_root_with_alt_hashing(child_info, delta, alt_hashing) + self.caching_state().child_storage_root(child_info, delta, alt_hashing) } fn pairs(&self) -> Vec<(Vec, Vec)> { diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index d7ec2df029cf5..7b07477012d72 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -324,6 +324,7 @@ impl BlockImportOperation for ImportOperation fn reset_storage(&mut self, input: Storage) -> ClientResult { check_genesis_storage(&input)?; + let alt_hashing = input.get_trie_alt_hashing_threshold(); // changes trie configuration let changes_trie_config = input.top.iter() .find(|(k, _)| &k[..] 
== well_known_keys::CHANGES_TRIE_CONFIG) @@ -346,7 +347,7 @@ impl BlockImportOperation for ImportOperation } let storage_update = InMemoryBackend::from(storage); - let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); + let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta, alt_hashing); self.storage_update = Some(storage_update); Ok(storage_root) @@ -486,19 +487,19 @@ impl StateBackend for GenesisOrUnavailableState } } - fn storage_root_with_alt_hashing<'a>( + fn storage_root<'a>( &self, delta: impl Iterator)>, alt_hashing: Option, ) -> (H::Out, Self::Transaction) where H::Out: Ord { match *self { GenesisOrUnavailableState::Genesis(ref state) => state - .storage_root_with_alt_hashing(delta, alt_hashing), + .storage_root(delta, alt_hashing), GenesisOrUnavailableState::Unavailable => Default::default(), } } - fn child_storage_root_with_alt_hashing<'a>( + fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, @@ -507,7 +508,7 @@ impl StateBackend for GenesisOrUnavailableState match *self { GenesisOrUnavailableState::Genesis(ref state) => { let (root, is_equal, _) = state - .child_storage_root_with_alt_hashing(child_info, delta, alt_hashing); + .child_storage_root(child_info, delta, alt_hashing); (root, is_equal, Default::default()) }, GenesisOrUnavailableState::Unavailable => diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 351d50bd90e60..d902df436889f 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -326,12 +326,14 @@ impl ExternalitiesExt for &mut dyn Externalities { /// Helpers method for the [`Externalities`] trait. pub trait ExternalitiesHelpers: Externalities { + /* No we use backend in this case TODO remove function /// Utility function to get trie inner value hash threshold from /// backend state or pending changes. 
fn get_trie_alt_hashing_threshold(&self) -> Option { self.storage(sp_storage::well_known_keys::TRIE_HASHING_CONFIG) .and_then(|encoded| sp_storage::trie_threshold_decode(&mut encoded.as_slice())) } + */ /// Utility function to modify trie inner value hash threshold. fn modify_trie_alt_hashing_threshold(&mut self, threshold: Option) { diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 913ad4cd7431b..4976d6a43d5db 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -126,44 +126,24 @@ pub trait Backend: sp_std::fmt::Debug { /// `alt_hashing` indicate if trie state should apply alternate hashing /// scheme (inner value hashed). /// Does not include child storage updates. + /// Alt hashing paremeter must contain possible changes from delta. fn storage_root<'a>( &self, delta: impl Iterator)>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord { - let alt_hashing = self.get_trie_alt_hashing_threshold(); - self.storage_root_with_alt_hashing(delta, alt_hashing) - } + alt_hashing: Option, + ) -> (H::Out, Self::Transaction) where H::Out: Ord; /// Calculate the child storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. The second argument /// is true if child storage root equals default storage root. + /// Alt hashing paremeter must contain possible changes from delta. fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { - let alt_hashing = self.get_trie_alt_hashing_threshold(); - self.child_storage_root_with_alt_hashing(child_info, delta, alt_hashing) - } - - /// Helpers function to avoid on query for full storage root. - /// See `storage_root`. 
- fn storage_root_with_alt_hashing<'a>( - &self, - delta: impl Iterator)>, - alt_hashing: Option, - ) -> (H::Out, Self::Transaction) where H::Out: Ord; - - /// Helpers function to avoid on query for full storage root. - /// See `child_storage_root`. - fn child_storage_root_with_alt_hashing<'a>( - &self, - child_info: &ChildInfo, - delta: impl Iterator)>, alt_hashing: Option, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord; - /// Get all key/value pairs into a Vec. fn pairs(&self) -> Vec<(StorageKey, StorageValue)>; @@ -192,7 +172,7 @@ pub trait Backend: sp_std::fmt::Debug { /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. /// Does include child storage updates. - /// TODO remove (getting alt_hashing from delta is required) + /// Alt hashing paremeter must contain possible changes from delta. fn full_storage_root<'a>( &self, delta: impl Iterator)>, @@ -200,27 +180,13 @@ pub trait Backend: sp_std::fmt::Debug { &'a ChildInfo, impl Iterator)>, )>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord + Encode { - let alt_hashing = self.get_trie_alt_hashing_threshold(); - self.full_storage_root_with_alt_hashing(delta, child_deltas, alt_hashing) - } - - /// Helpers function to avoid on query for full storage root. - /// See `storage_root`. 
- fn full_storage_root_with_alt_hashing<'a>( - &self, - delta: impl Iterator)>, - child_deltas: impl Iterator)>, - )>, alt_hashing: Option, ) -> (H::Out, Self::Transaction) where H::Out: Ord + Encode { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); // child first for (child_info, child_delta) in child_deltas { - let (child_root, empty, child_txs) = self.child_storage_root_with_alt_hashing( + let (child_root, empty, child_txs) = self.child_storage_root( &child_info, child_delta, alt_hashing, @@ -233,7 +199,7 @@ pub trait Backend: sp_std::fmt::Debug { child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode()))); } } - let (root, parent_txs) = self.storage_root_with_alt_hashing(delta + let (root, parent_txs) = self.storage_root(delta .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) .chain( child_roots diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index dcc4aa2cafdd9..4a691e4eb208b 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -33,19 +33,21 @@ use sp_core::{ }; use log::warn; use codec::Encode; -use sp_externalities::{Extensions, Extension, ExternalitiesHelpers}; +use sp_externalities::{Extensions, Extension}; /// Simple Map-based Externalities impl. #[derive(Debug)] pub struct BasicExternalities { inner: Storage, extensions: Extensions, + alt_hashing: Option, } impl BasicExternalities { /// Create a new instance of `BasicExternalities` pub fn new(inner: Storage) -> Self { - BasicExternalities { inner, extensions: Default::default() } + let alt_hashing = inner.get_trie_alt_hashing_threshold(); + BasicExternalities { inner, extensions: Default::default(), alt_hashing } } /// New basic externalities with empty storage. 
@@ -70,12 +72,14 @@ impl BasicExternalities { storage: &mut sp_core::storage::Storage, f: impl FnOnce() -> R, ) -> R { + let alt_hashing = storage.get_trie_alt_hashing_threshold(); let mut ext = Self { inner: Storage { top: std::mem::take(&mut storage.top), children_default: std::mem::take(&mut storage.children_default), }, extensions: Default::default(), + alt_hashing, }; let r = ext.execute_with(f); @@ -124,12 +128,14 @@ impl Default for BasicExternalities { impl From> for BasicExternalities { fn from(hashmap: BTreeMap) -> Self { + let alt_hashing = sp_core::storage::alt_hashing::get_trie_alt_hashing_threshold(&hashmap); BasicExternalities { inner: Storage { top: hashmap, children_default: Default::default(), }, extensions: Default::default(), + alt_hashing, } } } @@ -281,9 +287,8 @@ impl Externalities for BasicExternalities { } } - let alt_hashing = self.get_trie_alt_hashing_threshold(); - let layout = if let Some(threshold) = alt_hashing { - Layout::::with_inner_hashing(threshold) + let layout = if let Some(threshold) = self.alt_hashing.as_ref() { + Layout::::with_inner_hashing(*threshold) } else { Layout::::default() }; @@ -296,9 +301,8 @@ impl Externalities for BasicExternalities { ) -> Vec { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); - let alt_hashing = self.get_trie_alt_hashing_threshold(); - crate::in_memory_backend::new_in_mem::() - .child_storage_root_with_alt_hashing(&child.child_info, delta, alt_hashing).0 + crate::in_memory_backend::new_in_mem::(self.alt_hashing.clone()) + .child_storage_root(&child.child_info, delta, self.alt_hashing.clone()).0 } else { empty_child_trie_root::>() }.encode() diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 01c3eb1e56c27..3927432fbc94e 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -28,7 +28,7 @@ use sp_core::{ }; use 
sp_trie::{trie_types::Layout, empty_child_trie_root}; use sp_externalities::{ - Externalities, Extensions, Extension, ExtensionStore, ExternalitiesHelpers, + Externalities, Extensions, Extension, ExtensionStore, }; use codec::{Decode, Encode, EncodeAppend}; @@ -549,9 +549,9 @@ where root.encode() } else { let root = if let Some((changes, info)) = self.overlay.child_changes(storage_key) { + let alt_hashing = self.backend.get_trie_alt_hashing_threshold(); let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); - let alt_hashing = self.get_trie_alt_hashing_threshold(); - Some(self.backend.child_storage_root_with_alt_hashing(info, delta, alt_hashing)) + Some(self.backend.child_storage_root(info, delta, alt_hashing)) } else { None }; diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 045558d9f8440..f7a97f1bb2c26 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -27,12 +27,14 @@ use codec::Codec; use sp_core::storage::{ChildInfo, Storage}; /// Create a new empty instance of in-memory backend. 
-pub fn new_in_mem() -> TrieBackend, H> +pub fn new_in_mem(initial_alt_hashing: Option) -> TrieBackend, H> where H::Out: Codec + Ord, { let db = MemoryDB::default(); - TrieBackend::new(db, empty_trie_root::>()) + let mut trie = TrieBackend::new(db, empty_trie_root::>()); + trie.force_alt_hashing = Some(initial_alt_hashing); + trie } impl TrieBackend, H> @@ -59,13 +61,13 @@ where changes: T, ) { let (top, child) = changes.into_iter().partition::, _>(|v| v.0.is_none()); - unimplemented!("get alt hashing changes here"); let (root, transaction) = self.full_storage_root( top.iter().map(|(_, v)| v).flatten().map(|(k, v)| (&k[..], v.as_deref())), child.iter() .filter_map(|v| v.0.as_ref().map(|c| (c, v.1.iter().map(|(k, v)| (&k[..], v.as_deref())))) ), + self.force_alt_hashing.clone().flatten(), ); self.apply_transaction(root, transaction); @@ -104,7 +106,7 @@ where H::Out: Codec + Ord, { fn default() -> Self { - new_in_mem() + new_in_mem(None) } } @@ -114,7 +116,10 @@ where H::Out: Codec + Ord, { fn from(inner: HashMap, BTreeMap>) -> Self { - let mut backend = new_in_mem(); + let alt_hashing = inner.get(&None).map(|map| + sp_core::storage::alt_hashing::get_trie_alt_hashing_threshold(&map) + ).flatten(); + let mut backend = new_in_mem(alt_hashing); backend.insert( inner.into_iter().map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect())), ); diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 6d28184501a5f..ea1c973ebbfa1 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -614,16 +614,6 @@ impl OverlayedChanges { } } - /// Utility function to get trie inner value hash threshold from - /// backend state or pending changes. 
- fn get_trie_alt_hashing_threshold>(&self, backend: &B) -> Option { - match self.storage(sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG) { - Some(Some(mut encoded)) => sp_core::storage::trie_threshold_decode(&mut encoded), - Some(None) => None, - None => backend.get_trie_alt_hashing_threshold(), - } - } - /// Generate the storage root using `backend` and all changes /// as seen by the current transaction. /// @@ -635,7 +625,7 @@ impl OverlayedChanges { ) -> H::Out where H::Out: Ord + Encode, { - let alt_hashing = self.get_trie_alt_hashing_threshold(backend); + let alt_hashing = backend.get_trie_alt_hashing_threshold(); let delta = self.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); let child_delta = self.children() @@ -643,7 +633,7 @@ impl OverlayedChanges { |(k, v)| (&k[..], v.value().map(|v| &v[..])) ))); - let (root, transaction) = backend.full_storage_root_with_alt_hashing( + let (root, transaction) = backend.full_storage_root( delta, child_delta, alt_hashing, diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 9b3eb9cc5be58..f48e225844ada 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -340,21 +340,21 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> self.0.child_keys(child_info, prefix) } - fn storage_root_with_alt_hashing<'b>( + fn storage_root<'b>( &self, delta: impl Iterator)>, alt_hashing: Option, ) -> (H::Out, Self::Transaction) where H::Out: Ord { - self.0.storage_root_with_alt_hashing(delta, alt_hashing) + self.0.storage_root(delta, alt_hashing) } - fn child_storage_root_with_alt_hashing<'b>( + fn child_storage_root<'b>( &self, child_info: &ChildInfo, delta: impl Iterator)>, alt_hashing: Option, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { - self.0.child_storage_root_with_alt_hashing(child_info, delta, alt_hashing) + self.0.child_storage_root(child_info, delta, alt_hashing) } fn 
register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) { } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 4f7001800cbd0..1cb34ff669ec9 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -32,6 +32,10 @@ use sp_std::{boxed::Box, vec::Vec}; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. pub struct TrieBackend, H: Hasher> { pub (crate) essence: TrieBackendEssence, + // Allows setting alt hashing at start for testing only + // (see in_memory_backend that cannot read from state as + // it changes. + pub (crate) force_alt_hashing: Option>, } impl, H: Hasher> TrieBackend where H::Out: Codec { @@ -39,6 +43,7 @@ impl, H: Hasher> TrieBackend where H::Out: Codec pub fn new(storage: S, root: H::Out) -> Self { TrieBackend { essence: TrieBackendEssence::new(storage, root), + force_alt_hashing: None, } } @@ -168,11 +173,16 @@ impl, H: Hasher> Backend for TrieBackend where collect_all().map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() } - fn storage_root_with_alt_hashing<'a>( + fn storage_root<'a>( &self, delta: impl Iterator)>, use_inner_hash_value: Option, ) -> (H::Out, Self::Transaction) where H::Out: Ord { + let use_inner_hash_value = if let Some(force) = self.force_alt_hashing.as_ref() { + force.clone() + } else { + use_inner_hash_value + }; let mut write_overlay = S::Overlay::default(); let mut root = *self.essence.root(); @@ -199,12 +209,18 @@ impl, H: Hasher> Backend for TrieBackend where (root, write_overlay) } - fn child_storage_root_with_alt_hashing<'a>( + fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, use_inner_hash_value: Option, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { + let use_inner_hash_value = if let Some(force) = self.force_alt_hashing.as_ref() { + force.clone() + } else { + 
use_inner_hash_value + }; + let default_root = match child_info.child_type() { ChildType::ParentKeyId => empty_child_trie_root::>() }; diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index f63323bbaa17f..a38d3e41135f5 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -220,8 +220,7 @@ impl Storage { /// Utility function to get trie inner value hash threshold from /// backend state or pending changes. pub fn get_trie_alt_hashing_threshold(&self) -> Option { - self.top.get(well_known_keys::TRIE_HASHING_CONFIG) - .and_then(|encoded| trie_threshold_decode(&mut encoded.as_slice())) + alt_hashing::get_trie_alt_hashing_threshold(&self.top) } /// Utility function to modify trie inner value hash threshold. @@ -238,6 +237,19 @@ impl Storage { } } +/// alt hashing related utils. +#[cfg(feature = "std")] +pub mod alt_hashing { + use super::*; + + /// Utility function to get trie inner value hash threshold from + /// backend state or pending changes. + pub fn get_trie_alt_hashing_threshold(map: &StorageMap) -> Option { + map.get(well_known_keys::TRIE_HASHING_CONFIG) + .and_then(|encoded| trie_threshold_decode(&mut encoded.as_slice())) + } +} + /// Information related to a child state. 
#[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] From 97a1aa6721480396101e3d3c78a2ae10491554e1 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 10 Jun 2021 18:15:02 +0200 Subject: [PATCH 049/188] Applying from previous state --- client/api/src/in_mem.rs | 2 - client/db/src/bench.rs | 8 +- client/db/src/lib.rs | 20 +---- client/db/src/storage_cache.rs | 12 +-- client/executor/src/integration_tests/mod.rs | 74 +++++++++++++------ client/light/src/backend.rs | 9 +-- client/service/test/src/client/light.rs | 6 +- primitives/state-machine/src/backend.rs | 7 -- primitives/state-machine/src/basic.rs | 2 +- primitives/state-machine/src/ext.rs | 3 +- .../state-machine/src/in_memory_backend.rs | 12 ++- primitives/state-machine/src/lib.rs | 20 +++-- .../src/overlayed_changes/mod.rs | 3 - .../state-machine/src/proving_backend.rs | 64 ++++------------ primitives/state-machine/src/trie_backend.rs | 45 +++-------- primitives/trie/src/lib.rs | 7 +- 16 files changed, 117 insertions(+), 177 deletions(-) diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 899fdd5e66690..d756e1cc0bbc4 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -573,11 +573,9 @@ impl backend::BlockImportOperation for BlockImportOperatio ) ); - let alt_hashing = storage.get_trie_alt_hashing_threshold(); let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, - alt_hashing, ); self.new_state = Some(transaction); diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index af626d3af7c2f..0b297ad402871 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -160,12 +160,10 @@ impl BenchmarkingState { &child_content.child_info, child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), )); - let alt_hashing = genesis.get_trie_alt_hashing_threshold(); let (root, transaction): (B::Hash, _) = 
state.state.borrow_mut().as_mut().unwrap() .full_storage_root( genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, - alt_hashing, ); state.genesis = transaction.clone().drain(); state.genesis_root = root.clone(); @@ -407,21 +405,19 @@ impl StateBackend> for BenchmarkingState { fn storage_root<'a>( &self, delta: impl Iterator)>, - alt_hashing: Option, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { self.state.borrow().as_ref() - .map_or(Default::default(), |s| s.storage_root(delta, alt_hashing)) + .map_or(Default::default(), |s| s.storage_root(delta)) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: Option, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { self.state.borrow().as_ref().map_or( Default::default(), - |s| s.child_storage_root(child_info, delta, alt_hashing), + |s| s.child_storage_root(child_info, delta), ) } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 5fa44f8c2e1c3..9f320d83839b0 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -227,18 +227,16 @@ impl StateBackend> for RefTrackingState { fn storage_root<'a>( &self, delta: impl Iterator)>, - alt_hashing: Option, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.storage_root(delta, alt_hashing) + self.state.storage_root(delta) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: Option, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.child_storage_root(child_info, delta, alt_hashing) + self.state.child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -778,7 +776,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc )); let mut changes_trie_config: Option = None; - let alt_hashing = storage.get_trie_alt_hashing_threshold(); + // TODO test genesis init with a threshold let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| 
{ if &k[..] == well_known_keys::CHANGES_TRIE_CONFIG { @@ -790,7 +788,6 @@ impl sc_client_api::backend::BlockImportOperation for Bloc (&k[..], Some(&v[..])) }), child_delta, - alt_hashing, ); self.db_updates = transaction; @@ -2328,7 +2325,6 @@ pub(crate) mod tests { header.state_root = op.old_state.storage_root(storage .iter() .map(|(x, y)| (&x[..], Some(&y[..]))), - alt_hashing, ).0.into(); let hash = header.hash(); @@ -2373,7 +2369,6 @@ pub(crate) mod tests { let (root, overlay) = op.old_state.storage_root( storage.iter() .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))), - alt_hashing, ); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); @@ -2400,7 +2395,6 @@ pub(crate) mod tests { fn delete_only_when_negative_rc() { sp_tracing::try_init_simple(); let key; - let alt_hashing = false; let backend = Backend::::new_test(1, 0); let hash = { @@ -2414,7 +2408,7 @@ pub(crate) mod tests { extrinsics_root: Default::default(), }; - header.state_root = op.old_state.storage_root(std::iter::empty(), alt_hashing).0.into(); + header.state_root = op.old_state.storage_root(std::iter::empty()).0.into(); let hash = header.hash(); op.reset_storage(Storage { @@ -2455,7 +2449,6 @@ pub(crate) mod tests { .iter() .cloned() .map(|(x, y)| (x, Some(y))), - alt_hashing, ).0.into(); let hash = header.hash(); @@ -2493,7 +2486,6 @@ pub(crate) mod tests { .iter() .cloned() .map(|(x, y)| (x, Some(y))), - alt_hashing, ).0.into(); let hash = header.hash(); @@ -2531,7 +2523,6 @@ pub(crate) mod tests { .iter() .cloned() .map(|(x, y)| (x, Some(y))), - alt_hashing, ).0.into(); op.set_block_data( @@ -2847,7 +2838,6 @@ pub(crate) mod tests { #[test] fn storage_hash_is_cached_correctly() { let backend = Backend::::new_test(10, 10); - let alt_hashing = false; let hash0 = { let mut op = backend.begin_operation().unwrap(); @@ -2865,7 +2855,6 @@ pub(crate) mod tests { header.state_root = op.old_state.storage_root(storage .iter() .map(|(x, y)| (&x[..], Some(&y[..]))), - alt_hashing, 
).0.into(); let hash = header.hash(); @@ -2906,7 +2895,6 @@ pub(crate) mod tests { let (root, overlay) = op.old_state.storage_root( storage.iter() .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))), - alt_hashing, ); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index b249ea14748ac..cb2ab1de1b6c9 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -645,18 +645,16 @@ impl>, B: BlockT> StateBackend> for Cachin fn storage_root<'a>( &self, delta: impl Iterator)>, - alt_hashing: Option, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.storage_root(delta, alt_hashing) + self.state.storage_root(delta) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: Option, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.child_storage_root(child_info, delta, alt_hashing) + self.state.child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -829,18 +827,16 @@ impl>, B: BlockT> StateBackend> for Syncin fn storage_root<'a>( &self, delta: impl Iterator)>, - alt_hashing: Option, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.caching_state().storage_root(delta, alt_hashing) + self.caching_state().storage_root(delta) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: Option, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.caching_state().child_storage_root(child_info, delta, alt_hashing) + self.caching_state().child_storage_root(child_info, delta) } fn pairs(&self) -> Vec<(Vec, Vec)> { diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 164c576540809..b06b5f4e8df76 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -790,30 +790,60 @@ fn 
state_hashing_update(wasm_method: WasmExecutionMethod) { // use externalities without storage flag. let mut ext = TestExternalities::new(Default::default()); - let mut ext = ext.ext(); - ext.set_storage(b"foo".to_vec(), vec![1u8; 1_000]); // big inner hash - ext.set_storage(b"foo2".to_vec(), vec![3u8; 16]); // no inner hash - ext.set_storage(b"foo222".to_vec(), vec![5u8; 100]); // inner hash - - let output = call_in_wasm( - "test_data_in", - &b"Hello world".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(); + let root1 = { + let mut ext = ext.ext(); + ext.set_storage(b"foo".to_vec(), b"bar".to_vec()); + let output = call_in_wasm( + "test_data_in", + &vec![1u8; 100].encode(), + wasm_method, + &mut ext, + ).unwrap(); - assert_eq!(output, b"all ok!".to_vec().encode()); + assert_eq!(output, b"all ok!".to_vec().encode()); + ext.storage_root() + }; - let root1 = ext.storage_root(); - // flag state. - let _ = call_in_wasm( - "test_switch_state", - Default::default(), - wasm_method, - &mut ext, - ).unwrap(); - let root2 = ext.storage_root(); + ext.commit_all().unwrap(); + let root2 = { + let mut ext = ext.ext(); + // flag state. + let _ = call_in_wasm( + "test_switch_state", + Default::default(), + wasm_method, + &mut ext, + ).unwrap(); + ext.storage_root() + }; - // Note that in this case all the value did switch (in memory changes). 
assert!(root1 != root2); + + ext.commit_all().unwrap(); + let root3 = { + let mut ext = ext.ext(); + let _ = call_in_wasm( + "test_data_in", + &vec![2u8; 100].to_vec().encode(), + wasm_method, + &mut ext, + ).unwrap(); + ext.storage_root() + }; + assert!(root2 != root3); + + ext.commit_all().unwrap(); + let root3 = { + let mut ext = ext.ext(); + // revert to root 2 state, but this time + // inner hashing should apply + let _ = call_in_wasm( + "test_data_in", + &vec![1u8; 100].to_vec().encode(), + wasm_method, + &mut ext, + ).unwrap(); + ext.storage_root() + }; + assert!(root2 != root3); } diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 7b07477012d72..d5707f4796b34 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -324,7 +324,6 @@ impl BlockImportOperation for ImportOperation fn reset_storage(&mut self, input: Storage) -> ClientResult { check_genesis_storage(&input)?; - let alt_hashing = input.get_trie_alt_hashing_threshold(); // changes trie configuration let changes_trie_config = input.top.iter() .find(|(k, _)| &k[..] 
== well_known_keys::CHANGES_TRIE_CONFIG) @@ -347,7 +346,7 @@ impl BlockImportOperation for ImportOperation } let storage_update = InMemoryBackend::from(storage); - let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta, alt_hashing); + let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); self.storage_update = Some(storage_update); Ok(storage_root) @@ -490,11 +489,10 @@ impl StateBackend for GenesisOrUnavailableState fn storage_root<'a>( &self, delta: impl Iterator)>, - alt_hashing: Option, ) -> (H::Out, Self::Transaction) where H::Out: Ord { match *self { GenesisOrUnavailableState::Genesis(ref state) => state - .storage_root(delta, alt_hashing), + .storage_root(delta), GenesisOrUnavailableState::Unavailable => Default::default(), } } @@ -503,12 +501,11 @@ impl StateBackend for GenesisOrUnavailableState &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: Option, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { match *self { GenesisOrUnavailableState::Genesis(ref state) => { let (root, is_equal, _) = state - .child_storage_root(child_info, delta, alt_hashing); + .child_storage_root(child_info, delta); (root, is_equal, Default::default()) }, GenesisOrUnavailableState::Unavailable => diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index d4e3220de73c6..e40538e08ebb9 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -459,11 +459,10 @@ fn prepare_for_read_proof_check(hashed_value: bool) -> (TestChecker, Header, Sto // prepare remote client let remote_client = substrate_test_runtime_client::new(hashed_value); let remote_block_id = BlockId::Number(0); - let flagged = false; let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); remote_block_header.state_root = 
remote_client.state_at(&remote_block_id).unwrap() - .storage_root(std::iter::empty(), flagged).0.into(); + .storage_root(std::iter::empty()).0.into(); // 'fetch' read proof from remote node let heap_pages = remote_client.storage(&remote_block_id, &StorageKey(well_known_keys::HEAP_PAGES.to_vec())) @@ -496,7 +495,6 @@ fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, V use substrate_test_runtime_client::TestClientBuilderExt; let child_info = ChildInfo::new_default(b"child1"); let child_info = &child_info; - let flagged = false; // prepare remote client let remote_client = substrate_test_runtime_client::TestClientBuilder::new() .add_extra_child_storage( @@ -508,7 +506,7 @@ fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, V let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() - .storage_root(std::iter::empty(), flagged).0.into(); + .storage_root(std::iter::empty()).0.into(); // 'fetch' child read proof from remote node let child_value = remote_client.child_storage( diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 4976d6a43d5db..946de1f6469c5 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -126,22 +126,18 @@ pub trait Backend: sp_std::fmt::Debug { /// `alt_hashing` indicate if trie state should apply alternate hashing /// scheme (inner value hashed). /// Does not include child storage updates. - /// Alt hashing paremeter must contain possible changes from delta. 
fn storage_root<'a>( &self, delta: impl Iterator)>, - alt_hashing: Option, ) -> (H::Out, Self::Transaction) where H::Out: Ord; /// Calculate the child storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. The second argument /// is true if child storage root equals default storage root. - /// Alt hashing paremeter must contain possible changes from delta. fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: Option, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord; /// Get all key/value pairs into a Vec. @@ -180,7 +176,6 @@ pub trait Backend: sp_std::fmt::Debug { &'a ChildInfo, impl Iterator)>, )>, - alt_hashing: Option, ) -> (H::Out, Self::Transaction) where H::Out: Ord + Encode { let mut txs: Self::Transaction = Default::default(); let mut child_roots: Vec<_> = Default::default(); @@ -189,7 +184,6 @@ pub trait Backend: sp_std::fmt::Debug { let (child_root, empty, child_txs) = self.child_storage_root( &child_info, child_delta, - alt_hashing, ); let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); @@ -206,7 +200,6 @@ pub trait Backend: sp_std::fmt::Debug { .iter() .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) ), - alt_hashing, ); txs.consolidate(parent_txs); (root, txs) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 4a691e4eb208b..550f6704d8cff 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -302,7 +302,7 @@ impl Externalities for BasicExternalities { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); crate::in_memory_backend::new_in_mem::(self.alt_hashing.clone()) - .child_storage_root(&child.child_info, delta, self.alt_hashing.clone()).0 + .child_storage_root(&child.child_info, delta).0 } 
else { empty_child_trie_root::>() }.encode() diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 3927432fbc94e..f788f6a4f3bf7 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -549,9 +549,8 @@ where root.encode() } else { let root = if let Some((changes, info)) = self.overlay.child_changes(storage_key) { - let alt_hashing = self.backend.get_trie_alt_hashing_threshold(); let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); - Some(self.backend.child_storage_root(info, delta, alt_hashing)) + Some(self.backend.child_storage_root(info, delta)) } else { None }; diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index f7a97f1bb2c26..89ea70fd7106d 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -67,7 +67,6 @@ where .filter_map(|v| v.0.as_ref().map(|c| (c, v.1.iter().map(|(k, v)| (&k[..], v.as_deref())))) ), - self.force_alt_hashing.clone().flatten(), ); self.apply_transaction(root, transaction); @@ -90,6 +89,13 @@ where pub fn eq(&self, other: &Self) -> bool { self.root() == other.root() } + + + /// To reset with threshold for genesis storage, this function allows + /// setting a alt hashing threshold at start. + pub fn force_alt_hashing(&mut self, threshold: Option) { + self.force_alt_hashing = Some(threshold); + } } impl Clone for TrieBackend, H> @@ -181,7 +187,7 @@ mod tests { /// Assert in memory backend with only child trie keys works as trie backend. 
#[test] fn in_memory_with_child_trie_only() { - let storage = new_in_mem::(); + let storage = new_in_mem::(None); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; let mut storage = storage.update( @@ -199,7 +205,7 @@ mod tests { #[test] fn insert_multiple_times_child_data_works() { - let mut storage = new_in_mem::(); + let mut storage = new_in_mem::(None); let child_info = ChildInfo::new_default(b"1"); storage.insert(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])]); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 20efdb1236a1a..0b27ee7b47b3c 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -894,7 +894,7 @@ mod tests { traits::CodeExecutor, }; use crate::execution::CallResult; - + use sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD; #[derive(Clone)] struct DummyCodeExecutor { @@ -1086,9 +1086,7 @@ mod tests { // fetch execution proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(flagged); - let remote_root = remote_backend.storage_root(std::iter::empty(), flagged).0; - let remote_root_2 = remote_backend.storage_root(std::iter::empty(), false).0; - assert_eq!(remote_root, remote_root_2); + let remote_root = remote_backend.storage_root(std::iter::empty()).0; let (remote_result, remote_proof) = prove_execution::<_, _, u64, _, _>( remote_backend, &mut Default::default(), @@ -1244,7 +1242,7 @@ mod tests { fn set_child_storage_works() { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; - let mut state = new_in_mem::(); + let mut state = new_in_mem::(None); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1290,7 +1288,7 @@ mod tests { b"d4".to_vec(), ]; let key = b"key".to_vec(); - let mut state = new_in_mem::(); + let mut state = new_in_mem::(None); let 
backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1351,7 +1349,7 @@ mod tests { let key = b"events".to_vec(); let mut cache = StorageTransactionCache::default(); - let mut state = new_in_mem::(); + let mut state = new_in_mem::(None); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -1444,7 +1442,7 @@ mod tests { let child_info = &child_info; // fetch read proof from 'remote' full node let remote_backend = trie_backend::tests::test_trie(flagged); - let remote_root = remote_backend.storage_root(::std::iter::empty(), flagged).0; + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); // check proof locally let local_result1 = read_proof_check::( @@ -1465,7 +1463,7 @@ mod tests { assert_eq!(local_result2, false); // on child trie let remote_backend = trie_backend::tests::test_trie(flagged); - let remote_root = remote_backend.storage_root(::std::iter::empty(), false).0; + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_child_read( remote_backend, child_info, @@ -1514,7 +1512,7 @@ mod tests { let check_proof = |mdb, root| -> StorageProof { let remote_backend = TrieBackend::new(mdb, root); - let remote_root = remote_backend.storage_root(::std::iter::empty(), false).0; + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"foo222"]).unwrap(); // check proof locally let local_result1 = read_proof_check::( @@ -1540,7 +1538,7 @@ mod tests { // do switch - layout = Layout::with_inner_hashing(); + layout = Layout::with_inner_hashing(TRESHOLD); // update with same value do not change { let mut trie = TrieDBMut::from_existing_with_layout( diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs 
b/primitives/state-machine/src/overlayed_changes/mod.rs index ea1c973ebbfa1..fc377b808f640 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -625,8 +625,6 @@ impl OverlayedChanges { ) -> H::Out where H::Out: Ord + Encode, { - let alt_hashing = backend.get_trie_alt_hashing_threshold(); - let delta = self.changes().map(|(k, v)| (&k[..], v.value().map(|v| &v[..]))); let child_delta = self.children() .map(|(changes, info)| (info, changes.map( @@ -636,7 +634,6 @@ impl OverlayedChanges { let (root, transaction) = backend.full_storage_root( delta, child_delta, - alt_hashing, ); cache.transaction = Some(transaction); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index f48e225844ada..1e8b1498a4f6f 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -343,18 +343,16 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> fn storage_root<'b>( &self, delta: impl Iterator)>, - alt_hashing: Option, ) -> (H::Out, Self::Transaction) where H::Out: Ord { - self.0.storage_root(delta, alt_hashing) + self.0.storage_root(delta) } fn child_storage_root<'b>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - alt_hashing: Option, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { - self.0.child_storage_root(child_info, delta, alt_hashing) + self.0.child_storage_root(child_info, delta) } fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) { } @@ -440,8 +438,8 @@ mod tests { assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - let (trie_root, mut trie_mdb) = trie_backend.storage_root(std::iter::empty(), flagged); - let (proving_root, mut proving_mdb) = proving_backend.storage_root(std::iter::empty(), flagged); + let (trie_root, mut trie_mdb) = 
trie_backend.storage_root(std::iter::empty()); + let (proving_root, mut proving_mdb) = proving_backend.storage_root(std::iter::empty()); assert_eq!(trie_root, proving_root); assert_eq!(trie_mdb.drain(), proving_mdb.drain()); } @@ -454,54 +452,22 @@ mod tests { fn proof_recorded_and_checked_inner(flagged: bool) { let size_content = 34; // above hashable value treshold. let value_range = 0..64; - let mut contents = value_range.clone().map(|i| (vec![i], Some(vec![i; size_content]))).collect::>(); + let contents = value_range.clone().map(|i| (vec![i], Some(vec![i; size_content]))).collect::>(); + let mut in_memory = InMemoryBackend::::default(); if flagged { - contents.push(( + in_memory = in_memory.update(vec![(None, vec![( sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), Some(sp_core::storage::trie_threshold_encode( sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, )), - )); + )])]); } - let in_memory = InMemoryBackend::::default(); let mut in_memory = in_memory.update(vec![(None, contents)]); - let in_memory_root = in_memory.storage_root(std::iter::empty(), flagged).0; + let in_memory_root = in_memory.storage_root(std::iter::empty()).0; value_range.clone().for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(std::iter::empty(), flagged).0; - assert_eq!(in_memory_root, trie_root); - value_range.clone().for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); - - let proving = ProvingBackend::new(trie); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); - - let proof = proving.extract_proof(); - - let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); - } - - #[test] - fn proof_recorded_and_checked_old_hash() { - // test proof starting with old hash content 
and flagging in between. - // TODO not that usefull (we do run with direct update). -> replace by change of threshold - // test. - let size_content = 34; // above hashable value treshold. - let value_range = 0..64; - let contents = value_range.clone().map(|i| (vec![i], Some(vec![i; size_content]))).collect::>(); - let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(vec![(None, contents)]); - value_range.clone().for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); - in_memory = in_memory.update(vec![(None, vec![( - sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), - Some(sp_core::storage::trie_threshold_encode( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - )), - )])]); - let in_memory_root = in_memory.storage_root(std::iter::empty(), true).0; - let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(std::iter::empty(), true).0; + let trie_root = trie.storage_root(std::iter::empty()).0; assert_eq!(in_memory_root, trie_root); value_range.clone().for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); @@ -531,16 +497,16 @@ mod tests { (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; + let mut in_memory = InMemoryBackend::::default(); if flagged { - contents[0].1.push(( + in_memory = in_memory.update(vec![(None, vec![( sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), Some(sp_core::storage::trie_threshold_encode( sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, )), - )); + )])]); } - let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(contents); + in_memory = in_memory.update(contents); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory.full_storage_root( std::iter::empty(), @@ -560,7 +526,7 @@ mod tests { )); let trie = in_memory.as_trie_backend().unwrap(); - let 
trie_root = trie.storage_root(std::iter::empty(), flagged).0; + let trie_root = trie.storage_root(std::iter::empty()).0; assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!( trie.storage(&[i]).unwrap().unwrap(), diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 1cb34ff669ec9..e0e44dd264b89 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -176,12 +176,12 @@ impl, H: Hasher> Backend for TrieBackend where fn storage_root<'a>( &self, delta: impl Iterator)>, - use_inner_hash_value: Option, ) -> (H::Out, Self::Transaction) where H::Out: Ord { let use_inner_hash_value = if let Some(force) = self.force_alt_hashing.as_ref() { force.clone() } else { - use_inner_hash_value + // TODO try memoize in force + self.get_trie_alt_hashing_threshold() }; let mut write_overlay = S::Overlay::default(); let mut root = *self.essence.root(); @@ -213,12 +213,11 @@ impl, H: Hasher> Backend for TrieBackend where &self, child_info: &ChildInfo, delta: impl Iterator)>, - use_inner_hash_value: Option, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { let use_inner_hash_value = if let Some(force) = self.force_alt_hashing.as_ref() { force.clone() } else { - use_inner_hash_value + self.get_trie_alt_hashing_threshold() }; let default_root = match child_info.child_type() { @@ -283,6 +282,7 @@ impl, H: Hasher> Backend for TrieBackend where pub mod tests { use std::{collections::HashSet, iter}; use sp_core::H256; + use sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD; use codec::Encode; use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; use sp_runtime::traits::BlakeTwo256; @@ -305,7 +305,7 @@ pub mod tests { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); let mut trie = if hashed_value { - let layout = Layout::with_inner_hashing(); + let layout = Layout::with_inner_hashing(TRESHOLD); 
TrieDBMut::new_with_layout(&mut mdb, &mut root, layout) } else { TrieDBMut::new(&mut mdb, &mut root) @@ -320,9 +320,7 @@ pub mod tests { if hashed_value { trie.insert( sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG, - sp_core::storage::trie_threshold_encode( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - ).as_slice(), + sp_core::storage::trie_threshold_encode(TRESHOLD).as_slice(), ).unwrap(); } for i in 128u8..255u8 { @@ -391,41 +389,20 @@ pub mod tests { storage_root_is_non_default_inner(true); } fn storage_root_is_non_default_inner(flagged: bool) { - assert!(test_trie(flagged).storage_root(iter::empty(), flagged).0 != H256::repeat_byte(0)); - } - - #[test] - fn storage_root_transaction_state_root_update() { - // a drop a insert of same hash: rc is 0 - assert_eq!(test_trie(false).storage_root(iter::empty(), false).1.drain() - .into_iter().filter(|v| (v.1).1 != 0).count(), 0); - // Unchanged - assert_eq!(test_trie(false).storage_root(iter::empty(), true).1.drain() - .into_iter().filter(|v| (v.1).1 != 0).count(), 0); - // a drop a insert of same hash: rc is 0 - assert_eq!(test_trie(true).storage_root(iter::empty(), true).1.drain() - .into_iter().filter(|v| (v.1).1 != 0).count(), 0); - } - - #[test] - fn storage_root_flagged_is_empty() { - assert!(test_trie(false).storage_root(iter::empty(), true).1.drain().is_empty()); + assert!(test_trie(flagged).storage_root(iter::empty()).0 != H256::repeat_byte(0)); } #[test] fn storage_root_transaction_is_non_empty() { - storage_root_transaction_is_non_empty_inner(false, false); - storage_root_transaction_is_non_empty_inner(false, true); - storage_root_transaction_is_non_empty_inner(true, false); - storage_root_transaction_is_non_empty_inner(true, true); + storage_root_transaction_is_non_empty_inner(false); + storage_root_transaction_is_non_empty_inner(true); } - fn storage_root_transaction_is_non_empty_inner(flagged: bool, do_flag: bool) { + fn storage_root_transaction_is_non_empty_inner(flagged: bool) { let 
(new_root, mut tx) = test_trie(flagged).storage_root( iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), - do_flag, ); assert!(!tx.drain().is_empty()); assert!(new_root != test_trie(false).storage_root(iter::empty()).0); } #[test] diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 1b8acc9a796a3..9eafffd312877 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -833,6 +833,7 @@ mod tests { use super::*; use codec::{Encode, Decode, Compact}; use sp_core::Blake2Hasher; + use sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD; use hash_db::{HashDB, Hasher}; use trie_db::{DBValue, TrieMut, Trie, NodeCodec as NodeCodecT}; use trie_standardmap::{Alphabet, ValueMode, StandardMap}; @@ -890,7 +891,7 @@ mod tests { let layout = Layout::default(); check_equivalent::(input, layout.clone()); check_iteration::(input, layout); - let layout = Layout::with_inner_hashing(); + let layout = Layout::with_inner_hashing(TRESHOLD); check_equivalent::(input, layout.clone()); check_iteration::(input, layout); } @@ -1043,7 +1044,7 @@ mod tests { }.make_with(seed.as_fixed_bytes_mut()); let layout = if flag { - Layout::with_inner_hashing() + Layout::with_inner_hashing(TRESHOLD) } else { Layout::default() }; @@ -1140,7 +1141,7 @@ mod tests { } fn iterator_works_inner(flag: bool) { let layout = if flag { - Layout::with_inner_hashing() + Layout::with_inner_hashing(TRESHOLD) } else { Layout::default() }; From f4849a2d8b5d7c5adef7a4223220bfddb4a6b807 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 10 Jun 2021 18:28:59 +0200 Subject: [PATCH 050/188] Remove default from storage, genesis needs a special build.
--- primitives/state-machine/src/basic.rs | 5 +++-- .../state-machine/src/in_memory_backend.rs | 19 ++++++------------- primitives/state-machine/src/lib.rs | 6 +++--- .../state-machine/src/proving_backend.rs | 2 +- 4 files changed, 13 insertions(+), 19 deletions(-) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 550f6704d8cff..7d7be10f2b584 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -301,8 +301,9 @@ impl Externalities for BasicExternalities { ) -> Vec { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); - crate::in_memory_backend::new_in_mem::(self.alt_hashing.clone()) - .child_storage_root(&child.child_info, delta).0 + let mut in_mem = crate::in_memory_backend::new_in_mem::(); + in_mem.force_alt_hashing(self.alt_hashing.clone()); + in_mem.child_storage_root(&child.child_info, delta).0 } else { empty_child_trie_root::>() }.encode() diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 89ea70fd7106d..84c0b316387da 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -27,14 +27,12 @@ use codec::Codec; use sp_core::storage::{ChildInfo, Storage}; /// Create a new empty instance of in-memory backend. -pub fn new_in_mem(initial_alt_hashing: Option) -> TrieBackend, H> +pub fn new_in_mem() -> TrieBackend, H> where H::Out: Codec + Ord, { let db = MemoryDB::default(); - let mut trie = TrieBackend::new(db, empty_trie_root::>()); - trie.force_alt_hashing = Some(initial_alt_hashing); - trie + TrieBackend::new(db, empty_trie_root::>()) } impl TrieBackend, H> @@ -90,8 +88,6 @@ where self.root() == other.root() } - - /// To reset with threshold for genesis storage, this function allows /// setting a alt hashing threshold at start. 
pub fn force_alt_hashing(&mut self, threshold: Option) { self.force_alt_hashing = Some(threshold); @@ -112,7 +108,7 @@ where H::Out: Codec + Ord, { fn default() -> Self { - new_in_mem(None) + new_in_mem() } } @@ -122,10 +118,7 @@ where H::Out: Codec + Ord, { fn from(inner: HashMap, BTreeMap>) -> Self { - let alt_hashing = inner.get(&None).map(|map| - sp_core::storage::alt_hashing::get_trie_alt_hashing_threshold(&map) - ).flatten(); - let mut backend = new_in_mem(alt_hashing); + let mut backend = new_in_mem(); backend.insert( inner.into_iter().map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect())), ); @@ -187,7 +180,7 @@ mod tests { /// Assert in memory backend with only child trie keys works as trie backend. #[test] fn in_memory_with_child_trie_only() { - let storage = new_in_mem::(None); + let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; let mut storage = storage.update( @@ -205,7 +198,7 @@ mod tests { #[test] fn insert_multiple_times_child_data_works() { - let mut storage = new_in_mem::(None); + let mut storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); storage.insert(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])]); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 0b27ee7b47b3c..1b911e7696499 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1242,7 +1242,7 @@ mod tests { fn set_child_storage_works() { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; - let mut state = new_in_mem::(None); + let mut state = new_in_mem::(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1288,7 +1288,7 @@ mod tests { b"d4".to_vec(), ]; let key = b"key".to_vec(); - let mut state = new_in_mem::(None); + let mut state = new_in_mem::(); let 
backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -1349,7 +1349,7 @@ mod tests { let key = b"events".to_vec(); let mut cache = StorageTransactionCache::default(); - let mut state = new_in_mem::(None); + let mut state = new_in_mem::(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 1e8b1498a4f6f..558e22cdc7411 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -490,7 +490,7 @@ mod tests { let child_info_2 = ChildInfo::new_default(b"sub2"); let child_info_1 = &child_info_1; let child_info_2 = &child_info_2; - let mut contents = vec![ + let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>()), (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), From 4eabbde29bc40141ca9867a5ca316b0a34e72540 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 11 Jun 2021 09:23:38 +0200 Subject: [PATCH 051/188] rem empty space --- primitives/state-machine/src/trie_backend.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index e0e44dd264b89..ff6067d44d0d8 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -199,7 +199,7 @@ impl, H: Hasher> Backend for TrieBackend where }; delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta, layout) }; - + match res() { Ok(ret) => root = ret, Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), @@ -219,7 +219,7 @@ impl, H: Hasher> Backend for TrieBackend where } else { self.get_trie_alt_hashing_threshold() }; - + let default_root = match child_info.child_type() { ChildType::ParentKeyId => 
empty_child_trie_root::>() }; From ae627f6ca25187a201c8a193a9b628fe8ee855b3 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 11 Jun 2021 15:00:21 +0200 Subject: [PATCH 052/188] Catch problem: when using triedb with default: we should not revert nodes: otherwhise thing as trie codec cannot decode-encode without changing state. --- Cargo.lock | 16 ++-- primitives/state-machine/src/lib.rs | 21 ++++- primitives/trie/src/lib.rs | 135 ++++++++++++++++++++++++++- primitives/trie/src/node_codec.rs | 12 ++- primitives/trie/src/node_header.rs | 2 +- primitives/trie/src/storage_proof.rs | 14 +-- primitives/trie/src/trie_codec.rs | 24 +++-- 7 files changed, 192 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d2fe310d8bdbb..3871808cdef0b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2370,7 +2370,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" [[package]] name = "hash256-std-hasher" @@ -2384,7 +2384,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "crunchy", ] @@ -3066,7 +3066,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", @@ -3881,7 
+3881,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "hash-db", "hashbrown", @@ -10565,7 +10565,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "criterion", "hash-db", @@ -10580,7 +10580,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.5" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "hash-db", "hashbrown", @@ -10592,7 +10592,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "hash-db", ] @@ -10610,7 +10610,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#22910ad4af83e00b549cee918226efa940fdd3cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", diff --git 
a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index ffb7152287f9b..420f8373fa43a 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1585,19 +1585,31 @@ mod tests { remote_proof.encoded_size()); } + #[test] fn compact_multiple_child_trie() { + let size_inner_hash = compact_multiple_child_trie_inner(true); + let size_no_inner_hash = compact_multiple_child_trie_inner(false); + assert!(size_inner_hash < size_no_inner_hash); + } + fn compact_multiple_child_trie_inner(flagged: bool) -> usize { // this root will be queried let child_info1 = ChildInfo::new_default(b"sub1"); // this root will not be include in proof let child_info2 = ChildInfo::new_default(b"sub2"); // this root will be include in proof let child_info3 = ChildInfo::new_default(b"sub"); - let mut remote_backend = trie_backend::tests::test_trie(); + let mut remote_backend = trie_backend::tests::test_trie(flagged); + let long_vec: Vec = (0..1024usize).map(|_| 8u8).collect(); let (remote_root, transaction) = remote_backend.full_storage_root( std::iter::empty(), vec![ (&child_info1, vec![ - (&b"key1"[..], Some(&b"val2"[..])), + // a inner hashable node + (&b"k"[..], Some(&long_vec[..])), + // need to ensure this is not an inline node + // otherwhise we do not know what is accessed when + // storing proof. 
+ (&b"key1"[..], Some(&vec![5u8; 32][..])), (&b"key2"[..], Some(&b"val3"[..])), ].into_iter()), (&child_info2, vec![ @@ -1612,11 +1624,13 @@ mod tests { ); remote_backend.backend_storage_mut().consolidate(transaction); remote_backend.essence.set_root(remote_root.clone()); + println!("{:?}", remote_root); let remote_proof = prove_child_read( remote_backend, &child_info1, &[b"key1"], ).unwrap(); + let size = remote_proof.encoded_size(); let remote_proof = test_compact(remote_proof, &remote_root); let local_result1 = read_child_proof_check::( remote_root, @@ -1625,7 +1639,8 @@ mod tests { &[b"key1"], ).unwrap(); assert_eq!(local_result1.len(), 1); - assert_eq!(local_result1.get(&b"key1"[..]), Some(&Some(b"val2".to_vec()))); + assert_eq!(local_result1.get(&b"key1"[..]), Some(&Some(vec![5u8; 32]))); + size } #[test] diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 8acdcd1b8e85c..49e115c91a747 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -28,7 +28,7 @@ mod trie_stream; use sp_std::{boxed::Box, marker::PhantomData, vec, vec::Vec, borrow::Borrow, fmt}; use hash_db::{Hasher, Prefix}; -//use trie_db::proof::{generate_proof, verify_proof}; +use trie_db::proof::{generate_proof, verify_proof}; pub use trie_db::proof::VerifyError; /// Our `NodeCodec`-specific error. pub use error::Error; @@ -438,6 +438,52 @@ pub mod trie_types { pub type TrieError = trie_db::TrieError; } +/// Create a proof for a subset of keys in a trie. +/// +/// The `keys` may contain any set of keys regardless of each one of them is included +/// in the `db`. +/// +/// For a key `K` that is included in the `db` a proof of inclusion is generated. +/// For a key `K` that is not included in the `db` a proof of non-inclusion is generated. +/// These can be later checked in `verify_trie_proof`. 
+pub fn generate_trie_proof<'a, L, I, K, DB>( + db: &DB, + root: TrieHash, + keys: I, +) -> Result>, Box>> where + L: TrieConfiguration, + I: IntoIterator, + K: 'a + AsRef<[u8]>, + DB: hash_db::HashDBRef>, +{ + // Can use default layout (read only). + let trie = TrieDB::::new(db, &root)?; + generate_proof(&trie, keys) +} + +/// Verify a set of key-value pairs against a trie root and a proof. +/// +/// Checks a set of keys with optional values for inclusion in the proof that was generated by +/// `generate_trie_proof`. +/// If the value in the pair is supplied (`(key, Some(value))`), this key-value pair will be +/// checked for inclusion in the proof. +/// If the value is omitted (`(key, None)`), this key will be checked for non-inclusion in the +/// proof. +pub fn verify_trie_proof<'a, L, I, K, V>( + root: &TrieHash, + proof: &[Vec], + items: I, +) -> Result<(), VerifyError, error::Error>> where + L: TrieConfiguration, + I: IntoIterator)>, + K: 'a + AsRef<[u8]>, + V: 'a + AsRef<[u8]>, +{ + // No specific info to read from layout. + let layout = Default::default(); + verify_proof::, _, _, _>(root, proof, items, layout) +} + /// Determine a trie root given a hash DB and delta values. pub fn delta_trie_root( db: &mut DB, @@ -1172,6 +1218,93 @@ mod tests { assert_eq!(pairs, iter_pairs); } + #[test] + fn proof_non_inclusion_works() { + let pairs = vec![ + (hex!("0102").to_vec(), hex!("01").to_vec()), + (hex!("0203").to_vec(), hex!("0405").to_vec()), + ]; + + let mut memdb = MemoryDB::default(); + let mut root = Default::default(); + let layout = Layout::default(); + populate_trie::(&mut memdb, &mut root, &pairs, layout); + + let non_included_key: Vec = hex!("0909").to_vec(); + let proof = generate_trie_proof::( + &memdb, + root, + &[non_included_key.clone()] + ).unwrap(); + + // Verifying that the K was not included into the trie should work. 
+ assert!(verify_trie_proof::>( + &root, + &proof, + &[(non_included_key.clone(), None)], + ).is_ok() + ); + + // Verifying that the K was included into the trie should fail. + assert!(verify_trie_proof::>( + &root, + &proof, + &[(non_included_key, Some(hex!("1010").to_vec()))], + ).is_err() + ); + } + + #[test] + fn proof_inclusion_works() { + let pairs = vec![ + (hex!("0102").to_vec(), hex!("01").to_vec()), + (hex!("0203").to_vec(), hex!("0405").to_vec()), + ]; + + let mut memdb = MemoryDB::default(); + let mut root = Default::default(); + let layout = Layout::default(); + populate_trie::(&mut memdb, &mut root, &pairs, layout); + + let proof = generate_trie_proof::( + &memdb, + root, + &[pairs[0].0.clone()] + ).unwrap(); + + // Check that a K, V included into the proof are verified. + assert!(verify_trie_proof::( + &root, + &proof, + &[(pairs[0].0.clone(), Some(pairs[0].1.clone()))] + ).is_ok() + ); + + // Absence of the V is not verified with the proof that has K, V included. + assert!(verify_trie_proof::>( + &root, + &proof, + &[(pairs[0].0.clone(), None)] + ).is_err() + ); + + // K not included into the trie is not verified. + assert!(verify_trie_proof::( + &root, + &proof, + &[(hex!("4242").to_vec(), Some(pairs[0].1.clone()))] + ).is_err() + ); + + // K included into the trie but not included into the proof is not verified. 
+ assert!(verify_trie_proof::( + &root, + &proof, + &[(pairs[1].0.clone(), Some(pairs[1].1.clone()))] + ).is_err() + ); + } + #[test] fn generate_storage_root_with_proof_works_independently_from_the_delta_order() { let proof = StorageProof::decode(&mut &include_bytes!("../test-res/proof")[..]).unwrap(); diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 9e2931486cba0..48186fa7f6b34 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -332,10 +332,14 @@ impl NodeCodecT for NodeCodec // utils fn value_do_hash(val: &Value, threshold: &u32) -> bool { - if let Value::Value(val) = val { - val.encoded_size() >= *threshold as usize - } else { - false + match val { + Value::Value(val) => { + val.encoded_size() >= *threshold as usize + }, + Value::HashedValue(..) => true, // can only keep hashed + Value::NoValue => { + false + }, } } diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index 6711c1a047127..7f5c68794e745 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -65,7 +65,7 @@ impl NodeHeader { match self { NodeHeader::Null | NodeHeader::Leaf(..) - | NodeHeader::Branch(..) => false, + | NodeHeader::Branch(..) => false, NodeHeader::AltHashBranch(..) | NodeHeader::AltHashLeaf(..) => true, } diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index b13cad34d6ccd..43a32b25ebfb7 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -18,6 +18,9 @@ use sp_std::vec::Vec; use codec::{Encode, Decode}; use hash_db::{Hasher, HashDB}; +use hash_db::MetaHasher; +use trie_db::NodeCodec; +use crate::{trie_types::Layout, TrieLayout}; /// A proof that some set of key-value pairs are included in the storage trie. 
The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that @@ -101,8 +104,8 @@ impl StorageProof { pub fn into_compact_proof( self, root: H::Out, - ) -> Result>> { - crate::encode_compact::>(self, root) + ) -> Result>> { + crate::encode_compact::>(self, root) } /// Returns the estimated encoded size of the compact proof. @@ -130,9 +133,9 @@ impl CompactProof { pub fn to_storage_proof( &self, expected_root: Option<&H::Out>, - ) -> Result<(StorageProof, H::Out), crate::CompactProofError>> { + ) -> Result<(StorageProof, H::Out), crate::CompactProofError>> { let mut db = crate::MemoryDB::::new(&[]); - let root = crate::decode_compact::, _, _>( + let root = crate::decode_compact::, _, _>( &mut db, self.iter_compact_encoded_nodes(), expected_root, @@ -171,9 +174,6 @@ impl Iterator for StorageProofNodeIterator { impl From for crate::MemoryDB { fn from(proof: StorageProof) -> Self { - use hash_db::MetaHasher; - use trie_db::NodeCodec; - use crate::{trie_types::Layout, TrieLayout}; let mut db = crate::MemoryDB::default(); // Needed because we do not read trie structure, so // we do a heuristic related to the fact that host function diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index efe3223580f3f..3ffc2a4197f95 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -22,7 +22,7 @@ use crate::{ EMPTY_PREFIX, HashDBT, TrieHash, TrieError, TrieConfiguration, - CompactProof, StorageProof, + CompactProof, StorageProof, GlobalMeta, TrieMeta, }; use sp_std::boxed::Box; use sp_std::vec::Vec; @@ -109,13 +109,17 @@ pub fn decode_compact<'a, L, DB, I>( ) -> Result, Error> where L: TrieConfiguration, - DB: HashDBT + hash_db::HashDBRef, + DB: HashDBT> + + hash_db::HashDBRef>, I: IntoIterator, { let mut nodes_iter = encoded.into_iter(); - let (top_root, _nb_used) = trie_db::decode_compact_from_iter::( + // Layout does not change trie reading. 
+ let layout = L::default(); + let (top_root, _nb_used) = trie_db::decode_compact_from_iter::( db, &mut nodes_iter, + &layout, )?; // Only check root if expected root is passed as argument. @@ -128,7 +132,7 @@ pub fn decode_compact<'a, L, DB, I>( let mut child_tries = Vec::new(); { // fetch child trie roots - let trie = crate::TrieDB::::new(db, &top_root)?; + let trie = crate::TrieDB::::new_with_layout(db, &top_root, layout.clone())?; let mut iter = trie.iter()?; @@ -159,16 +163,17 @@ pub fn decode_compact<'a, L, DB, I>( } } - if !HashDBT::::contains(db, &top_root, EMPTY_PREFIX) { + if !HashDBT::::contains(db, &top_root, EMPTY_PREFIX) { return Err(Error::IncompleteProof); } let mut previous_extracted_child_trie = None; for child_root in child_tries.into_iter() { if previous_extracted_child_trie.is_none() { - let (top_root, _) = trie_db::decode_compact_from_iter::( + let (top_root, _) = trie_db::decode_compact_from_iter::( db, &mut nodes_iter, + &layout, )?; previous_extracted_child_trie = Some(top_root); } @@ -206,11 +211,14 @@ pub fn encode_compact( root: TrieHash, ) -> Result> where - L: TrieConfiguration, + L: TrieConfiguration, { let mut child_tries = Vec::new(); let partial_db = proof.into_memory_db(); let mut compact_proof = { + // Layout does not change trie reading. + // And meta for writing are read from state + // (no new node so using trie without threshold is safe here). let trie = crate::TrieDB::::new(&partial_db, &root)?; let mut iter = trie.iter()?; @@ -243,7 +251,7 @@ pub fn encode_compact( }; for child_root in child_tries { - if !HashDBT::::contains(&partial_db, &child_root, EMPTY_PREFIX) { + if !HashDBT::::contains(&partial_db, &child_root, EMPTY_PREFIX) { // child proof are allowed to be missing (unused root can be included // due to trie structure modification). 
continue; From 4706814001393d2deeec102c77ea2c22b2df6ecc Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 11 Jun 2021 15:19:56 +0200 Subject: [PATCH 053/188] fix compilation --- client/db/src/bench.rs | 2 +- primitives/transaction-storage-proof/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 30f709bf9c9e8..de7e1b874c97e 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -540,7 +540,7 @@ impl StateBackend> for BenchmarkingState { fn proof_size(&self) -> Option { self.proof_recorder.as_ref().map(|recorder| { let proof_size = recorder.estimate_encoded_size() as u32; - let proof = recorder.to_storage_proof(); + let proof = recorder.to_storage_proof::>(); let proof_recorder_root = self.proof_recorder_root.get(); if proof_recorder_root == Default::default() || proof_size == 1 { // empty trie diff --git a/primitives/transaction-storage-proof/src/lib.rs b/primitives/transaction-storage-proof/src/lib.rs index 825de27b2a5a9..7891edb0b2dc5 100644 --- a/primitives/transaction-storage-proof/src/lib.rs +++ b/primitives/transaction-storage-proof/src/lib.rs @@ -141,7 +141,7 @@ pub mod registration { use super::*; type Hasher = sp_core::Blake2Hasher; - type TrieLayout = sp_trie::Layout::; + type TrieLayout = sp_trie::trie_types::Layout::; /// Create a new inherent data provider instance for a given parent block hash. pub fn new_data_provider( From 09d5cf6e8b252540e12abfe172193e7599f50a33 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 11 Jun 2021 16:13:11 +0200 Subject: [PATCH 054/188] Right logic to avoid switch on reencode when default layout. 
--- primitives/trie/src/lib.rs | 11 +++++++---- primitives/trie/src/node_codec.rs | 4 ++-- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 49e115c91a747..7e4e77c8f8df1 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -63,6 +63,8 @@ pub struct TrieMeta { /// `TrieDbMut` it switch nodes to alternative hashing /// method by defining the threshold to use with alternative /// hashing. + /// Trie codec or other proof manipulation will always use + /// `None` in order to prevent state change on reencoding. pub try_inner_hashing: Option, /// Flag indicating alternative value hash is currently use /// or will be use. @@ -163,9 +165,10 @@ impl Meta for TrieMeta { ValuePlan::NoValue => return, }; - self.apply_inner_hashing = self.try_inner_hashing.as_ref().map(|threshold| - range.end - range.start >= *threshold as usize - ).unwrap_or(false); + if let Some(threshold) = self.try_inner_hashing.clone() { + self.apply_inner_hashing = range.end - range.start >= threshold as usize; + } + self.range = Some(range); self.contain_hash = contain_hash; } @@ -189,7 +192,7 @@ impl Meta for TrieMeta { self.contain_hash } - // TODO remove upstream + // TODO rename to get state meta fn do_value_hash(&self) -> bool { self.apply_inner_hashing } diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 48186fa7f6b34..3528b5790d490 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -227,7 +227,7 @@ impl NodeCodecT for NodeCodec // sometime). 
let mut output = if meta.extract_global_meta().as_ref().map(|threshold| value_do_hash(&value, threshold) - ).unwrap_or(false) { + ).unwrap_or(meta.do_value_hash()) { partial_encode(partial, NodeKind::AltHashLeaf) } else { partial_encode(partial, NodeKind::Leaf) @@ -279,7 +279,7 @@ impl NodeCodecT for NodeCodec ) -> Vec { let mut output = match (&value, meta.extract_global_meta().as_ref().map(|threshold| value_do_hash(&value, threshold) - ).unwrap_or(false)) { + ).unwrap_or(meta.do_value_hash())) { (&Value::NoValue, _) => { partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) }, From 906cd48fc8061a27ed5de2b7f5477e8cfaa30976 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 14 Jun 2021 17:13:45 +0200 Subject: [PATCH 055/188] Clean up some todos --- Cargo.lock | 24 ++++++++--------- Cargo.toml | 10 ++++---- client/db/src/lib.rs | 1 - primitives/externalities/src/lib.rs | 27 -------------------- primitives/state-machine/src/trie_backend.rs | 1 - primitives/trie/src/lib.rs | 8 ++---- primitives/trie/src/node_codec.rs | 15 +++++------ 7 files changed, 25 insertions(+), 61 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3871808cdef0b..ad845adf59f43 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2370,7 +2370,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" [[package]] name = "hash256-std-hasher" @@ -2384,7 +2384,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "crunchy", ] @@ -3066,10 +3066,10 
@@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "hash-db", - "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", + "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact)", "tiny-keccak", ] @@ -3881,7 +3881,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "hash-db", "hashbrown", @@ -10565,22 +10565,22 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "criterion", "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact)", "memory-db", "parity-scale-codec", "trie-db", "trie-root", - "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", + "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact)", ] [[package]] name = "trie-db" version = "0.22.5" -source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "hash-db", "hashbrown", @@ -10592,7 +10592,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "hash-db", ] @@ -10610,10 +10610,10 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" dependencies = [ "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact)", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 754e1ea9f9b7d..566401fbe28f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -278,8 +278,8 @@ zeroize = { opt-level = 3 } panic = "unwind" [patch.crates-io] -hash-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } -memory-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } -trie-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } -trie-root = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } -trie-bench = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } +hash-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-refact" } +memory-db = { git = 
"https://github.com/cheme/trie.git", branch = "hashed-value-simple4-refact" } +trie-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-refact" } +trie-root = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-refact" } +trie-bench = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-refact" } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 66cedb6b54b7d..5c2ed5c607818 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -807,7 +807,6 @@ impl sc_client_api::backend::BlockImportOperation for Bloc )); let mut changes_trie_config: Option = None; - // TODO test genesis init with a threshold let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| { if &k[..] == well_known_keys::CHANGES_TRIE_CONFIG { diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 3cf379131795a..14145e8798498 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -323,30 +323,3 @@ impl ExternalitiesExt for &mut dyn Externalities { self.deregister_extension_by_type_id(TypeId::of::()) } } - -/// Helpers method for the [`Externalities`] trait. -pub trait ExternalitiesHelpers: Externalities { - /* No we use backend in this case TODO remove function - /// Utility function to get trie inner value hash threshold from - /// backend state or pending changes. - fn get_trie_alt_hashing_threshold(&self) -> Option { - self.storage(sp_storage::well_known_keys::TRIE_HASHING_CONFIG) - .and_then(|encoded| sp_storage::trie_threshold_decode(&mut encoded.as_slice())) - } - */ - - /// Utility function to modify trie inner value hash threshold. 
- fn modify_trie_alt_hashing_threshold(&mut self, threshold: Option) { - match threshold { - Some(threshold) => { - let encoded = sp_storage::trie_threshold_encode(threshold); - self.set_storage(sp_storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), encoded); - }, - None => { - self.clear_storage(sp_storage::well_known_keys::TRIE_HASHING_CONFIG); - }, - } - } -} - -impl ExternalitiesHelpers for E { } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index ff6067d44d0d8..cfde64dbc119d 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -180,7 +180,6 @@ impl, H: Hasher> Backend for TrieBackend where let use_inner_hash_value = if let Some(force) = self.force_alt_hashing.as_ref() { force.clone() } else { - // TODO try memoize in force self.get_trie_alt_hashing_threshold() }; let mut write_overlay = S::Overlay::default(); diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 7e4e77c8f8df1..8a3a0f60314cf 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -76,8 +76,7 @@ pub struct TrieMeta { /// set as accessed by defalult, but can be /// change on access explicitely: `HashDB::get_with_meta`. /// and reset on access explicitely: `HashDB::access_from`. - /// TODO!! could be remove from meta: only use in proof recorder context. - /// But does not add memory usage here. + /// Not strictly needed in this struct, but does not add memory usage here. pub unused_value: bool, } @@ -859,10 +858,7 @@ pub fn estimate_entry_size(entry: &(DBValue, TrieMeta), hash_len: usize) -> usiz full_encoded } -/// If needed, call to decode plan in order to update meta earlier. -/// TODO if removing fully meta, this will still be needed but with -/// a less generic name: read variant of node from db value and indicate -/// if can hash value. +/// Decode plan in order to update meta early (needed to register proofs). 
pub fn resolve_encoded_meta(entry: &mut (DBValue, TrieMeta)) { use trie_db::NodeCodec; let _ = as TrieLayout>::Codec::decode_plan(entry.0.as_slice(), &mut entry.1); diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 3528b5790d490..1358089ea8a16 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -90,16 +90,14 @@ pub struct NodeCodec(PhantomData); impl NodeCodec { fn decode_plan_inner_hashed>( data: &[u8], - mut meta: Option<&mut M>, // TODO when remove no meta, remove option + meta: &mut M, ) -> Result { let mut input = ByteSliceInput::new(data); - let contains_hash = meta.as_ref() - .map(|m| m.contains_hash_of_value()).unwrap_or_default(); + let contains_hash = meta.contains_hash_of_value(); let header = NodeHeader::decode(&mut input)?; let alt_hashing = header.alt_hashing(); - meta.as_mut() - .map(|m| m.set_state_meta(alt_hashing)); + meta.set_state_meta(alt_hashing); let branch_has_value = if let NodeHeader::Branch(has_value, _) = &header { *has_value @@ -196,15 +194,14 @@ impl NodeCodecT for NodeCodec } fn decode_plan(data: &[u8], meta: &mut M) -> Result { - Self::decode_plan_inner_hashed(data, Some(meta)).map(|plan| { + Self::decode_plan_inner_hashed(data, meta).map(|plan| { meta.decoded_callback(&plan); plan }) } - fn decode_plan_inner(data: &[u8]) -> Result { - let meta: Option<&mut M> = None; - Self::decode_plan_inner_hashed(data, meta) + fn decode_plan_inner(_data: &[u8]) -> Result { + unreachable!("decode_plan is implemented") } fn is_empty_node(data: &[u8]) -> bool { From 662eabe858b1e58384b6c48dd22d95a37d7353f2 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 14 Jun 2021 18:50:25 +0200 Subject: [PATCH 056/188] remove trie meta from root upstream --- Cargo.lock | 16 ++-- bin/node/bench/src/generator.rs | 2 +- primitives/state-machine/src/basic.rs | 2 +- primitives/state-machine/src/lib.rs | 2 +- primitives/state-machine/src/trie_backend.rs | 6 +- primitives/trie/src/lib.rs | 78 
+++----------------- primitives/trie/src/node_codec.rs | 8 +- 7 files changed, 29 insertions(+), 85 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ad845adf59f43..e4eaab283dcc2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2370,7 +2370,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" [[package]] name = "hash256-std-hasher" @@ -2384,7 +2384,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" dependencies = [ "crunchy", ] @@ -3066,7 +3066,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact)", @@ -3881,7 +3881,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" dependencies = [ "hash-db", "hashbrown", @@ -10565,7 +10565,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" 
-source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" dependencies = [ "criterion", "hash-db", @@ -10580,7 +10580,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.5" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" dependencies = [ "hash-db", "hashbrown", @@ -10592,7 +10592,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" dependencies = [ "hash-db", ] @@ -10610,7 +10610,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#3ba095cf3543b7b8140467741d90c8be0c69f5f3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact)", diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index 6589bdcd0b277..876321a3a2756 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -45,7 +45,7 @@ pub fn generate_trie( { let mut trie_db = if let Some(threshold) = alt_hashing { - let layout = sp_trie::Layout::with_inner_hashing(threshold); + let layout = sp_trie::Layout::with_alt_hashing(threshold); TrieDBMut::::new_with_layout(&mut trie, &mut root, 
layout) } else { TrieDBMut::new(&mut trie, &mut root) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 7d7be10f2b584..60aa1918c6fd3 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -288,7 +288,7 @@ impl Externalities for BasicExternalities { } let layout = if let Some(threshold) = self.alt_hashing.as_ref() { - Layout::::with_inner_hashing(*threshold) + Layout::::with_alt_hashing(*threshold) } else { Layout::::default() }; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 420f8373fa43a..5130a432fece8 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1547,7 +1547,7 @@ mod tests { // do switch - layout = Layout::with_inner_hashing(TRESHOLD); + layout = Layout::with_alt_hashing(TRESHOLD); // update with same value do not change { let mut trie = TrieDBMut::from_existing_with_layout( diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index cfde64dbc119d..e712fbc23f172 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -192,7 +192,7 @@ impl, H: Hasher> Backend for TrieBackend where ); let res = || { let layout = if let Some(threshold) = use_inner_hash_value { - sp_trie::Layout::with_inner_hashing(threshold) + sp_trie::Layout::with_alt_hashing(threshold) } else { sp_trie::Layout::default() }; @@ -223,7 +223,7 @@ impl, H: Hasher> Backend for TrieBackend where ChildType::ParentKeyId => empty_child_trie_root::>() }; let layout = if let Some(threshold) = use_inner_hash_value { - sp_trie::Layout::with_inner_hashing(threshold) + sp_trie::Layout::with_alt_hashing(threshold) } else { sp_trie::Layout::default() }; @@ -304,7 +304,7 @@ pub mod tests { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); let mut trie = if hashed_value { - let layout = 
Layout::with_inner_hashing(TRESHOLD); + let layout = Layout::with_alt_hashing(TRESHOLD); TrieDBMut::new_with_layout(&mut mdb, &mut root, layout) } else { TrieDBMut::new(&mut mdb, &mut root) diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 8a3a0f60314cf..68d18a5621ce5 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -91,45 +91,16 @@ impl Meta for TrieMeta { self.apply_inner_hashing = state_meta; } - // TODO rename upstream as read_global_meta - fn extract_global_meta(&self) -> Self::GlobalMeta { - self.try_inner_hashing - } - - fn set_global_meta(&mut self, global_meta: Self::GlobalMeta) { - self.try_inner_hashing = global_meta; - } - - // TODO remove upstream? - fn has_state_meta(&self) -> bool { + fn read_state_meta(&self) -> Self::StateMeta { self.apply_inner_hashing } - // TODO consider removal upstream of this method (node type in codec) - fn read_state_meta(&mut self, _data: &[u8]) -> Result { - unreachable!() - // TODO read directly from codec. -/* let offset = if data[0] == trie_constants::ENCODED_META_ALLOW_HASH { - self.recorded_do_value_hash = true; - self.do_value_hash = true; - 1 - } else { - 0 - }; - Ok(offset)*/ + fn read_global_meta(&self) -> Self::GlobalMeta { + self.try_inner_hashing } - // TODO consider removal upstream of this method (node type in codec) - // `do_value_hash` method is enough function to write with codec. - fn write_state_meta(&self) -> Vec { - unreachable!() -/* if self.do_value_hash { - // Note that this only works with sp_trie codec. - // Acts as a boolean result. - [trie_constants::ENCODED_META_ALLOW_HASH].to_vec() - } else { - Vec::new() - }*/ + fn set_global_meta(&mut self, global_meta: Self::GlobalMeta) { + self.try_inner_hashing = global_meta; } fn meta_for_new( @@ -146,14 +117,12 @@ impl Meta for TrieMeta { Self::meta_for_new(global) } - // TODO meta for empty is unused: can consider removal upstream. 
fn meta_for_empty( global: Self::GlobalMeta, ) -> Self { Self::meta_for_new(global) } - // TODO if removing all meta, the Option will replace it. fn encoded_value_callback( &mut self, value_plan: ValuePlan, @@ -190,11 +159,6 @@ impl Meta for TrieMeta { fn contains_hash_of_value(&self) -> bool { self.contain_hash } - - // TODO rename to get state meta - fn do_value_hash(&self) -> bool { - self.apply_inner_hashing - } } impl TrieMeta { @@ -234,8 +198,7 @@ impl Default for Layout { impl Layout { /// Layout with inner hashing active. /// Will flag trie for hashing. - /// TODO rename inner -> alt - pub fn with_inner_hashing(threshold: u32) -> Self { + pub fn with_alt_hashing(threshold: u32) -> Self { Layout(Some(threshold), sp_std::marker::PhantomData) } } @@ -249,7 +212,6 @@ impl TrieLayout for Layout const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; const USE_META: bool = true; - const READ_ROOT_STATE_META: bool = false; // TODO rem type Hash = H; type Codec = NodeCodec; @@ -259,20 +221,6 @@ impl TrieLayout for Layout fn layout_meta(&self) -> GlobalMeta { self.0 } - - // TODO remove upstream - fn initialize_from_root_meta(&mut self, _root_meta: &Self::Meta) { - unreachable!() - /*if root_meta.extract_global_meta() { - self.0 = true; - }*/ - } - - // TODO remove upstream - fn set_root_meta(_root_meta: &mut Self::Meta, _global_meta: GlobalMeta) { - unreachable!() -// root_meta.set_global_meta(global_meta); - } } /// Hasher with support to meta. @@ -302,8 +250,6 @@ impl MetaHasher for StateHasher } } - // TODO if removing meta upstream, still need to get DEAD_HEADER_META_HASHED_VALUE - // from proof. 
fn stored_value(value: &[u8], mut meta: Self::Meta) -> DBValue { let mut stored = Vec::with_capacity(value.len() + 1); if meta.contain_hash { @@ -314,8 +260,8 @@ impl MetaHasher for StateHasher } if meta.unused_value && meta.apply_inner_hashing { if meta.range.is_some() { - // Waring this assume that encoded value does not start by this, so it is tightly coupled - // with the header type of the codec: only for optimization. + // Warning this assumes that encoded value cannot start by this, + // so it is tightly coupled with the header type of the codec. stored.push(trie_constants::DEAD_HEADER_META_HASHED_VALUE); let range = meta.range.as_ref().expect("Tested in condition"); meta.contain_hash = true; // useless but could be with meta as &mut @@ -333,7 +279,6 @@ impl MetaHasher for StateHasher >::stored_value(value.as_slice(), meta) } - // TODO remove upstream? fn extract_value(mut stored: &[u8], global_meta: Self::GlobalMeta) -> (&[u8], Self::Meta) { let input = &mut stored; let mut contain_hash = false; @@ -352,7 +297,6 @@ impl MetaHasher for StateHasher (stored, meta) } - // TODO remove upstream fn extract_value_owned(mut stored: DBValue, global: Self::GlobalMeta) -> (DBValue, Self::Meta) { let len = stored.len(); let (v, meta) = >::extract_value(stored.as_slice(), global); @@ -941,7 +885,7 @@ mod tests { let layout = Layout::default(); check_equivalent::(input, layout.clone()); check_iteration::(input, layout); - let layout = Layout::with_inner_hashing(TRESHOLD); + let layout = Layout::with_alt_hashing(TRESHOLD); check_equivalent::(input, layout.clone()); check_iteration::(input, layout); } @@ -1094,7 +1038,7 @@ mod tests { }.make_with(seed.as_fixed_bytes_mut()); let layout = if flag { - Layout::with_inner_hashing(TRESHOLD) + Layout::with_alt_hashing(TRESHOLD) } else { Layout::default() }; @@ -1191,7 +1135,7 @@ mod tests { } fn iterator_works_inner(flag: bool) { let layout = if flag { - Layout::with_inner_hashing(TRESHOLD) + Layout::with_alt_hashing(TRESHOLD) } 
else { Layout::default() }; diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 1358089ea8a16..bd6e6d476f3e8 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -222,9 +222,9 @@ impl NodeCodecT for NodeCodec // With fix inner hashing alt hash can be use with all node, but // that is not better (encoding can use an additional nibble byte // sometime). - let mut output = if meta.extract_global_meta().as_ref().map(|threshold| + let mut output = if meta.read_global_meta().as_ref().map(|threshold| value_do_hash(&value, threshold) - ).unwrap_or(meta.do_value_hash()) { + ).unwrap_or(meta.read_state_meta()) { partial_encode(partial, NodeKind::AltHashLeaf) } else { partial_encode(partial, NodeKind::Leaf) @@ -274,9 +274,9 @@ impl NodeCodecT for NodeCodec value: Value, meta: &mut M, ) -> Vec { - let mut output = match (&value, meta.extract_global_meta().as_ref().map(|threshold| + let mut output = match (&value, meta.read_global_meta().as_ref().map(|threshold| value_do_hash(&value, threshold) - ).unwrap_or(meta.do_value_hash())) { + ).unwrap_or(meta.read_state_meta())) { (&Value::NoValue, _) => { partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) }, From ac0e019f709abd084648f727f98fd06cb3c781ab Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 15 Jun 2021 11:30:02 +0200 Subject: [PATCH 057/188] update upstream and fix benches. 
--- Cargo.lock | 16 ++++++++-------- bin/node/executor/benches/bench.rs | 2 +- primitives/api/test/benches/bench.rs | 2 +- primitives/trie/benches/bench.rs | 4 ++-- primitives/trie/src/lib.rs | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index acfb520048c5d..e3ae3bdce9add 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2354,7 +2354,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" [[package]] name = "hash256-std-hasher" @@ -2368,7 +2368,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" dependencies = [ "crunchy", ] @@ -3050,7 +3050,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact)", @@ -3865,7 +3865,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" dependencies = [ "hash-db", "hashbrown", @@ -10547,7 
+10547,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" dependencies = [ "criterion", "hash-db", @@ -10562,7 +10562,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.5" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" dependencies = [ "hash-db", "hashbrown", @@ -10574,7 +10574,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" dependencies = [ "hash-db", ] @@ -10592,7 +10592,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#b4b0d1919db7b100aba040c336d3c69ed11faa36" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact)", diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index d21aedd1d1849..9c947b84381cd 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -83,7 +83,7 @@ fn construct_block( let extrinsics = extrinsics.into_iter().map(sign).collect::>(); // calculate the 
header fields that we can. - let extrinsics_root = Layout::::ordered_trie_root( + let extrinsics_root = Layout::::default().ordered_trie_root( extrinsics.iter().map(Encode::encode) ).to_fixed_bytes() .into(); diff --git a/primitives/api/test/benches/bench.rs b/primitives/api/test/benches/bench.rs index a9fe79d1abcec..6180fc244a7cb 100644 --- a/primitives/api/test/benches/bench.rs +++ b/primitives/api/test/benches/bench.rs @@ -26,7 +26,7 @@ use sp_api::ProvideRuntimeApi; fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("add one with same runtime api", |b| { - let client = substrate_test_runtime_client::new(ture); + let client = substrate_test_runtime_client::new(true); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); diff --git a/primitives/trie/benches/bench.rs b/primitives/trie/benches/bench.rs index c2ccb31328aae..c40907ac5cdf4 100644 --- a/primitives/trie/benches/bench.rs +++ b/primitives/trie/benches/bench.rs @@ -21,11 +21,11 @@ criterion_main!(benches); fn benchmark(c: &mut Criterion) { trie_bench::standard_benchmark::< - sp_trie::Layout, + sp_trie::trie_types::Layout, sp_trie::TrieStream, >(c, "substrate-blake2"); trie_bench::standard_benchmark::< - sp_trie::Layout, + sp_trie::trie_types::Layout, sp_trie::TrieStream, >(c, "substrate-keccak"); } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 68d18a5621ce5..90e47693a1410 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -218,7 +218,7 @@ impl TrieLayout for Layout type MetaHasher = M; type Meta = M::Meta; - fn layout_meta(&self) -> GlobalMeta { + fn global_meta(&self) -> GlobalMeta { self.0 } } @@ -316,7 +316,7 @@ impl TrieConfiguration for Layout A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::trie_root_no_extension::(input, self.layout_meta()) + trie_root::trie_root_no_extension::(input, self.global_meta()) } fn trie_root_unhashed(&self, input: I) -> Vec where @@ -324,7 +324,7 @@ impl 
TrieConfiguration for Layout A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::unhashed_trie_no_extension::(input, self.layout_meta()) + trie_root::unhashed_trie_no_extension::(input, self.global_meta()) } fn encode_index(input: u32) -> Vec { From 7e73a703e22c0431c944618b9f47e0c469684a44 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 15 Jun 2021 12:00:05 +0200 Subject: [PATCH 058/188] split some long lines. --- client/db/src/bench.rs | 10 ++- client/service/test/src/client/light.rs | 7 +- .../state-machine/src/proving_backend.rs | 16 +++- primitives/state-machine/src/trie_backend.rs | 4 +- .../state-machine/src/trie_backend_essence.rs | 89 +++++++++++-------- primitives/trie/src/lib.rs | 25 ++++-- primitives/trie/src/node_codec.rs | 6 +- primitives/trie/src/node_header.rs | 12 ++- primitives/trie/src/trie_stream.rs | 9 +- 9 files changed, 117 insertions(+), 61 deletions(-) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index de7e1b874c97e..6d2d9e11b0098 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -61,14 +61,16 @@ impl sp_state_machine::Storage> for StorageDb, _>>::extract_value_owned(value, global))) - .map_err(|e| format!("Database backend error: {:?}", e))?; + .map(|result| result.map(|value| + , _>>::extract_value_owned(value, global) + )).map_err(|e| format!("Database backend error: {:?}", e))?; recorder.record::>(key.clone(), backend_value.clone()); Ok(backend_value) } else { self.db.get(0, &prefixed_key) - .map(|result| result.map(|value| , _>>::extract_value_owned(value, global))) - .map_err(|e| format!("Database backend error: {:?}", e)) + .map(|result| result.map(|value| + , _>>::extract_value_owned(value, global) + )).map_err(|e| format!("Database backend error: {:?}", e)) } } fn access_from(&self, key: &Block::Hash) { diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index e40538e08ebb9..fbbefbb5a5674 100644 --- a/client/service/test/src/client/light.rs +++ 
b/client/service/test/src/client/light.rs @@ -625,7 +625,12 @@ fn header_proof_is_generated_and_checked() { header_proof_is_generated_and_checked_inner(false); } fn header_proof_is_generated_and_checked_inner(hashed: bool) { - let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true, hashed); + let ( + local_checker, + local_cht_root, + remote_block_header, + remote_header_proof, + ) = prepare_for_header_proof_check(true, hashed); assert_eq!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ cht_root: local_cht_root, block: 1, diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 558e22cdc7411..26c98d43703e3 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -242,7 +242,12 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage { type Overlay = S::Overlay; - fn get(&self, key: &H::Out, prefix: Prefix, global: Option) -> Result, String> { + fn get( + &self, + key: &H::Out, + prefix: Prefix, + global: Option, + ) -> Result, String> { if let Some(v) = self.proof_recorder.get(key) { return Ok(v); } @@ -452,7 +457,8 @@ mod tests { fn proof_recorded_and_checked_inner(flagged: bool) { let size_content = 34; // above hashable value treshold. let value_range = 0..64; - let contents = value_range.clone().map(|i| (vec![i], Some(vec![i; size_content]))).collect::>(); + let contents = value_range.clone() + .map(|i| (vec![i], Some(vec![i; size_content]))).collect::>(); let mut in_memory = InMemoryBackend::::default(); if flagged { in_memory = in_memory.update(vec![(None, vec![( @@ -464,12 +470,14 @@ mod tests { } let mut in_memory = in_memory.update(vec![(None, contents)]); let in_memory_root = in_memory.storage_root(std::iter::empty()).0; - value_range.clone().for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); + value_range.clone() + .for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); let trie = in_memory.as_trie_backend().unwrap(); let trie_root = trie.storage_root(std::iter::empty()).0; assert_eq!(in_memory_root, trie_root); - value_range.clone().for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); + value_range.clone() + .for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); let proving = ProvingBackend::new(trie); assert_eq!(proving.storage(&[42]).unwrap().unwrap(), 
vec![42; size_content]); diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index e712fbc23f172..33b6c6da9d5ea 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -329,7 +329,9 @@ pub mod tests { (mdb, root) } - pub(crate) fn test_trie(hashed_value: bool) -> TrieBackend, BlakeTwo256> { + pub(crate) fn test_trie( + hashed_value: bool, + ) -> TrieBackend, BlakeTwo256> { let (mdb, root) = test_db(hashed_value); TrieBackend::new(mdb, root) } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 63b1ccc2d9698..8bc3b93fc7be5 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -22,7 +22,7 @@ use std::sync::Arc; use sp_std::{ops::Deref, boxed::Box, vec::Vec}; use crate::{warn, debug}; -use hash_db::{self, Hasher, Prefix}; +use hash_db::{self, Hasher, Prefix, AsHashDB, HashDB, HashDBRef}; use sp_trie::{Trie, PrefixedMemoryDB, DBValue, empty_child_trie_root, read_trie_value, read_child_trie_value, for_keys_in_child_trie, KeySpacedDB, TrieDBIterator, TrieDBKeyIterator, @@ -137,7 +137,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: Option<&ChildInfo>, key: &[u8], ) -> Result> { - let dyn_eph: &dyn hash_db::HashDBRef<_, _, _, _>; + let dyn_eph: &dyn HashDBRef<_, _, _, _>; let keyspace_eph; if let Some(child_info) = child_info.as_ref() { keyspace_eph = KeySpacedDB::new(self, child_info.keyspace()); @@ -305,11 +305,13 @@ pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { overlay: &'a mut S::Overlay, } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB> +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> AsHashDB> for Ephemeral<'a, S, H> { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB> + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> 
&'b mut (dyn hash_db::HashDB> + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB> + 'b) { self } + fn as_hash_db_mut<'b>( + &'b mut self, + ) -> &'b mut (dyn HashDB> + 'b) { self } } impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { @@ -321,15 +323,20 @@ impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB> +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDB> for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - Self::get_with_meta(self, key, prefix, Default::default()).map(|r| r.0) + HashDB::get_with_meta(self, key, prefix, Default::default()).map(|r| r.0) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: Option) -> Option<(DBValue, TrieMeta)> { - if let Some(val) = hash_db::HashDB::get_with_meta(self.overlay, key, prefix, global) { + fn get_with_meta( + &self, + key: &H::Out, + prefix: Prefix, + global: Option, + ) -> Option<(DBValue, TrieMeta)> { + if let Some(val) = HashDB::get_with_meta(self.overlay, key, prefix, global) { Some(val) } else { match self.storage.get(&key, prefix, global) { @@ -349,11 +356,11 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB bool { - hash_db::HashDB::get(self, key, prefix).is_some() + HashDB::get(self, key, prefix).is_some() } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { - hash_db::HashDB::insert(self.overlay, prefix, value) + HashDB::insert(self.overlay, prefix, value) } fn insert_with_meta( @@ -362,42 +369,47 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB H::Out { - hash_db::HashDB::insert_with_meta(self.overlay, prefix, value, meta) + HashDB::insert_with_meta(self.overlay, prefix, value, meta) } fn emplace(&mut self, key: H::Out, prefix: Prefix, value: DBValue) { - hash_db::HashDB::emplace(self.overlay, key, prefix, value) + HashDB::emplace(self.overlay, key, prefix, value) } fn remove(&mut self, key: &H::Out, 
prefix: Prefix) { - hash_db::HashDB::remove(self.overlay, key, prefix) + HashDB::remove(self.overlay, key, prefix) } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef> +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDBRef> for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - hash_db::HashDB::get(self, key, prefix) + HashDB::get(self, key, prefix) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: Option) -> Option<(DBValue, TrieMeta)> { - hash_db::HashDB::get_with_meta(self, key, prefix, global) + fn get_with_meta( + &self, + key: &H::Out, + prefix: Prefix, + global: Option, + ) -> Option<(DBValue, TrieMeta)> { + HashDB::get_with_meta(self, key, prefix, global) } fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> Option { - hash_db::HashDB::access_from(self, key, at) + HashDB::access_from(self, key, at) } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::contains(self, key, prefix) + HashDB::contains(self, key, prefix) } } /// Key-value pairs storage that is used by trie backend essence. pub trait TrieBackendStorage: Send + Sync { /// Type of in-memory overlay. - type Overlay: hash_db::HashDB> + Default + Consolidate; + type Overlay: HashDB> + Default + Consolidate; /// Get the value stored at key. fn get(&self, key: &H::Out, prefix: Prefix, global: Option) -> Result>; /// Call back when value get accessed in trie. 
@@ -437,26 +449,28 @@ impl TrieBackendStorage for sp_trie::GenericMemoryDB prefix: Prefix, global: Option, ) -> Result> { - Ok(hash_db::HashDB::get_with_meta(self, key, prefix, global)) + Ok(HashDB::get_with_meta(self, key, prefix, global)) } fn access_from(&self, key: &H::Out) { - hash_db::HashDB::access_from(self, key, None); + HashDB::access_from(self, key, None); } } -impl, H: Hasher> hash_db::AsHashDB> +impl, H: Hasher> AsHashDB> for TrieBackendEssence { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB> + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB> + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB> + 'b) { self } + fn as_hash_db_mut<'b>( + &'b mut self, + ) -> &'b mut (dyn HashDB> + 'b) { self } } -impl, H: Hasher> hash_db::HashDB> +impl, H: Hasher> HashDB> for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - self.get_with_meta(key, prefix, Default::default()).map(|r| r.0) + HashDB::get_with_meta(self, key, prefix, Default::default()).map(|r| r.0) } fn get_with_meta( @@ -484,7 +498,7 @@ impl, H: Hasher> hash_db::HashDB bool { - hash_db::HashDB::get(self, key, prefix).is_some() + HashDB::get(self, key, prefix).is_some() } fn insert_with_meta(&mut self, _prefix: Prefix, _value: &[u8], _meta: TrieMeta) -> H::Out { @@ -504,23 +518,28 @@ impl, H: Hasher> hash_db::HashDB, H: Hasher> hash_db::HashDBRef> +impl, H: Hasher> HashDBRef> for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - hash_db::HashDB::get(self, key, prefix) + HashDB::get(self, key, prefix) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: Option) -> Option<(DBValue, TrieMeta)> { - hash_db::HashDB::get_with_meta(self, key, prefix, global) + fn get_with_meta( + &self, + key: &H::Out, + prefix: Prefix, + global: Option, + ) -> Option<(DBValue, TrieMeta)> { + HashDB::get_with_meta(self, key, prefix, global) } fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> 
Option { - hash_db::HashDB::access_from(self, key, at) + HashDB::access_from(self, key, at) } fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::contains(self, key, prefix) + HashDB::contains(self, key, prefix) } } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 90e47693a1410..a102792950573 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -461,25 +461,30 @@ pub fn delta_trie_root( } /// Read a value from the trie. -pub fn read_trie_value>>( +pub fn read_trie_value( db: &DB, root: &TrieHash, key: &[u8] -) -> Result>, Box>> { +) -> Result>, Box>> + where + L: TrieConfiguration, + DB: hash_db::HashDBRef>, +{ Ok(TrieDB::::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) } /// Read a value from the trie with given Query. -pub fn read_trie_value_with< - L: TrieConfiguration, - Q: Query, - DB: hash_db::HashDBRef> ->( +pub fn read_trie_value_with ( db: &DB, root: &TrieHash, key: &[u8], query: Q -) -> Result>, Box>> { +) -> Result>, Box>> + where + L: TrieConfiguration, + Q: Query, + DB: hash_db::HashDBRef> +{ Ok(TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) } @@ -608,7 +613,7 @@ pub fn read_child_trie_value( } /// Read a value from the child trie with given query. 
-pub fn read_child_trie_value_with, DB>( +pub fn read_child_trie_value_with( keyspace: &[u8], db: &DB, root_slice: &[u8], @@ -616,6 +621,8 @@ pub fn read_child_trie_value_with Result>, Box>> where + L: TrieConfiguration, + Q: Query, DB: hash_db::HashDBRef>, { let mut root = TrieHash::::default(); diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index bd6e6d476f3e8..3f90bf8126352 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -355,7 +355,8 @@ fn partial_from_iterator_encode>( NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), NodeKind::AltHashLeaf => NodeHeader::AltHashLeaf(nibble_count).encode_to(&mut output), - NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(nibble_count).encode_to(&mut output), + NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(nibble_count) + .encode_to(&mut output), }; output.extend(partial); output @@ -375,7 +376,8 @@ fn partial_encode(partial: Partial, node_kind: NodeKind) -> Vec { NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), NodeKind::AltHashLeaf => NodeHeader::AltHashLeaf(nibble_count).encode_to(&mut output), - NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(nibble_count).encode_to(&mut output), + NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(nibble_count) + .encode_to(&mut output), }; if number_nibble_encoded > 0 { output.push(nibble_ops::pad_right((partial.0).1)); diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index 7f5c68794e745..43ac67cbabcaf 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -102,7 +102,11 @@ impl Decode for NodeHeader { /// Returns an iterator 
over encoded bytes for node header and size. /// Size encoding allows unlimited, length inefficient, representation, but /// is bounded to 16 bit maximum value to avoid possible DOS. -pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8, prefix_mask: usize) -> impl Iterator { +pub(crate) fn size_and_prefix_iterator( + size: usize, + prefix: u8, + prefix_mask: usize, +) -> impl Iterator { let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size); let max_value = 255u8 >> prefix_mask; @@ -139,7 +143,11 @@ fn encode_size_and_prefix(size: usize, prefix: u8, prefix_mask: usize, out: & } /// Decode size only from stream input and header byte. -fn decode_size(first: u8, input: &mut impl Input, prefix_mask: usize) -> Result { +fn decode_size( + first: u8, + input: &mut impl Input, + prefix_mask: usize, +) -> Result { let max_value = 255u8 >> prefix_mask; let mut result = (first & max_value) as usize; if result < max_value as usize { diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index 2be98fad76ab6..bba41fe6d81c0 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -66,8 +66,10 @@ fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK, 2), NodeKind::BranchNoValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2), NodeKind::BranchWithValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK, 2), - NodeKind::AltHashLeaf => size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3), - NodeKind::AltHashBranchWithValue => size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4), + NodeKind::AltHashLeaf => + size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3), + NodeKind::AltHashBranchWithValue => + size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4), }; 
iter_start .chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None }) @@ -162,7 +164,8 @@ impl trie_root::TrieStream for TrieStream { try_inner_hashing: None, apply_inner_hashing: true, }; - >>::hash(&data, &meta).as_ref().encode_to(&mut self.buffer); + >>::hash(&data, &meta).as_ref() + .encode_to(&mut self.buffer); } else { H::hash(&data).as_ref().encode_to(&mut self.buffer); } From 934021c1c05cdf8c6748aca513f93c3273c52c4b Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 18 Jun 2021 20:13:59 +0200 Subject: [PATCH 059/188] UPdate trie crate to work with new design. --- Cargo.lock | 24 +- Cargo.toml | 10 +- bin/node/bench/src/simple_trie.rs | 14 +- bin/node/bench/src/trie.rs | 4 +- client/db/src/bench.rs | 4 +- client/db/src/lib.rs | 6 +- primitives/io/src/lib.rs | 2 +- primitives/state-machine/src/backend.rs | 3 +- primitives/state-machine/src/basic.rs | 3 +- .../src/changes_trie/changes_iterator.rs | 4 +- .../state-machine/src/changes_trie/mod.rs | 15 +- .../state-machine/src/changes_trie/prune.rs | 2 +- .../state-machine/src/changes_trie/storage.rs | 17 +- primitives/state-machine/src/ext.rs | 2 +- .../state-machine/src/in_memory_backend.rs | 2 +- primitives/state-machine/src/lib.rs | 7 +- .../state-machine/src/proving_backend.rs | 50 +-- primitives/state-machine/src/trie_backend.rs | 5 +- .../state-machine/src/trie_backend_essence.rs | 119 ++---- primitives/trie/src/lib.rs | 358 ++++-------------- primitives/trie/src/node_codec.rs | 73 ++-- primitives/trie/src/node_header.rs | 39 +- primitives/trie/src/storage_proof.rs | 20 +- primitives/trie/src/trie_codec.rs | 11 +- primitives/trie/src/trie_stream.rs | 35 +- 25 files changed, 289 insertions(+), 540 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 94e601742d821..f97655a692d0b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2354,7 +2354,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" [[package]] name = "hash256-std-hasher" @@ -2368,7 +2368,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" dependencies = [ "crunchy", ] @@ -3050,10 +3050,10 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" dependencies = [ "hash-db", - "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact)", + "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple7)", "tiny-keccak", ] @@ -3865,7 +3865,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" dependencies = [ "hash-db", "hashbrown", @@ -10551,22 +10551,22 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" +source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" dependencies = [ "criterion", "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact)", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple7)", "memory-db", "parity-scale-codec", "trie-db", "trie-root", - "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact)", + "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple7)", ] [[package]] name = "trie-db" version = "0.22.5" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" dependencies = [ "hash-db", "hashbrown", @@ -10578,7 +10578,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" dependencies = [ "hash-db", ] @@ -10596,10 +10596,10 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact#54c61d7e3c32a4090d34468c38621d33b0f853b5" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" dependencies = [ "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-refact)", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple7)", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 566401fbe28f5..fc427dd2f2e16 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -278,8 +278,8 @@ zeroize = { opt-level = 3 } panic = "unwind" [patch.crates-io] -hash-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-refact" } -memory-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-refact" } -trie-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-refact" } -trie-root = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-refact" } -trie-bench = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-refact" } +hash-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple7" } +memory-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple7" } +trie-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple7" } +trie-root = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple7" } +trie-bench = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple7" } diff --git a/bin/node/bench/src/simple_trie.rs b/bin/node/bench/src/simple_trie.rs index fea106ed196fe..f4a4576508f6c 100644 --- a/bin/node/bench/src/simple_trie.rs +++ b/bin/node/bench/src/simple_trie.rs @@ -20,7 +20,7 @@ use std::{collections::HashMap, sync::Arc}; use kvdb::KeyValueDB; use node_primitives::Hash; -use sp_trie::{DBValue, TrieMeta, StateHasher, MetaHasher}; +use sp_trie::{DBValue, Meta, StateHasher, MetaHasher}; use hash_db::{HashDB, AsHashDB, Prefix, Hasher as _}; pub type Hasher = sp_core::Blake2Hasher; @@ -31,15 +31,15 @@ pub struct SimpleTrie<'a> { pub overlay: &'a mut HashMap, Option>>, } -impl<'a> AsHashDB> for SimpleTrie<'a> { - fn as_hash_db(&self) -> &dyn hash_db::HashDB> { &*self } +impl<'a> AsHashDB> for SimpleTrie<'a> { + fn as_hash_db(&self) -> &dyn hash_db::HashDB> { &*self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB> + 'b) { + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB> + 'b) 
{ &mut *self } } -impl<'a> HashDB> for SimpleTrie<'a> { +impl<'a> HashDB> for SimpleTrie<'a> { fn get(&self, key: &Hash, prefix: Prefix) -> Option { let key = sp_trie::prefixed_key::(key, prefix); if let Some(value) = self.overlay.get(&key) { @@ -48,7 +48,7 @@ impl<'a> HashDB> for SimpleTrie<'a> { self.db.get(0, &key).expect("Database backend error") } - fn get_with_meta(&self, key: &Hash, prefix: Prefix, global: Option) -> Option<(DBValue, TrieMeta)> { + fn get_with_meta(&self, key: &Hash, prefix: Prefix, global: Option) -> Option<(DBValue, Meta)> { let result = self.get(key, prefix); result.map(|value| >::extract_value_owned(value, global)) } @@ -57,7 +57,7 @@ impl<'a> HashDB> for SimpleTrie<'a> { self.get(hash, prefix).is_some() } - fn insert_with_meta(&mut self, prefix: Prefix, value: &[u8], meta: TrieMeta) -> Hash { + fn insert_with_meta(&mut self, prefix: Prefix, value: &[u8], meta: Meta) -> Hash { let key = >::hash(value, &meta); let stored_value = >::stored_value(value, meta); self.emplace(key, prefix, stored_value); diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index f1bcd3b2239c6..67d07a7c024b1 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -24,7 +24,7 @@ use lazy_static::lazy_static; use rand::Rng; use hash_db::Prefix; use sp_state_machine::Backend as _; -use sp_trie::{trie_types::TrieDBMut, TrieMut as _, TrieMeta, MetaHasher, StateHasher}; +use sp_trie::{trie_types::TrieDBMut, TrieMut as _, Meta, MetaHasher, StateHasher}; use node_primitives::Hash; @@ -175,7 +175,7 @@ impl sp_state_machine::Storage for Storage { key: &Hash, prefix: Prefix, global: Option, - ) -> Result, TrieMeta)>, String> { + ) -> Result, Meta)>, String> { let key = sp_trie::prefixed_key::(key, prefix); self.0.get(0, &key).map_err(|e| format!("Database backend error: {:?}", e)) .map(|result| result diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 72be5dd5062d4..a43a733bdd47b 100644 --- a/client/db/src/bench.rs +++ 
b/client/db/src/bench.rs @@ -23,7 +23,7 @@ use std::cell::{Cell, RefCell}; use std::collections::HashMap; use hash_db::{Prefix, Hasher}; -use sp_trie::{MemoryDB, prefixed_key, StateHasher, TrieMeta, MetaHasher}; +use sp_trie::{MemoryDB, prefixed_key, StateHasher, Meta, MetaHasher}; use sp_core::{ storage::{ChildInfo, TrackedStorageKey}, hexdisplay::HexDisplay @@ -54,7 +54,7 @@ impl sp_state_machine::Storage> for StorageDb, - ) -> Result, String> { + ) -> Result, String> { let prefixed_key = prefixed_key::>(key, prefix); if let Some(recorder) = &self.proof_recorder { if let Some(v) = recorder.get(&key) { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index c32a476a5d8da..fcbea6201e7bf 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -66,7 +66,7 @@ use sp_blockchain::{ use codec::{Decode, Encode}; use hash_db::Prefix; use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key, StateHasher, - TrieMeta, MetaHasher}; + Meta, MetaHasher}; use sp_database::Transaction; use sp_core::ChangesTrieConfiguration; use sp_core::offchain::OffchainOverlayedChange; @@ -894,7 +894,7 @@ impl sp_state_machine::Storage> for StorageDb, - ) -> Result, String> { + ) -> Result, String> { if self.prefix_keys { let key = prefixed_key::>(key, prefix); self.state_db.get(&key, self) @@ -936,7 +936,7 @@ impl sp_state_machine::Storage> for DbGenesisStora _key: &Block::Hash, _prefix: Prefix, _global: Option, - ) -> Result, String> { + ) -> Result, String> { Ok(None) } fn access_from(&self, _key: &Block::Hash) { diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index ffbfc4bdc54fb..21608c535c982 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -54,7 +54,7 @@ use sp_core::{ }; #[cfg(feature = "std")] -use sp_trie::{TrieConfiguration, trie_types::Layout}; +use sp_trie::{TrieConfiguration, Layout}; use sp_runtime_interface::{runtime_interface, Pointer}; use sp_runtime_interface::pass_by::{PassBy, PassByCodec}; diff --git 
a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index c90b5aa2013b2..c879053fce569 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -285,10 +285,9 @@ impl Consolidate for Vec<( } } -impl Consolidate for sp_trie::GenericMemoryDB +impl Consolidate for sp_trie::GenericMemoryDB where H: Hasher, - MH: sp_trie::MetaHasher, KF: sp_trie::KeyFunction, { fn consolidate(&mut self, other: Self) { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 288cb36993472..653bdc28a3c7d 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -22,8 +22,7 @@ use std::{ }; use crate::{Backend, StorageKey, StorageValue}; use hash_db::Hasher; -use sp_trie::{TrieConfiguration, empty_child_trie_root}; -use sp_trie::trie_types::Layout; +use sp_trie::{TrieConfiguration, empty_child_trie_root, Layout}; use sp_core::{ storage::{ well_known_keys::is_child_storage_key, Storage, diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index 25eda86b4e66a..be35581e7514d 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -24,7 +24,7 @@ use codec::{Decode, Encode, Codec}; use hash_db::Hasher; use num_traits::Zero; use sp_core::storage::PrefixedStorageKey; -use sp_trie::{Recorder, TrieMeta}; +use sp_trie::Recorder; use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; use crate::changes_trie::storage::{TrieBackendAdapter, InMemoryStorage}; @@ -337,7 +337,7 @@ struct ProvingDrilldownIterator<'a, H, Number> H::Out: 'a, { essence: DrilldownIteratorEssence<'a, H, Number>, - proof_recorder: RefCell>, + proof_recorder: 
RefCell>, } impl<'a, H, Number> ProvingDrilldownIterator<'a, H, Number> diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 5a2e206956ac9..f2e5ff461eecf 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -167,19 +167,10 @@ pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { type Overlay = sp_trie::MemoryDB; - fn get( - &self, - key: &H::Out, - prefix: Prefix, - _global: Option, - ) -> Result, String> { - match self.0.get(key, prefix) { - // change trie do not use meta. - Ok(Some(v)) => Ok(Some((v, Default::default()))), - Ok(None) => Ok(None), - Err(e) => Err(e), - } + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + self.0.get(key, prefix) } + fn access_from(&self, _key: &H::Out) { } } diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 6f00e9b6a8e0d..a741b814a5c70 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -95,7 +95,7 @@ fn prune_trie( // enumerate all changes trie' keys, recording all nodes that have been 'touched' // (effectively - all changes trie nodes) - let mut proof_recorder: Recorder = Default::default(); + let mut proof_recorder: Recorder = Default::default(); { let mut trie = ProvingBackendRecorder::<_, H> { backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 0cea30f0d809c..d99d918012f94 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -190,7 +190,7 @@ impl Storage for InMemoryStorage Result, 
String> { - Ok( as hash_db::HashDBRef>::get(&self.data.read().mdb, key, prefix)) + Ok( as hash_db::HashDBRef>::get(&self.data.read().mdb, key, prefix)) } } @@ -207,18 +207,9 @@ impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> { type Overlay = MemoryDB; - fn get( - &self, - key: &H::Out, - prefix: Prefix, - _global: Option, - ) -> Result, String> { - match self.storage.get(key, prefix) { - // change trie do not use meta. - Ok(Some(v)) => Ok(Some((v, Default::default()))), - Ok(None) => Ok(None), - Err(e) => Err(e), - } + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + self.storage.get(key, prefix) } + fn access_from(&self, _key: &H::Out) { } } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 25e672b768652..2defaa49142af 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -26,7 +26,7 @@ use sp_core::{ storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, hexdisplay::HexDisplay, }; -use sp_trie::{trie_types::Layout, empty_child_trie_root}; +use sp_trie::{Layout, empty_child_trie_root}; use sp_externalities::{ Externalities, Extensions, Extension, ExtensionStore, }; diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 84c0b316387da..b122120b36e44 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -22,7 +22,7 @@ use crate::{ }; use std::collections::{BTreeMap, HashMap}; use hash_db::Hasher; -use sp_trie::{MemoryDB, empty_trie_root, trie_types::Layout}; +use sp_trie::{MemoryDB, empty_trie_root, Layout}; use codec::Codec; use sp_core::storage::{ChildInfo, Storage}; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 91bd575316fd7..34363dfa6fe3a 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ 
-143,7 +143,7 @@ mod changes_trie { #[cfg(feature = "std")] mod std_reexport { - pub use sp_trie::{trie_types::{Layout, TrieDBMut}, StorageProof, TrieMut, + pub use sp_trie::{trie_types::TrieDBMut, Layout, StorageProof, TrieMut, DBValue, MemoryDB}; pub use crate::testing::TestExternalities; pub use crate::basic::BasicExternalities; @@ -848,14 +848,13 @@ mod execution { } /// Check storage read proof on pre-created proving backend. - pub fn read_proof_check_on_proving_backend_generic( - proving_backend: &TrieBackend, H>, + pub fn read_proof_check_on_proving_backend_generic( + proving_backend: &TrieBackend, H>, key: &[u8], ) -> Result>, Box> where H: Hasher, H::Out: Ord + Codec, - MH: sp_trie::MetaHasher>, KF: sp_trie::KeyFunction + Send + Sync, { proving_backend.storage(key).map_err(|e| Box::new(e) as Box) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index ceeba59c83611..206024af44095 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -24,9 +24,9 @@ use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, - record_all_keys, StorageProof, TrieMeta, + record_all_keys, StorageProof, Meta, Layout, Recorder, }; -pub use sp_trie::{Recorder, trie_types::{Layout, TrieError}}; +pub use sp_trie::trie_types::TrieError; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; use crate::{Error, ExecutionError, Backend, DBValue}; @@ -35,7 +35,7 @@ use sp_core::storage::ChildInfo; /// Patricia trie-based backend specialized in get value proofs. 
pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { pub(crate) backend: &'a TrieBackendEssence, - pub(crate) proof_recorder: &'a mut Recorder, + pub(crate) proof_recorder: &'a mut Recorder, } impl<'a, S, H> ProvingBackendRecorder<'a, S, H> @@ -112,7 +112,7 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> #[derive(Default)] struct ProofRecorderInner { /// All the records that we have stored so far. - records: HashMap>, + records: HashMap>, /// The encoded size of all recorded values. encoded_size: usize, } @@ -125,17 +125,17 @@ pub struct ProofRecorder { impl ProofRecorder { /// Record the given `key` => `val` combination. - pub fn record(&self, key: Hash, mut val: Option<(DBValue, TrieMeta)>) { + pub fn record(&self, key: Hash, val: Option) { let mut inner = self.inner.write(); let ProofRecorderInner { encoded_size, records } = &mut *inner; records.entry(key).or_insert_with(|| { - if let Some(val) = val.as_mut() { - val.1.set_accessed_value(false); - sp_trie::resolve_encoded_meta::(val); - *encoded_size += sp_trie::estimate_entry_size(val, H::LENGTH); - } - val + val.map(|val| { + let mut val = (val, Meta::default(), false); + sp_trie::resolve_encoded_meta::(&mut val); + *encoded_size += sp_trie::estimate_entry_size(&val, H::LENGTH); + val + }) }); } @@ -146,9 +146,9 @@ impl ProofRecorder { records.entry(key.clone()) .and_modify(|entry| { if let Some(entry) = entry.as_mut() { - if !entry.1.accessed_value() { + if !entry.2 { let old_size = sp_trie::estimate_entry_size(entry, hash_len); - entry.1.set_accessed_value(true); + entry.2 = true; let new_size = sp_trie::estimate_entry_size(entry, hash_len); *encoded_size += new_size; *encoded_size -= old_size; @@ -158,8 +158,9 @@ impl ProofRecorder { } /// Returns the value at the given `key`. 
- pub fn get(&self, key: &Hash) -> Option> { - self.inner.read().records.get(key).cloned() + pub fn get(&self, key: &Hash) -> Option> { + self.inner.read().records.get(key).as_ref() + .map(|v| v.as_ref().map(|v| v.0.clone())) } /// Returns the estimated encoded size of the proof. @@ -174,14 +175,18 @@ impl ProofRecorder { /// Convert into a [`StorageProof`]. pub fn to_storage_proof(&self) -> StorageProof { - let inner = self.inner.read(); - let trie_nodes = inner + let trie_nodes = self.inner.read() .records .iter() .filter_map(|(_k, v)| v.as_ref().map(|v| { - < - as sp_trie::TrieLayout>::MetaHasher as hash_db::MetaHasher - >::stored_value(v.0.as_slice(), v.1.clone()) + let mut meta = v.1.clone(); + if let Some(hashed) = sp_trie::to_hashed_variant::( + v.0.as_slice(), &mut meta, v.2, + ) { + hashed + } else { + v.0.clone() + } })) .collect(); @@ -246,13 +251,12 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage &self, key: &H::Out, prefix: Prefix, - global: Option, - ) -> Result, String> { + ) -> Result, String> { if let Some(v) = self.proof_recorder.get(key) { return Ok(v); } - let backend_value = self.backend.get(key, prefix, global)?; + let backend_value = self.backend.get(key, prefix)?; self.proof_recorder.record::(key.clone(), backend_value.clone()); Ok(backend_value) } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 83643f645548c..c09a6e6e0899b 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -19,8 +19,9 @@ use crate::{warn, debug}; use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root}; -use sp_trie::trie_types::{TrieDB, TrieError, Layout}; +use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root, + Layout}; +use sp_trie::trie_types::{TrieDB, TrieError}; use sp_core::storage::{ChildInfo, ChildType}; use codec::{Codec, Decode}; use 
crate::{ diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 0c45923e374e0..e22f4d96c680e 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -23,10 +23,10 @@ use std::sync::Arc; use sp_std::{ops::Deref, boxed::Box, vec::Vec}; use crate::{warn, debug}; use hash_db::{self, Hasher, Prefix, AsHashDB, HashDB, HashDBRef}; -use sp_trie::{Trie, PrefixedMemoryDB, DBValue, +use sp_trie::{Trie, PrefixedMemoryDB, DBValue, Layout, empty_child_trie_root, read_trie_value, read_child_trie_value, - KeySpacedDB, TrieDBIterator, TrieDBKeyIterator, TrieMeta}; -use sp_trie::trie_types::{TrieDB, TrieError, Layout}; + KeySpacedDB, TrieDBIterator, TrieDBKeyIterator}; +use sp_trie::trie_types::{TrieDB, TrieError}; use crate::{backend::Consolidate, StorageKey, StorageValue}; use sp_core::storage::ChildInfo; use codec::Encode; @@ -47,8 +47,7 @@ pub trait Storage: Send + Sync { &self, key: &H::Out, prefix: Prefix, - alt_hashing: Option, - ) -> Result>; + ) -> Result>; /// Call back when value get accessed in trie. 
fn access_from(&self, key: &H::Out); } @@ -136,7 +135,7 @@ impl, H: Hasher> TrieBackendEssence where H::Out: child_info: Option<&ChildInfo>, key: &[u8], ) -> Result> { - let dyn_eph: &dyn HashDBRef<_, _, _, _>; + let dyn_eph: &dyn HashDBRef<_, _>; let keyspace_eph; if let Some(child_info) = child_info.as_ref() { keyspace_eph = KeySpacedDB::new(self, child_info.keyspace()); @@ -336,13 +335,11 @@ pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { overlay: &'a mut S::Overlay, } -impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> AsHashDB> +impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> AsHashDB for Ephemeral<'a, S, H> { - fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB> + 'b) { self } - fn as_hash_db_mut<'b>( - &'b mut self, - ) -> &'b mut (dyn HashDB> + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { self } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { self } } impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { @@ -354,23 +351,14 @@ impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDB> +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - HashDB::get_with_meta(self, key, prefix, Default::default()).map(|r| r.0) - } - - fn get_with_meta( - &self, - key: &H::Out, - prefix: Prefix, - global: Option, - ) -> Option<(DBValue, TrieMeta)> { - if let Some(val) = HashDB::get_with_meta(self.overlay, key, prefix, global) { + if let Some(val) = HashDB::get(self.overlay, key, prefix) { Some(val) } else { - match self.storage.get(&key, prefix, global) { + match self.storage.get(&key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -394,40 +382,26 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDB H::Out { - HashDB::insert_with_meta(self.overlay, prefix, value, meta) - } - fn 
emplace(&mut self, key: H::Out, prefix: Prefix, value: DBValue) { HashDB::emplace(self.overlay, key, prefix, value) } + fn emplace_ref(&mut self, key: &H::Out, prefix: Prefix, value: &[u8]) { + HashDB::emplace_ref(self.overlay, key, prefix, value) + } + fn remove(&mut self, key: &H::Out, prefix: Prefix) { HashDB::remove(self.overlay, key, prefix) } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDBRef> +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDBRef for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { HashDB::get(self, key, prefix) } - fn get_with_meta( - &self, - key: &H::Out, - prefix: Prefix, - global: Option, - ) -> Option<(DBValue, TrieMeta)> { - HashDB::get_with_meta(self, key, prefix, global) - } - fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> Option { HashDB::access_from(self, key, at) } @@ -440,9 +414,9 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDBRef: Send + Sync { /// Type of in-memory overlay. - type Overlay: HashDB> + Default + Consolidate; + type Overlay: HashDB + Default + Consolidate; /// Get the value stored at key. - fn get(&self, key: &H::Out, prefix: Prefix, global: Option) -> Result>; + fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; /// Call back when value get accessed in trie. 
fn access_from(&self, key: &H::Out); } @@ -456,9 +430,8 @@ impl TrieBackendStorage for Arc> { &self, key: &H::Out, prefix: Prefix, - global: Option, - ) -> Result> { - Storage::::get(self.deref(), key, prefix, global) + ) -> Result> { + Storage::::get(self.deref(), key, prefix) } fn access_from(&self, key: &H::Out) { @@ -466,21 +439,15 @@ impl TrieBackendStorage for Arc> { } } -impl TrieBackendStorage for sp_trie::GenericMemoryDB +impl TrieBackendStorage for sp_trie::GenericMemoryDB where H: Hasher, - MH: sp_trie::MetaHasher>, KF: sp_trie::KeyFunction + Send + Sync, { type Overlay = Self; - fn get( - &self, - key: &H::Out, - prefix: Prefix, - global: Option, - ) -> Result> { - Ok(HashDB::get_with_meta(self, key, prefix, global)) + fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { + Ok(hash_db::HashDB::get(self, key, prefix)) } fn access_from(&self, key: &H::Out) { @@ -488,32 +455,21 @@ impl TrieBackendStorage for sp_trie::GenericMemoryDB } } -impl, H: Hasher> AsHashDB> +impl, H: Hasher> AsHashDB for TrieBackendEssence { - fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB> + 'b) { self } - fn as_hash_db_mut<'b>( - &'b mut self, - ) -> &'b mut (dyn HashDB> + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { self } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { self } } -impl, H: Hasher> HashDB> +impl, H: Hasher> HashDB for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - HashDB::get_with_meta(self, key, prefix, Default::default()).map(|r| r.0) - } - - fn get_with_meta( - &self, - key: &H::Out, - prefix: Prefix, - global: Option, - ) -> Option<(DBValue, TrieMeta)> { if *key == self.empty { - return Some(([0u8].to_vec(), ::meta_for_empty(global))) + return Some([0u8].to_vec()) } - match self.storage.get(&key, prefix, global) { + match self.storage.get(&key, prefix) { Ok(x) => x, Err(e) => { warn!(target: "trie", "Failed to read from DB: {}", e); @@ -532,15 +488,15 @@ impl, H: Hasher> HashDB 
H::Out { + fn insert(&mut self, _prefix: Prefix, _value: &[u8]) -> H::Out { unimplemented!(); } - fn insert(&mut self, _prefix: Prefix, _value: &[u8]) -> H::Out { + fn emplace(&mut self, _key: H::Out, _prefix: Prefix, _value: DBValue) { unimplemented!(); } - fn emplace(&mut self, _key: H::Out, _prefix: Prefix, _value: DBValue) { + fn emplace_ref(&mut self, _key: &H::Out, _prefix: Prefix, _value: &[u8]) { unimplemented!(); } @@ -549,22 +505,13 @@ impl, H: Hasher> HashDB, H: Hasher> HashDBRef> +impl, H: Hasher> HashDBRef for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { HashDB::get(self, key, prefix) } - fn get_with_meta( - &self, - key: &H::Out, - prefix: Prefix, - global: Option, - ) -> Option<(DBValue, TrieMeta)> { - HashDB::get_with_meta(self, key, prefix, global) - } - fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> Option { HashDB::access_from(self, key, at) } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 149e2166b68e9..79068c585eb6e 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -41,161 +41,38 @@ pub use storage_proof::{StorageProof, CompactProof}; pub use trie_db::{ Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, nibble_ops, TrieDBIterator, TrieDBKeyIterator, Meta, node::{NodePlan, ValuePlan}, - GlobalMeta, }; /// Various re-exports from the `memory-db` crate. pub use memory_db::KeyFunction; pub use memory_db::prefixed_key; /// Various re-exports from the `hash-db` crate. -pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX, MetaHasher}; -pub use hash_db::NoMeta; +pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; /// Trie codec reexport, mainly child trie support /// for trie compact proof. pub use trie_codec::{decode_compact, encode_compact, Error as CompactProofError}; -/// Meta use by trie state. -#[derive(Default, Clone, Debug)] -pub struct TrieMeta { - /// Range of encoded value or hashed value. 
- /// When encoded value, it includes the length of the value. - pub range: Option>, - /// Defined in the trie layout, when used with - /// `TrieDbMut` it switch nodes to alternative hashing - /// method by defining the threshold to use with alternative - /// hashing. - /// Trie codec or other proof manipulation will always use - /// `None` in order to prevent state change on reencoding. - pub try_inner_hashing: Option, - /// Flag indicating alternative value hash is currently use - /// or will be use. - pub apply_inner_hashing: bool, - /// Does current encoded contains a hash instead of - /// a value (information stored in meta for proofs). - pub contain_hash: bool, - /// Record if a value was accessed, this is - /// set as accessed by defalult, but can be - /// change on access explicitely: `HashDB::get_with_meta`. - /// and reset on access explicitely: `HashDB::access_from`. - /// Not strictly needed in this struct, but does not add memory usage here. - pub unused_value: bool, -} - -impl Meta for TrieMeta { - /// When true apply inner hashing of value. - type GlobalMeta = Option; - - /// When true apply inner hashing of value. 
- type StateMeta = bool; - - fn set_state_meta(&mut self, state_meta: Self::StateMeta) { - self.apply_inner_hashing = state_meta; - } - - fn read_state_meta(&self) -> Self::StateMeta { - self.apply_inner_hashing - } - - fn read_global_meta(&self) -> Self::GlobalMeta { - self.try_inner_hashing - } - - fn set_global_meta(&mut self, global_meta: Self::GlobalMeta) { - self.try_inner_hashing = global_meta; - } - - fn meta_for_new( - global: Self::GlobalMeta, - ) -> Self { - let mut result = Self::default(); - result.set_global_meta(global); - result - } - - fn meta_for_existing_inline_node( - global: Self::GlobalMeta, - ) -> Self { - Self::meta_for_new(global) - } - - fn meta_for_empty( - global: Self::GlobalMeta, - ) -> Self { - Self::meta_for_new(global) - } - - fn encoded_value_callback( - &mut self, - value_plan: ValuePlan, - ) { - let (contain_hash, range) = match value_plan { - ValuePlan::Value(range, with_len) => (false, with_len..range.end), - ValuePlan::HashedValue(range, _size) => (true, range), - ValuePlan::NoValue => return, - }; - - if let Some(threshold) = self.try_inner_hashing.clone() { - self.apply_inner_hashing = range.end - range.start >= threshold as usize; - } - - self.range = Some(range); - self.contain_hash = contain_hash; - } - - fn decoded_callback( - &mut self, - node_plan: &NodePlan, - ) { - let (contain_hash, range) = match node_plan.value_plan() { - Some(ValuePlan::Value(range, with_len)) => (false, *with_len..range.end), - Some(ValuePlan::HashedValue(range, _size)) => (true, range.clone()), - Some(ValuePlan::NoValue) => return, - None => return, - }; - - self.range = Some(range); - self.contain_hash = contain_hash; - } - - fn contains_hash_of_value(&self) -> bool { - self.contain_hash - } -} - -impl TrieMeta { - /// Was value accessed. - pub fn accessed_value(&mut self) -> bool { - !self.unused_value - } - - /// For proof, this allow setting node as unaccessed until - /// a call to `access_from`. 
- pub fn set_accessed_value(&mut self, accessed: bool) { - self.unused_value = !accessed; - } -} - /// substrate trie layout -pub struct Layout(Option, sp_std::marker::PhantomData<(H, M)>); +pub struct Layout(Option, sp_std::marker::PhantomData); -impl fmt::Debug for Layout { +impl fmt::Debug for Layout { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Layout").finish() } } -impl Clone for Layout { +impl Clone for Layout { fn clone(&self) -> Self { Layout(self.0, sp_std::marker::PhantomData) } } -impl Default for Layout { +impl Default for Layout { fn default() -> Self { Layout(None, sp_std::marker::PhantomData) } } -impl Layout { +impl Layout { /// Layout with inner hashing active. /// Will flag trie for hashing. pub fn with_alt_hashing(threshold: u32) -> Self { @@ -203,11 +80,9 @@ impl Layout { } } -impl TrieLayout for Layout +impl TrieLayout for Layout where H: Hasher, - M: MetaHasher>, - M::Meta: Meta, StateMeta = bool>, { const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; @@ -215,108 +90,22 @@ impl TrieLayout for Layout type Hash = H; type Codec = NodeCodec; - type MetaHasher = M; - type Meta = M::Meta; - fn global_meta(&self) -> GlobalMeta { + fn alt_threshold(&self) -> Option { self.0 } } -/// Hasher with support to meta. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct StateHasher; - -impl MetaHasher for StateHasher - where - H: Hasher, -{ - type Meta = TrieMeta; - type GlobalMeta = Option; - - fn hash(value: &[u8], meta: &Self::Meta) -> H::Out { - match &meta { - TrieMeta { range: Some(range), contain_hash: false, apply_inner_hashing: true, .. } => { - let value = inner_hashed_value::(value, Some((range.start, range.end))); - H::hash(value.as_slice()) - }, - TrieMeta { range: Some(_range), contain_hash: true, .. } => { - // value contains a hash of data (already inner_hashed_value). 
- H::hash(value) - }, - _ => { - H::hash(value) - }, - } - } - - fn stored_value(value: &[u8], mut meta: Self::Meta) -> DBValue { - let mut stored = Vec::with_capacity(value.len() + 1); - if meta.contain_hash { - // already contain hash, just flag it. - stored.push(trie_constants::DEAD_HEADER_META_HASHED_VALUE); - stored.extend_from_slice(value); - return stored; - } - if meta.unused_value && meta.apply_inner_hashing { - if meta.range.is_some() { - // Warning this assumes that encoded value cannot start by this, - // so it is tightly coupled with the header type of the codec. - stored.push(trie_constants::DEAD_HEADER_META_HASHED_VALUE); - let range = meta.range.as_ref().expect("Tested in condition"); - meta.contain_hash = true; // useless but could be with meta as &mut - // store hash instead of value. - let value = inner_hashed_value::(value, Some((range.start, range.end))); - stored.extend_from_slice(value.as_slice()); - return stored; - } - } - stored.extend_from_slice(value); - stored - } - - fn stored_value_owned(value: DBValue, meta: Self::Meta) -> DBValue { - >::stored_value(value.as_slice(), meta) - } - - fn extract_value(mut stored: &[u8], global_meta: Self::GlobalMeta) -> (&[u8], Self::Meta) { - let input = &mut stored; - let mut contain_hash = false; - if input.get(0) == Some(&trie_constants::DEAD_HEADER_META_HASHED_VALUE) { - contain_hash = true; - *input = &input[1..]; - } - let mut meta = TrieMeta { - range: None, - unused_value: contain_hash, - contain_hash, - apply_inner_hashing: false, - try_inner_hashing: None, - }; - meta.set_global_meta(global_meta); - (stored, meta) - } - - fn extract_value_owned(mut stored: DBValue, global: Self::GlobalMeta) -> (DBValue, Self::Meta) { - let len = stored.len(); - let (v, meta) = >::extract_value(stored.as_slice(), global); - let removed = len - v.len(); - (stored.split_off(removed), meta) - } -} - -impl TrieConfiguration for Layout +impl TrieConfiguration for Layout where H: Hasher, - M: MetaHasher>, - M::Meta: 
Meta, StateMeta = bool>, { fn trie_root(&self, input: I) -> ::Out where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::trie_root_no_extension::(input, self.global_meta()) + trie_root::trie_root_no_extension::(input, self.alt_threshold()) } fn trie_root_unhashed(&self, input: I) -> Vec where @@ -324,7 +113,7 @@ impl TrieConfiguration for Layout A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::unhashed_trie_no_extension::(input, self.global_meta()) + trie_root::unhashed_trie_no_extension::(input, self.alt_threshold()) } fn encode_index(input: u32) -> Vec { @@ -340,25 +129,25 @@ type MemTracker = memory_db::MemCounter; /// TrieDB error over `TrieConfiguration` trait. pub type TrieError = trie_db::TrieError, CError>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub trait AsHashDB: hash_db::AsHashDB {} -impl> AsHashDB for T {} +pub trait AsHashDB: hash_db::AsHashDB {} +impl> AsHashDB for T {} /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub type HashDB<'a, H, M, GM> = dyn hash_db::HashDB + 'a; +pub type HashDB<'a, H> = dyn hash_db::HashDB + 'a; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. /// This uses a `KeyFunction` for prefixing keys internally (avoiding /// key conflict for non random keys). pub type PrefixedMemoryDB = memory_db::MemoryDB< - H, memory_db::PrefixedKey, trie_db::DBValue, StateHasher, MemTracker + H, memory_db::PrefixedKey, trie_db::DBValue, MemTracker, >; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. /// This uses a noops `KeyFunction` (key addressing must be hashed or using /// an encoding scheme that avoid key conflict). pub type MemoryDB = memory_db::MemoryDB< - H, memory_db::HashKey, trie_db::DBValue, StateHasher, MemTracker, + H, memory_db::HashKey, trie_db::DBValue, MemTracker, >; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. 
-pub type GenericMemoryDB = memory_db::MemoryDB< - H, KF, trie_db::DBValue, MH, MemTracker, +pub type GenericMemoryDB = memory_db::MemoryDB< + H, KF, trie_db::DBValue, MemTracker, >; /// Persistent trie database read-access interface for the a given hasher. @@ -372,8 +161,7 @@ pub type TrieHash = <::Hash as Hasher>::Out; /// This module is for non generic definition of trie type. /// Only the `Hasher` trait is generic in this case. pub mod trie_types { - /// State layout. - pub type Layout = super::Layout; + use super::Layout; /// Persistent trie database read-access interface for the a given hasher. pub type TrieDB<'a, H> = super::TrieDB<'a, Layout>; /// Persistent trie database write-access interface for the a given hasher. @@ -400,7 +188,7 @@ pub fn generate_trie_proof<'a, L, I, K, DB>( L: TrieConfiguration, I: IntoIterator, K: 'a + AsRef<[u8]>, - DB: hash_db::HashDBRef>, + DB: hash_db::HashDBRef, { // Can use default layout (read only). let trie = TrieDB::::new(db, &root)?; @@ -427,7 +215,7 @@ pub fn verify_trie_proof<'a, L, I, K, V>( { // No specific info to read from layout. let layout = Default::default(); - verify_proof::, _, _, _>(root, proof, items, layout) + verify_proof::, _, _, _>(root, proof, items, layout) } /// Determine a trie root given a hash DB and delta values. @@ -441,7 +229,7 @@ pub fn delta_trie_root( A: Borrow<[u8]>, B: Borrow>, V: Borrow<[u8]>, - DB: hash_db::HashDB>, + DB: hash_db::HashDB, { { let mut trie = TrieDBMut::::from_existing_with_layout(db, &mut root, layout)?; @@ -468,7 +256,7 @@ pub fn read_trie_value( ) -> Result>, Box>> where L: TrieConfiguration, - DB: hash_db::HashDBRef>, + DB: hash_db::HashDBRef, { Ok(TrieDB::::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) 
} @@ -482,8 +270,8 @@ pub fn read_trie_value_with ( ) -> Result>, Box>> where L: TrieConfiguration, - Q: Query, - DB: hash_db::HashDBRef> + Q: Query, + DB: hash_db::HashDBRef { Ok(TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) } @@ -527,7 +315,7 @@ pub fn child_delta_trie_root( B: Borrow>, V: Borrow<[u8]>, RD: AsRef<[u8]>, - DB: hash_db::HashDB>, + DB: hash_db::HashDB, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. @@ -546,9 +334,9 @@ pub fn child_delta_trie_root( pub fn record_all_keys( db: &DB, root: &TrieHash, - recorder: &mut Recorder, L::Meta> + recorder: &mut Recorder> ) -> Result<(), Box>> where - DB: hash_db::HashDBRef>, + DB: hash_db::HashDBRef, { let trie = TrieDB::::new(&*db, root)?; let iter = trie.iter()?; @@ -573,7 +361,7 @@ pub fn read_child_trie_value( key: &[u8] ) -> Result>, Box>> where - DB: hash_db::HashDBRef>, + DB: hash_db::HashDBRef, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. @@ -593,8 +381,8 @@ pub fn read_child_trie_value_with( ) -> Result>, Box>> where L: TrieConfiguration, - Q: Query, - DB: hash_db::HashDBRef>, + Q: Query, + DB: hash_db::HashDBRef, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. 
@@ -641,8 +429,8 @@ impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where } } -impl<'a, DB, H, T, M, GM> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where - DB: hash_db::HashDBRef, +impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where + DB: hash_db::HashDBRef, H: Hasher, T: From<&'static [u8]>, { @@ -655,19 +443,14 @@ impl<'a, DB, H, T, M, GM> hash_db::HashDBRef for KeySpacedDB<'a, DB self.0.access_from(key, at) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: GM) -> Option<(T, M)> { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get_with_meta(key, (&derived_prefix.0, derived_prefix.1), global) - } - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) } } -impl<'a, DB, H, T, M, GM> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, +impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where + DB: hash_db::HashDB, H: Hasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, { @@ -680,11 +463,6 @@ impl<'a, DB, H, T, M, GM> hash_db::HashDB for KeySpacedDBMut<'a, DB self.0.access_from(key, at) } - fn get_with_meta(&self, key: &H::Out, prefix: Prefix, global: GM) -> Option<(T, M)> { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get_with_meta(key, (&derived_prefix.0, derived_prefix.1), global) - } - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) @@ -695,19 +473,14 @@ impl<'a, DB, H, T, M, GM> hash_db::HashDB for KeySpacedDBMut<'a, DB self.0.insert((&derived_prefix.0, derived_prefix.1), value) } - fn insert_with_meta( - &mut self, - prefix: Prefix, - value: &[u8], - meta: M, - ) -> H::Out { + fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { let 
derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.insert_with_meta((&derived_prefix.0, derived_prefix.1), value, meta) + self.0.emplace(key, (&derived_prefix.0, derived_prefix.1), value) } - fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { + fn emplace_ref(&mut self, key: &H::Out, prefix: Prefix, value: &[u8]) { let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.emplace(key, (&derived_prefix.0, derived_prefix.1), value) + self.0.emplace_ref(key, (&derived_prefix.0, derived_prefix.1), value) } fn remove(&mut self, key: &H::Out, prefix: Prefix) { @@ -716,14 +489,14 @@ impl<'a, DB, H, T, M, GM> hash_db::HashDB for KeySpacedDBMut<'a, DB } } -impl<'a, DB, H, T, M, GM> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, +impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where + DB: hash_db::HashDB, H: Hasher, T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, { - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } + fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { &mut *self } } @@ -765,10 +538,10 @@ fn inner_hashed_value(x: &[u8], range: Option<(usize, usize)>) -> Vec } /// Estimate encoded size of node. 
-pub fn estimate_entry_size(entry: &(DBValue, TrieMeta), hash_len: usize) -> usize { +pub fn estimate_entry_size(entry: &(DBValue, Meta, bool), hash_len: usize) -> usize { use codec::Encode; let mut full_encoded = entry.0.encoded_size(); - if entry.1.unused_value && entry.1.apply_inner_hashing { + if !entry.2 && entry.1.apply_inner_hashing { if let Some(range) = entry.1.range.as_ref() { let value_size = range.end - range.start; full_encoded -= value_size; @@ -780,10 +553,31 @@ pub fn estimate_entry_size(entry: &(DBValue, TrieMeta), hash_len: usize) -> usiz full_encoded } +/// Switch to hashed value variant. +pub fn to_hashed_variant( + value: &[u8], + meta: &mut Meta, + used_value: bool, +) -> Option { + if !meta.contain_hash && meta.apply_inner_hashing && !used_value && meta.range.is_some() { + let mut stored = Vec::with_capacity(value.len() + 1); + // Warning this assumes that encoded value cannot start by this, + // so it is tightly coupled with the header type of the codec. + stored.push(trie_constants::DEAD_HEADER_META_HASHED_VALUE); + let range = meta.range.as_ref().expect("Tested in condition"); + // store hash instead of value. + let value = inner_hashed_value::(value, Some((range.start, range.end))); + stored.extend_from_slice(value.as_slice()); + meta.contain_hash = true; + return Some(stored); + } + None +} + /// Decode plan in order to update meta early (needed to register proofs). -pub fn resolve_encoded_meta(entry: &mut (DBValue, TrieMeta)) { +pub fn resolve_encoded_meta(entry: &mut (DBValue, Meta, bool)) { use trie_db::NodeCodec; - let _ = as TrieLayout>::Codec::decode_plan(entry.0.as_slice(), &mut entry.1); + let _ = as TrieLayout>::Codec::decode_plan(entry.0.as_slice(), &mut entry.1); } /// Constants used into trie simplification codec. 
@@ -811,14 +605,14 @@ mod tests { use trie_standardmap::{Alphabet, ValueMode, StandardMap}; use hex_literal::hex; - type Layout = super::trie_types::Layout; + type Layout = super::Layout; - type MemoryDBMeta = memory_db::MemoryDB< - H, memory_db::HashKey, trie_db::DBValue, M, MemTracker, + type MemoryDBMeta = memory_db::MemoryDB< + H, memory_db::HashKey, trie_db::DBValue, MemTracker, >; fn hashed_null_node() -> TrieHash { - >::hashed_null_node() + ::hashed_null_node() } fn check_equivalent(input: &Vec<(&[u8], &[u8])>, layout: T) { @@ -827,7 +621,7 @@ mod tests { let d = layout.trie_root_unhashed(input.clone()); println!("Data: {:#x?}, {:#x?}", d, Blake2Hasher::hash(&d[..])); let persistent = { - let mut memdb = MemoryDBMeta::<_, T::MetaHasher>::default(); + let mut memdb = MemoryDBMeta::default(); let mut root = Default::default(); let mut t = TrieDBMut::::new_with_layout(&mut memdb, &mut root, layout); for (x, y) in input.iter().rev() { @@ -840,7 +634,7 @@ mod tests { } fn check_iteration(input: &Vec<(&[u8], &[u8])>, layout: T) { - let mut memdb = MemoryDBMeta::<_, T::MetaHasher>::default(); + let mut memdb = MemoryDBMeta::default(); let mut root = Default::default(); { let mut t = TrieDBMut::::new_with_layout(&mut memdb, &mut root, layout.clone()); @@ -969,13 +763,13 @@ mod tests { } fn populate_trie<'db, T>( - db: &'db mut dyn HashDB>, + db: &'db mut dyn HashDB, root: &'db mut TrieHash, v: &[(Vec, Vec)], layout: T, ) -> TrieDBMut<'db, T> where - T: TrieConfiguration, + T: TrieConfiguration, { let mut t = TrieDBMut::::new_with_layout(db, root, layout); for i in 0..v.len() { diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 3f90bf8126352..bed8ae73b5852 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -88,16 +88,16 @@ impl<'a> Input for ByteSliceInput<'a> { pub struct NodeCodec(PhantomData); impl NodeCodec { - fn decode_plan_inner_hashed>( + fn decode_plan_inner_hashed( data: &[u8], - 
meta: &mut M, + meta: &mut Meta, ) -> Result { let mut input = ByteSliceInput::new(data); - let contains_hash = meta.contains_hash_of_value(); let header = NodeHeader::decode(&mut input)?; + let contains_hash = header.contains_hash_of_value(); let alt_hashing = header.alt_hashing(); - meta.set_state_meta(alt_hashing); + meta.apply_inner_hashing = alt_hashing; let branch_has_value = if let NodeHeader::Branch(has_value, _) = &header { *has_value @@ -108,7 +108,7 @@ impl NodeCodec { match header { NodeHeader::Null => Ok(NodePlan::Empty), - NodeHeader::AltHashBranch(nibble_count) + NodeHeader::AltHashBranch(nibble_count, _) | NodeHeader::Branch(_, nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) @@ -153,7 +153,7 @@ impl NodeCodec { children, }) }, - NodeHeader::AltHashLeaf(nibble_count) + NodeHeader::AltHashLeaf(nibble_count, _) | NodeHeader::Leaf(nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) @@ -181,19 +181,19 @@ impl NodeCodec { } } -impl NodeCodecT for NodeCodec +impl NodeCodecT for NodeCodec where H: Hasher, - M: Meta>, { + const OFFSET_CONTAINS_HASH: usize = 1; type Error = Error; type HashOut = H::Out; fn hashed_null_node() -> ::Out { - H::hash(>::empty_node_no_meta()) + H::hash(::empty_node()) } - fn decode_plan(data: &[u8], meta: &mut M) -> Result { + fn decode_plan(data: &[u8], meta: &mut Meta) -> Result { Self::decode_plan_inner_hashed(data, meta).map(|plan| { meta.decoded_callback(&plan); plan @@ -205,27 +205,28 @@ impl NodeCodecT for NodeCodec } fn is_empty_node(data: &[u8]) -> bool { - data == >::empty_node_no_meta() + data == ::empty_node() } - fn empty_node(_meta: &mut M) -> Vec { - sp_std::vec![trie_constants::EMPTY_TRIE] - } - - fn empty_node_no_meta() -> &'static [u8] { + fn empty_node() -> &'static [u8] { &[trie_constants::EMPTY_TRIE] } - fn leaf_node(partial: Partial, value: Value, meta: &mut M) 
-> Vec { + fn leaf_node(partial: Partial, value: Value, meta: &mut Meta) -> Vec { + let contains_hash = matches!(&value, Value::HashedValue(..)); // Note that we use AltHash type only if inner hashing will occur, // this way we allow changing hash threshold. // With fix inner hashing alt hash can be use with all node, but // that is not better (encoding can use an additional nibble byte // sometime). - let mut output = if meta.read_global_meta().as_ref().map(|threshold| + let mut output = if meta.try_inner_hashing.as_ref().map(|threshold| value_do_hash(&value, threshold) - ).unwrap_or(meta.read_state_meta()) { - partial_encode(partial, NodeKind::AltHashLeaf) + ).unwrap_or(meta.apply_inner_hashing) { + if contains_hash { + partial_encode(partial, NodeKind::AltHashLeafHash) + } else { + partial_encode(partial, NodeKind::AltHashLeaf) + } } else { partial_encode(partial, NodeKind::Leaf) }; @@ -254,7 +255,7 @@ impl NodeCodecT for NodeCodec _partial: impl Iterator, _nbnibble: usize, _child: ChildReference<::Out>, - _meta: &mut M, + _meta: &mut Meta, ) -> Vec { unreachable!() } @@ -262,7 +263,7 @@ impl NodeCodecT for NodeCodec fn branch_node( _children: impl Iterator::Out>>>>, _maybe_value: Value, - _meta: &mut M, + _meta: &mut Meta, ) -> Vec { unreachable!() } @@ -272,11 +273,13 @@ impl NodeCodecT for NodeCodec number_nibble: usize, children: impl Iterator::Out>>>>, value: Value, - meta: &mut M, + meta: &mut Meta, ) -> Vec { - let mut output = match (&value, meta.read_global_meta().as_ref().map(|threshold| + + let contains_hash = matches!(&value, Value::HashedValue(..)); + let mut output = match (&value, meta.try_inner_hashing.as_ref().map(|threshold| value_do_hash(&value, threshold) - ).unwrap_or(meta.read_state_meta())) { + ).unwrap_or(meta.apply_inner_hashing)) { (&Value::NoValue, _) => { partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) }, @@ -284,7 +287,11 @@ impl NodeCodecT for NodeCodec partial_from_iterator_encode(partial, 
number_nibble, NodeKind::BranchWithValue) }, (_, true) => { - partial_from_iterator_encode(partial, number_nibble, NodeKind::AltHashBranchWithValue) + if contains_hash { + partial_from_iterator_encode(partial, number_nibble, NodeKind::AltHashBranchWithValueHash) + } else { + partial_from_iterator_encode(partial, number_nibble, NodeKind::AltHashBranchWithValue) + } }, }; @@ -354,8 +361,11 @@ fn partial_from_iterator_encode>( NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), - NodeKind::AltHashLeaf => NodeHeader::AltHashLeaf(nibble_count).encode_to(&mut output), - NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(nibble_count) + NodeKind::AltHashLeaf => NodeHeader::AltHashLeaf(nibble_count, false).encode_to(&mut output), + NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(nibble_count, false) + .encode_to(&mut output), + NodeKind::AltHashLeafHash => NodeHeader::AltHashLeaf(nibble_count, true).encode_to(&mut output), + NodeKind::AltHashBranchWithValueHash => NodeHeader::AltHashBranch(nibble_count, true) .encode_to(&mut output), }; output.extend(partial); @@ -375,8 +385,11 @@ fn partial_encode(partial: Partial, node_kind: NodeKind) -> Vec { NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), - NodeKind::AltHashLeaf => NodeHeader::AltHashLeaf(nibble_count).encode_to(&mut output), - NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(nibble_count) + NodeKind::AltHashLeaf => NodeHeader::AltHashLeaf(nibble_count, false).encode_to(&mut output), + NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(nibble_count, false) + 
.encode_to(&mut output), + NodeKind::AltHashLeafHash => NodeHeader::AltHashLeaf(nibble_count, true).encode_to(&mut output), + NodeKind::AltHashBranchWithValueHash => NodeHeader::AltHashBranch(nibble_count, true) .encode_to(&mut output), }; if number_nibble_encoded > 0 { diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index 43ac67cbabcaf..ca1f375fb8eff 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -26,10 +26,24 @@ use sp_std::iter::once; #[derive(sp_core::RuntimeDebug)] pub(crate) enum NodeHeader { Null, + // contains wether there is a value and nibble count Branch(bool, usize), + // contains nibble count Leaf(usize), - AltHashBranch(usize), - AltHashLeaf(usize), + // contains nibble count and wether the value is a hash. + AltHashBranch(usize, bool), + // contains nibble count and wether the value is a hash. + AltHashLeaf(usize, bool), +} + +impl NodeHeader { + pub(crate) fn contains_hash_of_value(&self) -> bool { + match self { + NodeHeader::AltHashBranch(_, true) + | NodeHeader::AltHashLeaf(_, true) => true, + _ => false, + } + } } /// NodeHeader without content @@ -39,10 +53,15 @@ pub(crate) enum NodeKind { BranchWithValue, AltHashLeaf, AltHashBranchWithValue, + AltHashLeafHash, + AltHashBranchWithValueHash, } impl Encode for NodeHeader { fn encode_to(&self, output: &mut T) { + if self.contains_hash_of_value() { + output.write(&[trie_constants::DEAD_HEADER_META_HASHED_VALUE]); + } match self { NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE), NodeHeader::Branch(true, nibble_count) => @@ -51,9 +70,9 @@ impl Encode for NodeHeader { encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITHOUT_MASK, 2, output), NodeHeader::Leaf(nibble_count) => encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, 2, output), - NodeHeader::AltHashBranch(nibble_count) => + NodeHeader::AltHashBranch(nibble_count, _) => encode_size_and_prefix(*nibble_count, 
trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4, output), - NodeHeader::AltHashLeaf(nibble_count) => + NodeHeader::AltHashLeaf(nibble_count, _) => encode_size_and_prefix(*nibble_count, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3, output), } } @@ -76,19 +95,25 @@ impl codec::EncodeLike for NodeHeader {} impl Decode for NodeHeader { fn decode(input: &mut I) -> Result { - let i = input.read_byte()?; + let mut i = input.read_byte()?; if i == trie_constants::EMPTY_TRIE { return Ok(NodeHeader::Null); } + let contain_hash = if trie_constants::DEAD_HEADER_META_HASHED_VALUE == i { + i = input.read_byte()?; + true + } else { + false + }; match i & (0b11 << 6) { trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input, 2)?)), trie_constants::BRANCH_WITH_MASK => Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)), trie_constants::BRANCH_WITHOUT_MASK => Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)), trie_constants::EMPTY_TRIE => { if i & (0b111 << 5) == trie_constants::ALT_HASHING_LEAF_PREFIX_MASK { - Ok(NodeHeader::AltHashLeaf(decode_size(i, input, 3)?)) + Ok(NodeHeader::AltHashLeaf(decode_size(i, input, 3)?, contain_hash)) } else if i & (0b1111 << 4) == trie_constants::ALT_HASHING_BRANCH_WITH_MASK { - Ok(NodeHeader::AltHashBranch(decode_size(i, input, 4)?)) + Ok(NodeHeader::AltHashBranch(decode_size(i, input, 4)?, contain_hash)) } else { // do not allow any special encoding Err("Unallowed encoding".into()) diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 43a32b25ebfb7..cebad1911315f 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -18,9 +18,8 @@ use sp_std::vec::Vec; use codec::{Encode, Decode}; use hash_db::{Hasher, HashDB}; -use hash_db::MetaHasher; use trie_db::NodeCodec; -use crate::{trie_types::Layout, TrieLayout}; +use crate::{Layout, TrieLayout}; /// A proof that some set of key-value pairs are included in the storage trie. 
The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that @@ -181,15 +180,14 @@ impl From for crate::MemoryDB { // Using compact proof will work directly here (read trie structure and // work directly. for item in proof.trie_nodes.iter() { - // Note using `default()` to build proof is fine, do_value being in header - // and no switch needed. - let layout_meta = Default::default(); - let (encoded_node, mut meta) = < - as TrieLayout>::MetaHasher as MetaHasher - >::extract_value(item.as_slice(), layout_meta); - // read state meta (required for value layout and AltHash node. - let _ = as TrieLayout>::Codec::decode_plan(encoded_node, &mut meta); - db.insert_with_meta(crate::EMPTY_PREFIX, encoded_node, meta); + let mut meta = Default::default(); + // read state meta (required for value layout). + let _ = as TrieLayout>::Codec::decode_plan(item.as_slice(), &mut meta); + db.alt_insert( + crate::EMPTY_PREFIX, + item, + meta.resolve_alt_hashing::< as TrieLayout>::Codec>(), + ); } db } diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index 3ffc2a4197f95..a04ffce939c67 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -22,7 +22,7 @@ use crate::{ EMPTY_PREFIX, HashDBT, TrieHash, TrieError, TrieConfiguration, - CompactProof, StorageProof, GlobalMeta, TrieMeta, + CompactProof, StorageProof, }; use sp_std::boxed::Box; use sp_std::vec::Vec; @@ -109,8 +109,7 @@ pub fn decode_compact<'a, L, DB, I>( ) -> Result, Error> where L: TrieConfiguration, - DB: HashDBT> - + hash_db::HashDBRef>, + DB: HashDBT + hash_db::HashDBRef, I: IntoIterator, { let mut nodes_iter = encoded.into_iter(); @@ -163,7 +162,7 @@ pub fn decode_compact<'a, L, DB, I>( } } - if !HashDBT::::contains(db, &top_root, EMPTY_PREFIX) { + if !HashDBT::::contains(db, &top_root, EMPTY_PREFIX) { return Err(Error::IncompleteProof); } @@ -211,7 +210,7 @@ pub fn encode_compact( root: TrieHash, ) -> 
Result> where - L: TrieConfiguration, + L: TrieConfiguration, { let mut child_tries = Vec::new(); let partial_db = proof.into_memory_db(); @@ -251,7 +250,7 @@ pub fn encode_compact( }; for child_root in child_tries { - if !HashDBT::::contains(&partial_db, &child_root, EMPTY_PREFIX) { + if !HashDBT::::contains(&partial_db, &child_root, EMPTY_PREFIX) { // child proof are allowed to be missing (unused root can be included // due to trie structure modification). continue; diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index bba41fe6d81c0..375db3de04cd0 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -17,12 +17,12 @@ //! `TrieStream` implementation for Substrate's trie format. -use hash_db::{MetaHasher, Hasher}; +use hash_db::Hasher; use trie_root; use codec::{Encode, Compact}; use sp_std::vec::Vec; use sp_std::ops::Range; -use crate::{trie_constants, TrieMeta, StateHasher}; +use crate::{trie_constants}; use crate::node_header::{NodeKind, size_and_prefix_iterator}; use crate::node_codec::Bitmap; @@ -70,6 +70,8 @@ fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4), + NodeKind::AltHashBranchWithValueHash + | NodeKind::AltHashLeafHash => unreachable!("only added value that do not contain hash"), }; iter_start .chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None }) @@ -78,8 +80,6 @@ fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator; - fn new(meta: Option) -> Self { Self { buffer: Vec::new(), @@ -155,16 +155,10 @@ impl trie_root::TrieStream for TrieStream { 0..=31 => data.encode_to(&mut self.buffer), _ => { if apply_inner_hashing { - let meta = TrieMeta { - range: range, - unused_value: false, - contain_hash: false, - // Using `inner_value_hashing` instead to check this. - // And unused in hasher. 
- try_inner_hashing: None, - apply_inner_hashing: true, - }; - >>::hash(&data, &meta).as_ref() + hash_db::AltHashing { + encoded_offset: 0, + value_range: range.map(|r| (r.start, r.end)), + }.alt_hash::(&data).as_ref() .encode_to(&mut self.buffer); } else { H::hash(&data).as_ref().encode_to(&mut self.buffer); @@ -177,16 +171,11 @@ impl trie_root::TrieStream for TrieStream { let apply_inner_hashing = self.apply_inner_hashing; let range = self.current_value_range; let data = self.buffer; - let meta = TrieMeta { - range: range, - unused_value: false, - contain_hash: false, - try_inner_hashing: None, - apply_inner_hashing: true, - }; - if apply_inner_hashing { - >>::hash(&data, &meta) + hash_db::AltHashing { + encoded_offset: 0, + value_range: range.map(|r| (r.start, r.end)), + }.alt_hash::(&data) } else { H::hash(&data) } From 1e40e6d7da459de835387767f99664d5e17cb9b3 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 21 Jun 2021 11:30:53 +0200 Subject: [PATCH 060/188] Finish update to refactored upstream. 
--- Cargo.lock | 24 ++++++++--------- Cargo.toml | 10 +++---- bin/node/bench/src/simple_trie.rs | 27 +++++++------------ bin/node/bench/src/trie.rs | 12 ++------- client/api/src/cht.rs | 4 +-- client/db/src/bench.rs | 18 ++++--------- client/db/src/lib.rs | 26 +++++------------- client/executor/src/integration_tests/mod.rs | 2 +- client/service/test/src/client/mod.rs | 2 +- .../state-machine/src/trie_backend_essence.rs | 1 + .../transaction-storage-proof/src/lib.rs | 2 +- primitives/trie/benches/bench.rs | 4 +-- primitives/trie/src/lib.rs | 2 +- 13 files changed, 49 insertions(+), 85 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f97655a692d0b..4fb09ef6c6d2c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2354,7 +2354,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" [[package]] name = "hash256-std-hasher" @@ -2368,7 +2368,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" dependencies = [ "crunchy", ] @@ -3050,10 +3050,10 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" dependencies = [ "hash-db", - "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple7)", + "hash256-std-hasher 0.15.2 
(git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", "tiny-keccak", ] @@ -3865,7 +3865,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" dependencies = [ "hash-db", "hashbrown", @@ -10551,22 +10551,22 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" dependencies = [ "criterion", "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple7)", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", "memory-db", "parity-scale-codec", "trie-db", "trie-root", - "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple7)", + "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", ] [[package]] name = "trie-db" version = "0.22.5" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" dependencies = [ "hash-db", "hashbrown", @@ -10578,7 +10578,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" 
dependencies = [ "hash-db", ] @@ -10596,10 +10596,10 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple7#c7e0aeb8fd74c23845efe4b6daee43b7f4084faa" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" dependencies = [ "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple7)", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index fc427dd2f2e16..754e1ea9f9b7d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -278,8 +278,8 @@ zeroize = { opt-level = 3 } panic = "unwind" [patch.crates-io] -hash-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple7" } -memory-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple7" } -trie-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple7" } -trie-root = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple7" } -trie-bench = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple7" } +hash-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } +memory-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } +trie-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } +trie-root = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } +trie-bench = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } diff --git a/bin/node/bench/src/simple_trie.rs b/bin/node/bench/src/simple_trie.rs index f4a4576508f6c..ae3c1c6ce28df 100644 --- a/bin/node/bench/src/simple_trie.rs +++ b/bin/node/bench/src/simple_trie.rs @@ -20,7 +20,7 @@ use std::{collections::HashMap, sync::Arc}; use kvdb::KeyValueDB; 
use node_primitives::Hash; -use sp_trie::{DBValue, Meta, StateHasher, MetaHasher}; +use sp_trie::DBValue; use hash_db::{HashDB, AsHashDB, Prefix, Hasher as _}; pub type Hasher = sp_core::Blake2Hasher; @@ -31,15 +31,15 @@ pub struct SimpleTrie<'a> { pub overlay: &'a mut HashMap, Option>>, } -impl<'a> AsHashDB> for SimpleTrie<'a> { - fn as_hash_db(&self) -> &dyn hash_db::HashDB> { &*self } +impl<'a> AsHashDB for SimpleTrie<'a> { + fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB> + 'b) { + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { &mut *self } } -impl<'a> HashDB> for SimpleTrie<'a> { +impl<'a> HashDB for SimpleTrie<'a> { fn get(&self, key: &Hash, prefix: Prefix) -> Option { let key = sp_trie::prefixed_key::(key, prefix); if let Some(value) = self.overlay.get(&key) { @@ -48,22 +48,10 @@ impl<'a> HashDB> for SimpleTrie<'a> { self.db.get(0, &key).expect("Database backend error") } - fn get_with_meta(&self, key: &Hash, prefix: Prefix, global: Option) -> Option<(DBValue, Meta)> { - let result = self.get(key, prefix); - result.map(|value| >::extract_value_owned(value, global)) - } - fn contains(&self, hash: &Hash, prefix: Prefix) -> bool { self.get(hash, prefix).is_some() } - fn insert_with_meta(&mut self, prefix: Prefix, value: &[u8], meta: Meta) -> Hash { - let key = >::hash(value, &meta); - let stored_value = >::stored_value(value, meta); - self.emplace(key, prefix, stored_value); - key - } - fn insert(&mut self, prefix: Prefix, value: &[u8]) -> Hash { let key = Hasher::hash(value); self.emplace(key, prefix, value.to_vec()); @@ -75,6 +63,11 @@ impl<'a> HashDB> for SimpleTrie<'a> { self.overlay.insert(key, Some(value)); } + fn emplace_ref(&mut self, key: &Hash, prefix: Prefix, value: &[u8]) { + let key = sp_trie::prefixed_key::(key, prefix); + self.overlay.insert(key, Some(value.into())); + } + fn remove(&mut self, key: &Hash, prefix: Prefix) { let key = 
sp_trie::prefixed_key::(key, prefix); self.overlay.insert(key, None); diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index 67d07a7c024b1..7dcf2cd503e28 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -24,7 +24,7 @@ use lazy_static::lazy_static; use rand::Rng; use hash_db::Prefix; use sp_state_machine::Backend as _; -use sp_trie::{trie_types::TrieDBMut, TrieMut as _, Meta, MetaHasher, StateHasher}; +use sp_trie::{trie_types::TrieDBMut, TrieMut as _}; use node_primitives::Hash; @@ -170,17 +170,9 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { struct Storage(Arc); impl sp_state_machine::Storage for Storage { - fn get( - &self, - key: &Hash, - prefix: Prefix, - global: Option, - ) -> Result, Meta)>, String> { + fn get(&self, key: &Hash, prefix: Prefix) -> Result>, String> { let key = sp_trie::prefixed_key::(key, prefix); self.0.get(0, &key).map_err(|e| format!("Database backend error: {:?}", e)) - .map(|result| result - .map(|value| >::extract_value_owned(value, global)) - ) } fn access_from(&self, _key: &Hash) { diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index d0ad9facd73c2..498f7f8af5203 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -94,7 +94,7 @@ pub fn compute_root( I: IntoIterator>>, { use sp_trie::TrieConfiguration; - Ok(sp_trie::trie_types::Layout::::default().trie_root( + Ok(sp_trie::Layout::::default().trie_root( build_pairs::(cht_size, cht_num, hashes)? 
)) } @@ -172,7 +172,7 @@ pub fn check_proof_on_proving_backend( local_number, remote_hash, |_, local_cht_key| - read_proof_check_on_proving_backend::( + read_proof_check_on_proving_backend::( proving_backend, local_cht_key, ).map_err(ClientError::from_state), diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index a43a733bdd47b..a3db2142ca986 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -23,7 +23,7 @@ use std::cell::{Cell, RefCell}; use std::collections::HashMap; use hash_db::{Prefix, Hasher}; -use sp_trie::{MemoryDB, prefixed_key, StateHasher, Meta, MetaHasher}; +use sp_trie::{MemoryDB, prefixed_key}; use sp_core::{ storage::{ChildInfo, TrackedStorageKey}, hexdisplay::HexDisplay @@ -49,30 +49,22 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get( - &self, - key: &Block::Hash, - prefix: Prefix, - global: Option, - ) -> Result, String> { + fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { let prefixed_key = prefixed_key::>(key, prefix); if let Some(recorder) = &self.proof_recorder { if let Some(v) = recorder.get(&key) { return Ok(v.clone()); } let backend_value = self.db.get(0, &prefixed_key) - .map(|result| result.map(|value| - , _>>::extract_value_owned(value, global) - )).map_err(|e| format!("Database backend error: {:?}", e))?; + .map_err(|e| format!("Database backend error: {:?}", e))?; recorder.record::>(key.clone(), backend_value.clone()); Ok(backend_value) } else { self.db.get(0, &prefixed_key) - .map(|result| result.map(|value| - , _>>::extract_value_owned(value, global) - )).map_err(|e| format!("Database backend error: {:?}", e)) + .map_err(|e| format!("Database backend error: {:?}", e)) } } + fn access_from(&self, key: &Block::Hash) { if let Some(recorder) = &self.proof_recorder { recorder.access_from(key, HashFor::::LENGTH); diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index fcbea6201e7bf..89b02d3a0354b 100644 --- a/client/db/src/lib.rs +++ 
b/client/db/src/lib.rs @@ -65,8 +65,7 @@ use sp_blockchain::{ }; use codec::{Decode, Encode}; use hash_db::Prefix; -use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key, StateHasher, - Meta, MetaHasher}; +use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; use sp_database::Transaction; use sp_core::ChangesTrieConfiguration; use sp_core::offchain::OffchainOverlayedChange; @@ -889,25 +888,17 @@ struct StorageDb { } impl sp_state_machine::Storage> for StorageDb { - fn get( - &self, - key: &Block::Hash, - prefix: Prefix, - global: Option, - ) -> Result, String> { + fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { if self.prefix_keys { let key = prefixed_key::>(key, prefix); self.state_db.get(&key, self) } else { self.state_db.get(key.as_ref(), self) } - .map(|result| result.map(|value| - , _>>::extract_value_owned(value, global) - )).map_err(|e| format!("Database backend error: {:?}", e)) + .map_err(|e| format!("Database backend error: {:?}", e)) } - fn access_from(&self, _key: &Block::Hash) { - } + fn access_from(&self, _key: &Block::Hash) { } } impl sc_state_db::NodeDb for StorageDb { @@ -931,14 +922,10 @@ impl DbGenesisStorage { } impl sp_state_machine::Storage> for DbGenesisStorage { - fn get( - &self, - _key: &Block::Hash, - _prefix: Prefix, - _global: Option, - ) -> Result, String> { + fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { Ok(None) } + fn access_from(&self, _key: &Block::Hash) { } } @@ -2157,7 +2144,6 @@ impl sc_client_api::backend::Backend for Backend { self.storage.as_ref(), &header.state_root, (&[], None), - Default::default(), ).unwrap_or(None).is_some() }, _ => false, diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 62767e28a31c3..f92e1b32c60c8 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -30,7 +30,7 @@ use sp_core::{ }; use sc_runtime_test::wasm_binary_unwrap; use 
sp_state_machine::TestExternalities as CoreTestExternalities; -use sp_trie::{TrieConfiguration, trie_types::Layout}; +use sp_trie::{TrieConfiguration, Layout}; use sp_wasm_interface::HostFunctions as _; use sp_runtime::traits::BlakeTwo256; use sc_executor_common::{wasm_runtime::WasmModule, runtime_blob::RuntimeBlob}; diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 1fccd918be7c9..c045c767d4da2 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -51,7 +51,7 @@ use sp_consensus::{ BlockStatus, BlockImportParams, ForkChoiceStrategy, }; use sp_storage::StorageKey; -use sp_trie::{TrieConfiguration, trie_types::Layout}; +use sp_trie::{TrieConfiguration, Layout}; use sp_runtime::{generic::BlockId, DigestItem, Justifications}; use hex_literal::hex; use futures::executor::block_on; diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index e22f4d96c680e..dd7fe0f9b8d9f 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -48,6 +48,7 @@ pub trait Storage: Send + Sync { key: &H::Out, prefix: Prefix, ) -> Result>; + /// Call back when value get accessed in trie. fn access_from(&self, key: &H::Out); } diff --git a/primitives/transaction-storage-proof/src/lib.rs b/primitives/transaction-storage-proof/src/lib.rs index 7891edb0b2dc5..825de27b2a5a9 100644 --- a/primitives/transaction-storage-proof/src/lib.rs +++ b/primitives/transaction-storage-proof/src/lib.rs @@ -141,7 +141,7 @@ pub mod registration { use super::*; type Hasher = sp_core::Blake2Hasher; - type TrieLayout = sp_trie::trie_types::Layout::; + type TrieLayout = sp_trie::Layout::; /// Create a new inherent data provider instance for a given parent block hash. 
pub fn new_data_provider( diff --git a/primitives/trie/benches/bench.rs b/primitives/trie/benches/bench.rs index c40907ac5cdf4..c2ccb31328aae 100644 --- a/primitives/trie/benches/bench.rs +++ b/primitives/trie/benches/bench.rs @@ -21,11 +21,11 @@ criterion_main!(benches); fn benchmark(c: &mut Criterion) { trie_bench::standard_benchmark::< - sp_trie::trie_types::Layout, + sp_trie::Layout, sp_trie::TrieStream, >(c, "substrate-blake2"); trie_bench::standard_benchmark::< - sp_trie::trie_types::Layout, + sp_trie::Layout, sp_trie::TrieStream, >(c, "substrate-keccak"); } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 79068c585eb6e..714307e1347d5 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -161,7 +161,7 @@ pub type TrieHash = <::Hash as Hasher>::Out; /// This module is for non generic definition of trie type. /// Only the `Hasher` trait is generic in this case. pub mod trie_types { - use super::Layout; + pub type Layout = super::Layout; /// Persistent trie database read-access interface for the a given hasher. pub type TrieDB<'a, H> = super::TrieDB<'a, Layout>; /// Persistent trie database write-access interface for the a given hasher. From 602723e33fbedf6967f3d5ffdb16ab062c4559ed Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 21 Jun 2021 14:46:57 +0200 Subject: [PATCH 061/188] update to latest triedb changes. 
--- Cargo.lock | 16 ++++++++-------- primitives/trie/src/node_codec.rs | 12 ++++++------ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4fb09ef6c6d2c..030a779c34e0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2354,7 +2354,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#cec2aa252bd03779e57403610222fc818823fda4" [[package]] name = "hash256-std-hasher" @@ -2368,7 +2368,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#cec2aa252bd03779e57403610222fc818823fda4" dependencies = [ "crunchy", ] @@ -3050,7 +3050,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#cec2aa252bd03779e57403610222fc818823fda4" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", @@ -3865,7 +3865,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.26.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#cec2aa252bd03779e57403610222fc818823fda4" dependencies = [ "hash-db", "hashbrown", @@ -10551,7 +10551,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.27.0" -source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#cec2aa252bd03779e57403610222fc818823fda4" dependencies = [ "criterion", "hash-db", @@ -10566,7 +10566,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.5" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#cec2aa252bd03779e57403610222fc818823fda4" dependencies = [ "hash-db", "hashbrown", @@ -10578,7 +10578,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#cec2aa252bd03779e57403610222fc818823fda4" dependencies = [ "hash-db", ] @@ -10596,7 +10596,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#511e98bff38e2d618a5ec382eeca8661a5459df3" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#cec2aa252bd03779e57403610222fc818823fda4" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index bed8ae73b5852..58782e060f77d 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -123,7 +123,7 @@ impl NodeCodec { let bitmap = Bitmap::decode(&data[bitmap_range])?; let value = if branch_has_value { if alt_hashing && contains_hash { - ValuePlan::HashedValue(input.take(H::LENGTH)?, 0) + ValuePlan::HashedValue(input.take(H::LENGTH)?) 
} else { let with_len = input.offset; let count = >::decode(&mut input)?.0 as usize; @@ -165,7 +165,7 @@ impl NodeCodec { )?; let partial_padding = nibble_ops::number_padding(nibble_count); let value = if alt_hashing && contains_hash { - ValuePlan::HashedValue(input.take(H::LENGTH)?, 0) + ValuePlan::HashedValue(input.take(H::LENGTH)?) } else { let with_len = input.offset; let count = >::decode(&mut input)?.0 as usize; @@ -239,12 +239,12 @@ impl NodeCodecT for NodeCodec let end = output.len(); meta.encoded_value_callback(ValuePlan::Value(start..end, with_len)); }, - Value::HashedValue(hash, _size) => { + Value::HashedValue(hash) => { debug_assert!(hash.len() == H::LENGTH); let start = output.len(); output.extend_from_slice(hash); let end = output.len(); - meta.encoded_value_callback(ValuePlan::HashedValue(start..end, 0)); + meta.encoded_value_callback(ValuePlan::HashedValue(start..end)); }, Value::NoValue => unimplemented!("No support for incomplete nodes"), } @@ -307,12 +307,12 @@ impl NodeCodecT for NodeCodec let end = output.len(); meta.encoded_value_callback(ValuePlan::Value(start..end, with_len)); }, - Value::HashedValue(hash, _size) => { + Value::HashedValue(hash) => { debug_assert!(hash.len() == H::LENGTH); let start = output.len(); output.extend_from_slice(hash); let end = output.len(); - meta.encoded_value_callback(ValuePlan::HashedValue(start..end, 0)); + meta.encoded_value_callback(ValuePlan::HashedValue(start..end)); }, Value::NoValue => (), } From 66ee72db0895a7dc14e28383cfcaca243289372c Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 21 Jun 2021 15:22:50 +0200 Subject: [PATCH 062/188] Clean up. 
--- client/db/src/bench.rs | 11 +++-------- client/db/src/lib.rs | 3 +-- client/executor/runtime-test/src/lib.rs | 1 - client/light/src/backend.rs | 7 +++---- primitives/state-machine/src/backend.rs | 3 +-- primitives/state-machine/src/ext.rs | 7 ++----- primitives/state-machine/src/in_memory_backend.rs | 2 +- primitives/state-machine/src/lib.rs | 3 --- .../state-machine/src/overlayed_changes/mod.rs | 5 +---- primitives/state-machine/src/trie_backend.rs | 4 ++-- .../state-machine/src/trie_backend_essence.rs | 14 ++++---------- primitives/trie/src/storage_proof.rs | 7 +------ 12 files changed, 19 insertions(+), 48 deletions(-) diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index a3db2142ca986..c194e9672a1c0 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -156,8 +156,7 @@ impl BenchmarkingState { &child_content.child_info, child_content.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), )); - let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap() - .full_storage_root( + let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, ); @@ -406,8 +405,7 @@ impl StateBackend> for BenchmarkingState { &self, delta: impl Iterator)>, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord { - self.state.borrow().as_ref() - .map_or(Default::default(), |s| s.storage_root(delta)) + self.state.borrow().as_ref().map_or(Default::default(), |s| s.storage_root(delta)) } fn child_storage_root<'a>( @@ -415,10 +413,7 @@ impl StateBackend> for BenchmarkingState { child_info: &ChildInfo, delta: impl Iterator)>, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord { - self.state.borrow().as_ref().map_or( - Default::default(), - |s| s.child_storage_root(child_info, delta), - ) + self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) } fn pairs(&self) -> 
Vec<(Vec, Vec)> { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 89b02d3a0354b..64cfb975f79b9 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -73,8 +73,7 @@ use sp_core::storage::{well_known_keys, ChildInfo}; use sp_arithmetic::traits::Saturating; use sp_runtime::{generic::{DigestItem, BlockId}, Justification, Justifications, Storage}; use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, - HashFor, + Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, HashFor, }; use sp_state_machine::{ DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index c8963b7b87baa..926fddcf63073 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -79,7 +79,6 @@ sp_core::wasm_export_functions! { print("switched!"); } - fn test_clear_prefix(input: Vec) -> Vec { storage::clear_prefix(&input, None); b"all ok!".to_vec() diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index a63c7879de289..a7f1b8e0c1696 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -492,8 +492,8 @@ impl StateBackend for GenesisOrUnavailableState delta: impl Iterator)>, ) -> (H::Out, Self::Transaction) where H::Out: Ord { match *self { - GenesisOrUnavailableState::Genesis(ref state) => state - .storage_root(delta), + GenesisOrUnavailableState::Genesis(ref state) => + state.storage_root(delta), GenesisOrUnavailableState::Unavailable => Default::default(), } } @@ -505,8 +505,7 @@ impl StateBackend for GenesisOrUnavailableState ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { match *self { GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state - .child_storage_root(child_info, delta); + let (root, is_equal, _) = state.child_storage_root(child_info, 
delta); (root, is_equal, Default::default()) }, GenesisOrUnavailableState::Unavailable => diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index c879053fce569..fb59458c0f9c1 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -169,7 +169,6 @@ pub trait Backend: sp_std::fmt::Debug { /// Calculate the storage root, with given delta over what is already stored /// in the backend, and produce a "transaction" that can be used to commit. /// Does include child storage updates. - /// Alt hashing paremeter must contain possible changes from delta. fn full_storage_root<'a>( &self, delta: impl Iterator)>, @@ -257,7 +256,7 @@ pub trait Backend: sp_std::fmt::Debug { } /// Read current trie hashing threshold. - /// Please do not reimplement. + /// Please do not change default implementation when implementing this trait. fn get_trie_alt_hashing_threshold(&self) -> Option { self.storage(sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG).ok().flatten() .and_then(|encoded| sp_core::storage::trie_threshold_decode(&mut encoded.as_slice())) diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 2defaa49142af..e66664647d9d8 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -26,7 +26,7 @@ use sp_core::{ storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, hexdisplay::HexDisplay, }; -use sp_trie::{Layout, empty_child_trie_root}; +use sp_trie::{trie_types::Layout, empty_child_trie_root}; use sp_externalities::{ Externalities, Extensions, Extension, ExtensionStore, }; @@ -530,10 +530,7 @@ where return root.encode(); } - let root = self.overlay.storage_root( - self.backend, - self.storage_transaction_cache, - ); + let root = self.overlay.storage_root(self.backend, self.storage_transaction_cache); trace!(target: "state", "{:04x}: Root {}", self.id, HexDisplay::from(&root.as_ref())); 
root.encode() } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index b122120b36e44..74338d5d77a52 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -88,7 +88,7 @@ where self.root() == other.root() } - /// setting a alt hashing threshold at start. + /// Setting a alternate hashing threshold at start. pub fn force_alt_hashing(&mut self, threshold: Option) { self.force_alt_hashing = Some(threshold); } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 34363dfa6fe3a..278126c7432e8 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1561,8 +1561,6 @@ mod tests { local_result1.into_iter().collect::>(), vec![(b"foo222".to_vec(), Some(vec![5u8; 100]))], ); - println!("a{:?}", remote_proof.encode().len()); - println!("b{:?}", remote_proof.encoded_size()); remote_proof }; @@ -1651,7 +1649,6 @@ mod tests { ); remote_backend.backend_storage_mut().consolidate(transaction); remote_backend.essence.set_root(remote_root.clone()); - println!("{:?}", remote_root); let remote_proof = prove_child_read( remote_backend, &child_info1, diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 1bca2da310c27..c01d56ab919a0 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -636,10 +636,7 @@ impl OverlayedChanges { |(k, v)| (&k[..], v.value().map(|v| &v[..])) ))); - let (root, transaction) = backend.full_storage_root( - delta, - child_delta, - ); + let (root, transaction) = backend.full_storage_root(delta, child_delta); cache.transaction = Some(transaction); cache.transaction_storage_root = Some(root); diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 
c09a6e6e0899b..334f80f0dcf25 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -34,8 +34,8 @@ use sp_std::{boxed::Box, vec::Vec}; pub struct TrieBackend, H: Hasher> { pub (crate) essence: TrieBackendEssence, // Allows setting alt hashing at start for testing only - // (see in_memory_backend that cannot read from state as - // it changes. + // (mainly for in_memory_backend when it cannot read it from + // state). pub (crate) force_alt_hashing: Option>, } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index dd7fe0f9b8d9f..f8945a88b5ec0 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -43,11 +43,7 @@ type Result = sp_std::result::Result; /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { /// Get a trie node. - fn get( - &self, - key: &H::Out, - prefix: Prefix, - ) -> Result>; + fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; /// Call back when value get accessed in trie. fn access_from(&self, key: &H::Out); @@ -416,8 +412,10 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDBRef pub trait TrieBackendStorage: Send + Sync { /// Type of in-memory overlay. type Overlay: HashDB + Default + Consolidate; + /// Get the value stored at key. fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; + /// Call back when value get accessed in trie. 
fn access_from(&self, key: &H::Out); } @@ -427,11 +425,7 @@ pub trait TrieBackendStorage: Send + Sync { impl TrieBackendStorage for Arc> { type Overlay = PrefixedMemoryDB; - fn get( - &self, - key: &H::Out, - prefix: Prefix, - ) -> Result> { + fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { Storage::::get(self.deref(), key, prefix) } diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index cebad1911315f..78730ca13b7cc 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -174,14 +174,9 @@ impl Iterator for StorageProofNodeIterator { impl From for crate::MemoryDB { fn from(proof: StorageProof) -> Self { let mut db = crate::MemoryDB::default(); - // Needed because we do not read trie structure, so - // we do a heuristic related to the fact that host function - // only allow global definition. - // Using compact proof will work directly here (read trie structure and - // work directly. for item in proof.trie_nodes.iter() { let mut meta = Default::default(); - // read state meta (required for value layout). + // Read meta from state (required for value layout). let _ = as TrieLayout>::Codec::decode_plan(item.as_slice(), &mut meta); db.alt_insert( crate::EMPTY_PREFIX, From 23c5db3e5e1e4f72d897bf92d4a16e998bea393a Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 21 Jun 2021 18:24:04 +0200 Subject: [PATCH 063/188] fix executor test. 
--- client/executor/src/integration_tests/mod.rs | 18 ++++------ primitives/state-machine/src/testing.rs | 37 ++++++++++++++++++-- 2 files changed, 40 insertions(+), 15 deletions(-) diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index f92e1b32c60c8..622fac865683d 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -199,7 +199,7 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { let output = call_in_wasm( "test_data_in", - &b"Hello world".to_vec().encode(), + &b"Hello worldHello worldHello worldHello world".to_vec().encode(), wasm_method, &mut ext, ).unwrap(); @@ -207,18 +207,15 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); } - let mut storage = sp_core::storage::Storage { + let storage = sp_core::storage::Storage { top: map![ - b"input".to_vec() => b"Hello world".to_vec(), + b"input".to_vec() => b"Hello worldHello worldHello worldHello world".to_vec(), b"foo".to_vec() => b"bar".to_vec(), b"baz".to_vec() => b"bar".to_vec() ], children_default: map![], }; - storage.modify_trie_alt_hashing_threshold(Some( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD - )); - let expected = TestExternalities::new(storage); + let expected = TestExternalities::new_with_alt_hashing(storage); assert_eq!(ext, expected); } @@ -244,7 +241,7 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); } - let mut storage = sp_core::storage::Storage { + let storage = sp_core::storage::Storage { top: map![ b"aaa".to_vec() => b"1".to_vec(), b"aab".to_vec() => b"2".to_vec(), @@ -252,11 +249,8 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { ], children_default: map![], }; - storage.modify_trie_alt_hashing_threshold(Some( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD - )); - let expected = TestExternalities::new(storage); 
+ let expected = TestExternalities::new_with_alt_hashing(storage); assert_eq!(expected, ext); } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index e7f711eced39c..a8dccb7274571 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -18,6 +18,7 @@ //! Test implementation for Externalities. use std::{any::{Any, TypeId}, panic::{AssertUnwindSafe, UnwindSafe}}; +use std::collections::{HashMap, BTreeMap}; use crate::{ backend::Backend, OverlayedChanges, StorageTransactionCache, ext::Ext, InMemoryBackend, @@ -36,7 +37,7 @@ use sp_core::{ offchain::testing::TestPersistentOffchainDB, storage::{ well_known_keys::{CHANGES_TRIE_CONFIG, CODE, is_child_storage_key}, - Storage, + Storage, ChildInfo, }, traits::TaskExecutorExt, testing::TaskExecutor, @@ -88,13 +89,24 @@ where Self::new_with_code(&[], storage) } + /// Create a new instance of `TestExternalities` with storage + /// on a backend containing defined default alt hashing threshold. + pub fn new_with_alt_hashing(storage: Storage) -> Self { + Self::new_with_code_inner(&[], storage, true) + } + + /// New empty test externalities. pub fn new_empty() -> Self { Self::new_with_code(&[], Storage::default()) } /// Create a new instance of `TestExternalities` with code and storage. 
- pub fn new_with_code(code: &[u8], mut storage: Storage) -> Self { + pub fn new_with_code(code: &[u8], storage: Storage) -> Self { + Self::new_with_code_inner(code, storage, false) + } + + fn new_with_code_inner(code: &[u8], mut storage: Storage, force_alt_hashing: bool) -> Self { let mut overlay = OverlayedChanges::default(); let changes_trie_config = storage.top.get(CHANGES_TRIE_CONFIG) .and_then(|v| Decode::decode(&mut &v[..]).ok()); @@ -110,13 +122,32 @@ where let offchain_db = TestPersistentOffchainDB::new(); + let backend = if force_alt_hashing { + let mut backend: InMemoryBackend = { + let mut storage = Storage::default(); + storage.modify_trie_alt_hashing_threshold(Some( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD + )); + storage.into() + }; + let mut inner: HashMap, BTreeMap> + = storage.children_default.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); + inner.insert(None, storage.top); + backend.insert( + inner.into_iter().map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect())), + ); + backend + } else { + storage.into() + }; + TestExternalities { overlay, offchain_db, changes_trie_config, extensions, changes_trie_storage: ChangesTrieInMemoryStorage::new(), - backend: storage.into(), + backend, storage_transaction_cache: Default::default(), } } From 03d93b2f5aa9495119706072e51ea992a10da6a7 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 22 Jul 2021 13:13:15 +0200 Subject: [PATCH 064/188] rust fmt from master. 
--- rustfmt.toml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 rustfmt.toml diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000000000..15e9bdcdf10f1 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,20 @@ +# Basic +hard_tabs = true +max_width = 100 +use_small_heuristics = "Max" +# Imports +imports_granularity = "Crate" +reorder_imports = true +# Consistency +newline_style = "Unix" +# Misc +chain_width = 80 +spaces_around_ranges = false +binop_separator = "Back" +reorder_impl_items = false +match_arm_leading_pipes = "Preserve" +match_arm_blocks = false +match_block_trailing_comma = true +trailing_comma = "Vertical" +trailing_semicolon = false +use_field_init_shorthand = true From 93aaa4cdea3a2b8b219b9ef0b843ba6a70b4916c Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 22 Jul 2021 17:35:46 +0200 Subject: [PATCH 065/188] rust format. --- bin/node/bench/src/generator.rs | 7 +- bin/node/bench/src/trie.rs | 3 +- bin/node/executor/benches/bench.rs | 6 +- bin/node/executor/tests/common.rs | 8 +- client/api/src/cht.rs | 16 +- client/db/src/lib.rs | 93 ++++---- client/executor/src/integration_tests/mod.rs | 49 ++--- client/network/test/src/lib.rs | 6 +- client/service/test/src/client/light.rs | 90 +++++--- client/service/test/src/client/mod.rs | 31 +-- frame/support/test/tests/instance.rs | 5 +- frame/system/src/lib.rs | 6 +- primitives/io/src/lib.rs | 10 +- primitives/state-machine/src/backend.rs | 26 +-- primitives/state-machine/src/basic.rs | 2 +- .../state-machine/src/changes_trie/mod.rs | 2 +- .../state-machine/src/changes_trie/storage.rs | 4 +- primitives/state-machine/src/lib.rs | 131 +++++------ .../state-machine/src/proving_backend.rs | 154 ++++++------- primitives/state-machine/src/testing.rs | 24 +- primitives/state-machine/src/trie_backend.rs | 59 ++--- .../state-machine/src/trie_backend_essence.rs | 91 +++++--- primitives/storage/src/lib.rs | 3 +- primitives/trie/src/lib.rs | 207 +++++++++--------- 
primitives/trie/src/node_codec.rs | 139 ++++++------ primitives/trie/src/node_header.rs | 47 ++-- primitives/trie/src/storage_proof.rs | 6 +- primitives/trie/src/trie_codec.rs | 14 +- primitives/trie/src/trie_stream.rs | 54 ++--- test-utils/client/src/lib.rs | 6 +- test-utils/runtime/client/src/lib.rs | 9 +- utils/frame/rpc/system/src/lib.rs | 2 +- 32 files changed, 661 insertions(+), 649 deletions(-) diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index f349c4555a5ea..f95811c40ebee 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -30,7 +30,7 @@ use crate::simple_trie::SimpleTrie; /// return root. pub fn generate_trie( db: Arc, - key_values: impl IntoIterator, Vec)>, + key_values: impl IntoIterator, Vec)>, alt_hashing: Option, ) -> Hash { let mut root = Hash::default(); @@ -44,10 +44,11 @@ pub fn generate_trie( ); let mut trie = SimpleTrie { db, overlay: &mut overlay }; { - let mut trie_db = if let Some(threshold) = alt_hashing { let layout = sp_trie::Layout::with_alt_hashing(threshold); - TrieDBMut::::new_with_layout(&mut trie, &mut root, layout) + TrieDBMut::::new_with_layout( + &mut trie, &mut root, layout, + ) } else { TrieDBMut::new(&mut trie, &mut root) }; diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index ba8a42900bd2c..77ca3e85b8b05 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -176,8 +176,7 @@ impl sp_state_machine::Storage for Storage { self.0.get(0, &key).map_err(|e| format!("Database backend error: {:?}", e)) } - fn access_from(&self, _key: &Hash) { - } + fn access_from(&self, _key: &Hash) {} } impl core::Benchmark for TrieReadBenchmark { diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index 7645c43d25fc1..7539d14b31a4e 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -89,9 +89,9 @@ fn construct_block( let extrinsics = 
extrinsics.into_iter().map(sign).collect::>(); // calculate the header fields that we can. - let extrinsics_root = Layout::::default().ordered_trie_root( - extrinsics.iter().map(Encode::encode) - ).to_fixed_bytes() + let extrinsics_root = Layout::::default() + .ordered_trie_root(extrinsics.iter().map(Encode::encode)) + .to_fixed_bytes() .into(); let header = Header { diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index 08476cc428f65..853639bd50325 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -152,10 +152,10 @@ pub fn construct_block( let extrinsics = extrinsics.into_iter().map(sign).collect::>(); // calculate the header fields that we can. - let extrinsics_root = - Layout::::default().ordered_trie_root(extrinsics.iter().map(Encode::encode)) - .to_fixed_bytes() - .into(); + let extrinsics_root = Layout::::default() + .ordered_trie_root(extrinsics.iter().map(Encode::encode)) + .to_fixed_bytes() + .into(); let header = Header { parent_hash, diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 75e45a50af1c0..996374314d3cb 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -32,9 +32,9 @@ use sp_trie; use sp_core::{convert_hash, H256}; use sp_runtime::traits::{AtLeast32Bit, Header as HeaderT, One, Zero}; use sp_state_machine::{ - MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend_generic as read_proof_check_on_proving_backend, + Backend as StateBackend, InMemoryBackend, MemoryDB, StorageProof, TrieBackend, }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -94,9 +94,8 @@ where I: IntoIterator>>, { use sp_trie::TrieConfiguration; - Ok(sp_trie::Layout::::default().trie_root( - build_pairs::(cht_size, cht_num, hashes)? 
- )) + Ok(sp_trie::Layout::::default() + .trie_root(build_pairs::(cht_size, cht_num, hashes)?)) } /// Build CHT-based header proof. @@ -172,11 +171,10 @@ where local_root, local_number, remote_hash, - |_, local_cht_key| - read_proof_check_on_proving_backend::( - proving_backend, - local_cht_key, - ).map_err(ClientError::from_state), + |_, local_cht_key| { + read_proof_check_on_proving_backend::(proving_backend, local_cht_key) + .map_err(ClientError::from_state) + }, ) } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 2f2a7c35c4360..170862595bf03 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -882,18 +882,17 @@ impl sc_client_api::backend::BlockImportOperation Ok(()) } - fn reset_storage( - &mut self, - storage: Storage, - ) -> ClientResult { + fn reset_storage(&mut self, storage: Storage) -> ClientResult { if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(&k)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); + return Err(sp_blockchain::Error::GenesisInvalid.into()) } - let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)|( - &child_content.child_info, - child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))), - )); + let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| { + ( + &child_content.child_info, + child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))), + ) + }); let mut changes_trie_config: Option = None; let (root, transaction) = self.old_state.full_storage_root( @@ -901,7 +900,7 @@ impl sc_client_api::backend::BlockImportOperation if &k[..] 
== well_known_keys::CHANGES_TRIE_CONFIG { changes_trie_config = Some( Decode::decode(&mut &v[..]) - .expect("changes trie configuration is encoded properly at genesis") + .expect("changes trie configuration is encoded properly at genesis"), ); } (&k[..], Some(&v[..])) @@ -994,7 +993,7 @@ impl sp_state_machine::Storage> for StorageDb sc_state_db::NodeDb for StorageDb { @@ -1022,7 +1021,7 @@ impl sp_state_machine::Storage> for DbGenesisStora use hash_db::HashDB; Ok(self.storage.get(key, prefix)) } - fn access_from(&self, _key: &Block::Hash) { } + fn access_from(&self, _key: &Block::Hash) {} } struct EmptyStorage(pub Block::Hash); @@ -1041,8 +1040,7 @@ impl sp_state_machine::Storage> for EmptyStorage = vec![]; - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))), - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); let hash = header.hash(); op.db_updates.insert(EMPTY_PREFIX, b"hello"); @@ -2739,11 +2734,11 @@ pub(crate) mod tests { let storage: Vec<(_, _)> = vec![]; - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))), - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); let hash = header.hash(); op.db_updates.remove(&key, EMPTY_PREFIX); @@ -2773,11 +2768,11 @@ pub(crate) mod tests { let storage: Vec<(_, _)> = vec![]; - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))), - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); op.set_block_data(header, Some(vec![]), None, None, NewBlockState::Best) .unwrap(); @@ -3127,10 +3122,11 @@ pub(crate) mod tests { let storage = vec![(b"test".to_vec(), b"test".to_vec())]; - header.state_root = 
op.old_state.storage_root(storage - .iter() - .map(|(x, y)| (&x[..], Some(&y[..]))), - ).0.into(); + header.state_root = op + .old_state + .storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..])))) + .0 + .into(); let hash = header.hash(); op.reset_storage(Storage { @@ -3165,10 +3161,9 @@ pub(crate) mod tests { let storage = vec![(b"test".to_vec(), Some(b"test2".to_vec()))]; - let (root, overlay) = op.old_state.storage_root( - storage.iter() - .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))), - ); + let (root, overlay) = op + .old_state + .storage_root(storage.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); let hash = header.hash(); diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 77eb5fd9e2704..357e3ea972a37 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -184,7 +184,8 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { &b"Hello worldHello worldHello worldHello world".to_vec().encode(), wasm_method, &mut ext, - ).unwrap(); + ) + .unwrap(); assert_eq!(output, b"all ok!".to_vec().encode()); } @@ -373,13 +374,11 @@ fn ordered_trie_root_should_work(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); let trie_input = vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()]; assert_eq!( - call_in_wasm( - "test_ordered_trie_root", - &[0], - wasm_method, - &mut ext.ext(), - ).unwrap(), - Layout::::default().ordered_trie_root(trie_input.iter()).as_bytes().encode(), + call_in_wasm("test_ordered_trie_root", &[0], wasm_method, &mut ext.ext(),).unwrap(), + Layout::::default() + .ordered_trie_root(trie_input.iter()) + .as_bytes() + .encode(), ); } @@ -672,12 +671,8 @@ fn state_hashing_update(wasm_method: WasmExecutionMethod) { let root1 = { let mut ext = ext.ext(); ext.set_storage(b"foo".to_vec(), b"bar".to_vec()); - let 
output = call_in_wasm( - "test_data_in", - &vec![1u8; 100].encode(), - wasm_method, - &mut ext, - ).unwrap(); + let output = + call_in_wasm("test_data_in", &vec![1u8; 100].encode(), wasm_method, &mut ext).unwrap(); assert_eq!(output, b"all ok!".to_vec().encode()); ext.storage_root() @@ -687,12 +682,8 @@ fn state_hashing_update(wasm_method: WasmExecutionMethod) { let root2 = { let mut ext = ext.ext(); // flag state. - let _ = call_in_wasm( - "test_switch_state", - Default::default(), - wasm_method, - &mut ext, - ).unwrap(); + let _ = + call_in_wasm("test_switch_state", Default::default(), wasm_method, &mut ext).unwrap(); ext.storage_root() }; @@ -701,12 +692,9 @@ fn state_hashing_update(wasm_method: WasmExecutionMethod) { ext.commit_all().unwrap(); let root3 = { let mut ext = ext.ext(); - let _ = call_in_wasm( - "test_data_in", - &vec![2u8; 100].to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(); + let _ = + call_in_wasm("test_data_in", &vec![2u8; 100].to_vec().encode(), wasm_method, &mut ext) + .unwrap(); ext.storage_root() }; assert!(root2 != root3); @@ -716,12 +704,9 @@ fn state_hashing_update(wasm_method: WasmExecutionMethod) { let mut ext = ext.ext(); // revert to root 2 state, but this time // inner hashing should apply - let _ = call_in_wasm( - "test_data_in", - &vec![1u8; 100].to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(); + let _ = + call_in_wasm("test_data_in", &vec![1u8; 100].to_vec().encode(), wasm_method, &mut ext) + .unwrap(); ext.storage_root() }; assert!(root2 != root3); diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index dc1b8d8dd4687..00293405c5aae 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -747,9 +747,11 @@ where /// Add a full peer. 
fn add_full_peer_with_config(&mut self, config: FullPeerConfig) { let mut test_client_builder = match (config.keep_blocks, config.storage_chain) { - (Some(keep_blocks), true) => TestClientBuilder::with_tx_storage(keep_blocks).state_hashed_value(), + (Some(keep_blocks), true) => + TestClientBuilder::with_tx_storage(keep_blocks).state_hashed_value(), (None, true) => TestClientBuilder::with_tx_storage(u32::MAX).state_hashed_value(), - (Some(keep_blocks), false) => TestClientBuilder::with_pruning_window(keep_blocks).state_hashed_value(), + (Some(keep_blocks), false) => + TestClientBuilder::with_pruning_window(keep_blocks).state_hashed_value(), (None, false) => TestClientBuilder::with_default_backend().state_hashed_value(), }; if matches!(config.sync_mode, SyncMode::Fast { .. }) { diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 0c567b61db0a5..7a50f32d3ac24 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -467,8 +467,12 @@ fn prepare_for_read_proof_check(hashed_value: bool) -> (TestChecker, Header, Sto let remote_block_id = BlockId::Number(0); let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() - .storage_root(std::iter::empty()).0.into(); + remote_block_header.state_root = remote_client + .state_at(&remote_block_id) + .unwrap() + .storage_root(std::iter::empty()) + .0 + .into(); // 'fetch' read proof from remote node let heap_pages = remote_client @@ -504,8 +508,12 @@ fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, V let remote_block_id = BlockId::Number(0); let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - remote_block_header.state_root 
= remote_client.state_at(&remote_block_id).unwrap() - .storage_root(std::iter::empty()).0.into(); + remote_block_header.state_root = remote_client + .state_at(&remote_block_id) + .unwrap() + .storage_root(std::iter::empty()) + .0 + .into(); // 'fetch' child read proof from remote node let child_value = remote_client @@ -531,7 +539,10 @@ fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, V (local_checker, remote_block_header, remote_read_proof, child_value) } -fn prepare_for_header_proof_check(insert_cht: bool, hashed_value: bool) -> (TestChecker, Hash, Header, StorageProof) { +fn prepare_for_header_proof_check( + insert_cht: bool, + hashed_value: bool, +) -> (TestChecker, Hash, Header, StorageProof) { // prepare remote client let mut remote_client = substrate_test_runtime_client::new(hashed_value); let mut local_headers_hashes = Vec::new(); @@ -580,18 +591,25 @@ fn storage_read_proof_is_generated_and_checked() { storage_read_proof_is_generated_and_checked_inner(false); } fn storage_read_proof_is_generated_and_checked_inner(hashed_value: bool) { - let ( - local_checker, - remote_block_header, - remote_read_proof, - heap_pages, - ) = prepare_for_read_proof_check(hashed_value); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_proof(&RemoteReadRequest::
{ - block: remote_block_header.hash(), - header: remote_block_header, - keys: vec![well_known_keys::HEAP_PAGES.to_vec()], - retry_count: None, - }, remote_read_proof).unwrap().remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], heap_pages as u8); + let (local_checker, remote_block_header, remote_read_proof, heap_pages) = + prepare_for_read_proof_check(hashed_value); + assert_eq!( + (&local_checker as &dyn FetchChecker) + .check_read_proof( + &RemoteReadRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + keys: vec![well_known_keys::HEAP_PAGES.to_vec()], + retry_count: None, + }, + remote_read_proof + ) + .unwrap() + .remove(well_known_keys::HEAP_PAGES) + .unwrap() + .unwrap()[0], + heap_pages as u8 + ); } #[test] @@ -625,22 +643,28 @@ fn header_proof_is_generated_and_checked() { header_proof_is_generated_and_checked_inner(false); } fn header_proof_is_generated_and_checked_inner(hashed: bool) { - let ( - local_checker, - local_cht_root, - remote_block_header, - remote_header_proof, - ) = prepare_for_header_proof_check(true, hashed); - assert_eq!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ - cht_root: local_cht_root, - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).unwrap(), remote_block_header); + let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = + prepare_for_header_proof_check(true, hashed); + assert_eq!( + (&local_checker as &dyn FetchChecker) + .check_header_proof( + &RemoteHeaderRequest::
{ + cht_root: local_cht_root, + block: 1, + retry_count: None, + }, + Some(remote_block_header.clone()), + remote_header_proof + ) + .unwrap(), + remote_block_header + ); } #[test] fn check_header_proof_fails_if_cht_root_is_invalid() { - let (local_checker, _, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true, true); + let (local_checker, _, mut remote_block_header, remote_header_proof) = + prepare_for_header_proof_check(true, true); remote_block_header.number = 100; assert!((&local_checker as &dyn FetchChecker) .check_header_proof( @@ -657,12 +681,8 @@ fn check_header_proof_fails_if_cht_root_is_invalid() { #[test] fn check_header_proof_fails_if_invalid_header_provided() { - let ( - local_checker, - local_cht_root, - mut remote_block_header, - remote_header_proof, - ) = prepare_for_header_proof_check(true, true); + let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = + prepare_for_header_proof_check(true, true); remote_block_header.number = 100; assert!((&local_checker as &dyn FetchChecker) .check_header_proof( diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 1b47efa1cc40c..ee169a007382a 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1286,9 +1286,8 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { #[test] fn finalizing_diverged_block_should_trigger_reorg() { - let (mut client, select_chain) = TestClientBuilder::new() - .state_hashed_value() - .build_with_longest_chain(); + let (mut client, select_chain) = + TestClientBuilder::new().state_hashed_value().build_with_longest_chain(); // G -> A1 -> A2 // \ @@ -2049,17 +2048,23 @@ fn storage_keys_iter_works_inner(hashed_value: bool) { .map(|x| x.0) .collect(); if hashed_value { - assert_eq!(res, [ - hex!("3a686561707061676573").to_vec(), - sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), - 
hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), - ]); + assert_eq!( + res, + [ + hex!("3a686561707061676573").to_vec(), + sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), + hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), + ] + ); } else { - assert_eq!(res, [ - hex!("3a686561707061676573").to_vec(), - hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), - hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), - ]); + assert_eq!( + res, + [ + hex!("3a686561707061676573").to_vec(), + hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), + hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), + ] + ); } let res: Vec<_> = client diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 1fd8ebac218eb..d7dfe6316298a 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -345,9 +345,8 @@ fn storage_instance_independence() { top: std::collections::BTreeMap::new(), children_default: std::collections::HashMap::new(), }; - storage.modify_trie_alt_hashing_threshold(Some( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - )); + storage + .modify_trie_alt_hashing_threshold(Some(sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD)); sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || { module2::Value::::put(0); module2::Value::::put(0); diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 28860a3784974..292594825ac0a 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1445,9 +1445,9 @@ impl Pallet { ], children_default: map![], }; - storage.modify_trie_alt_hashing_threshold( - Some(sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD), - ); + storage.modify_trie_alt_hashing_threshold(Some( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, + )); 
TestExternalities::new(storage) } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index ba4f6ae520eff..58a79a1586f39 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -55,7 +55,7 @@ use sp_core::{ }; #[cfg(feature = "std")] -use sp_trie::{TrieConfiguration, Layout}; +use sp_trie::{Layout, TrieConfiguration}; use sp_runtime_interface::{ pass_by::{PassBy, PassByCodec}, @@ -1493,12 +1493,10 @@ mod tests { }); let value = vec![7u8; 35]; - let mut storage = Storage { - top: map![b"foo00".to_vec() => value.clone()], - children_default: map![], - }; + let mut storage = + Storage { top: map![b"foo00".to_vec() => value.clone()], children_default: map![] }; storage.modify_trie_alt_hashing_threshold(Some( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, )); t = BasicExternalities::new(storage); diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index b6af5c02657c3..4d5de57a20631 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -195,10 +195,7 @@ pub trait Backend: sp_std::fmt::Debug { let mut child_roots: Vec<_> = Default::default(); // child first for (child_info, child_delta) in child_deltas { - let (child_root, empty, child_txs) = self.child_storage_root( - &child_info, - child_delta, - ); + let (child_root, empty, child_txs) = self.child_storage_root(&child_info, child_delta); let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { @@ -207,13 +204,10 @@ pub trait Backend: sp_std::fmt::Debug { child_roots.push((prefixed_storage_key.into_inner(), Some(child_root.encode()))); } } - let (root, parent_txs) = self.storage_root(delta - .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) - .chain( - child_roots - .iter() - .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) - ), + let (root, parent_txs) = self.storage_root( + delta + .map(|(k, v)| 
(&k[..], v.as_ref().map(|v| &v[..]))) + .chain(child_roots.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))), ); txs.consolidate(parent_txs); (root, txs) @@ -272,7 +266,9 @@ pub trait Backend: sp_std::fmt::Debug { /// Read current trie hashing threshold. /// Please do not change default implementation when implementing this trait. fn get_trie_alt_hashing_threshold(&self) -> Option { - self.storage(sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG).ok().flatten() + self.storage(sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG) + .ok() + .flatten() .and_then(|encoded| sp_core::storage::trie_threshold_decode(&mut encoded.as_slice())) } @@ -301,9 +297,9 @@ impl Consolidate for Vec<(Option, StorageCollection)> { } impl Consolidate for sp_trie::GenericMemoryDB - where - H: Hasher, - KF: sp_trie::KeyFunction, +where + H: Hasher, + KF: sp_trie::KeyFunction, { fn consolidate(&mut self, other: Self) { sp_trie::GenericMemoryDB::consolidate(self, other) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index ed2e4f439c726..3d7ac00c18184 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -29,7 +29,7 @@ use sp_core::{ Blake2Hasher, }; use sp_externalities::{Extension, Extensions}; -use sp_trie::{TrieConfiguration, empty_child_trie_root, Layout}; +use sp_trie::{empty_child_trie_root, Layout, TrieConfiguration}; use std::{ any::{Any, TypeId}, collections::BTreeMap, diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index ab8c127f008eb..473360f4ffb26 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -202,7 +202,7 @@ impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage self.0.get(key, prefix) } - fn access_from(&self, _key: &H::Out) { } + fn access_from(&self, _key: &H::Out) {} } /// Changes trie configuration. 
diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index b47a53f16f00d..b1fca1eb971a9 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -191,7 +191,7 @@ impl Storage for InMemoryStorage Result, String> { - Ok( as hash_db::HashDBRef>::get(&self.data.read().mdb, key, prefix)) + Ok( as hash_db::HashDBRef>::get(&self.data.read().mdb, key, prefix)) } } @@ -212,5 +212,5 @@ where self.storage.get(key, prefix) } - fn access_from(&self, _key: &H::Out) { } + fn access_from(&self, _key: &H::Out) {} } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 5fa07367c814d..d2c0ec0651502 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -119,6 +119,7 @@ impl sp_std::fmt::Display for DefaultError { pub use crate::{ backend::Backend, + error::{Error, ExecutionError}, ext::Ext, overlayed_changes::{ ChildStorageCollection, IndexOperation, OffchainChangesCollection, @@ -128,7 +129,6 @@ pub use crate::{ stats::{StateMachineStats, UsageInfo, UsageUnit}, trie_backend::TrieBackend, trie_backend_essence::{Storage, TrieBackendStorage}, - error::{Error, ExecutionError}, }; #[cfg(not(feature = "std"))] @@ -142,30 +142,25 @@ mod changes_trie { #[cfg(feature = "std")] mod std_reexport { - pub use sp_trie::{trie_types::TrieDBMut, Layout, StorageProof, TrieMut, - DBValue, MemoryDB}; - pub use crate::testing::TestExternalities; - pub use crate::basic::BasicExternalities; - pub use crate::read_only::{ReadOnlyExternalities, InspectState}; - pub use crate::proving_backend::{ - create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, + pub use crate::{ + basic::BasicExternalities, + changes_trie::{ + disabled_state as disabled_changes_trie_state, key_changes, key_changes_proof, + key_changes_proof_check, key_changes_proof_check_with_db, prune as 
prune_changes_tries, + AnchorBlockId as ChangesTrieAnchorBlockId, BlockNumber as ChangesTrieBlockNumber, + BuildCache as ChangesTrieBuildCache, CacheAction as ChangesTrieCacheAction, + ConfigurationRange as ChangesTrieConfigurationRange, + InMemoryStorage as InMemoryChangesTrieStorage, RootsStorage as ChangesTrieRootsStorage, + State as ChangesTrieState, Storage as ChangesTrieStorage, + }, + in_memory_backend::new_in_mem, + proving_backend::{ + create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, + }, + read_only::{InspectState, ReadOnlyExternalities}, + testing::TestExternalities, }; - pub use crate::changes_trie::{ - AnchorBlockId as ChangesTrieAnchorBlockId, - State as ChangesTrieState, - Storage as ChangesTrieStorage, - RootsStorage as ChangesTrieRootsStorage, - InMemoryStorage as InMemoryChangesTrieStorage, - BuildCache as ChangesTrieBuildCache, - CacheAction as ChangesTrieCacheAction, - ConfigurationRange as ChangesTrieConfigurationRange, - key_changes, key_changes_proof, - key_changes_proof_check, key_changes_proof_check_with_db, - prune as prune_changes_tries, - disabled_state as disabled_changes_trie_state, - BlockNumber as ChangesTrieBlockNumber, - }; - pub use crate::in_memory_backend::new_in_mem; + pub use sp_trie::{trie_types::TrieDBMut, DBValue, Layout, MemoryDB, StorageProof, TrieMut}; } #[cfg(feature = "std")] @@ -984,7 +979,7 @@ mod tests { use codec::{Decode, Encode}; use sp_core::{ map, - storage::ChildInfo, + storage::{ChildInfo, TEST_DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD}, testing::TaskExecutor, traits::{CodeExecutor, Externalities, RuntimeCode}, NativeOrEncoded, NeverNativeValue, @@ -995,7 +990,6 @@ mod tests { panic::UnwindSafe, result, }; - use sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD; #[derive(Clone)] struct DummyCodeExecutor { @@ -1628,15 +1622,11 @@ mod tests { #[test] fn inner_state_hashing_switch_proofs() { - let mut layout = Layout::default(); let (mut mdb, mut root) = 
trie_backend::tests::test_db(false); { - let mut trie = TrieDBMut::from_existing_with_layout( - &mut mdb, - &mut root, - layout.clone(), - ).unwrap(); + let mut trie = + TrieDBMut::from_existing_with_layout(&mut mdb, &mut root, layout.clone()).unwrap(); trie.insert(b"foo", vec![1u8; 1_000].as_slice()) // big inner hash .expect("insert failed"); trie.insert(b"foo2", vec![3u8; 16].as_slice()) // no inner hash @@ -1644,17 +1634,15 @@ mod tests { trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash .expect("insert failed"); } - + let check_proof = |mdb, root| -> StorageProof { let remote_backend = TrieBackend::new(mdb, root); let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"foo222"]).unwrap(); // check proof locally - let local_result1 = read_proof_check::( - remote_root, - remote_proof.clone(), - &[b"foo222"], - ).unwrap(); + let local_result1 = + read_proof_check::(remote_root, remote_proof.clone(), &[b"foo222"]) + .unwrap(); // check that results are correct assert_eq!( local_result1.into_iter().collect::>(), @@ -1669,16 +1657,12 @@ mod tests { assert!(remote_proof.encoded_size() > 1_100); let root1 = root.clone(); - // do switch layout = Layout::with_alt_hashing(TRESHOLD); // update with same value do not change { - let mut trie = TrieDBMut::from_existing_with_layout( - &mut mdb, - &mut root, - layout.clone(), - ).unwrap(); + let mut trie = + TrieDBMut::from_existing_with_layout(&mut mdb, &mut root, layout.clone()).unwrap(); trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash .expect("insert failed"); } @@ -1689,11 +1673,8 @@ mod tests { // work with state machine as only changes do makes // it to payload (would require a special host function). 
{ - let mut trie = TrieDBMut::from_existing_with_layout( - &mut mdb, - &mut root, - layout.clone(), - ).unwrap(); + let mut trie = + TrieDBMut::from_existing_with_layout(&mut mdb, &mut root, layout.clone()).unwrap(); trie.insert(b"foo222", vec![4u8].as_slice()) // inner hash .expect("insert failed"); trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash @@ -1705,8 +1686,7 @@ mod tests { // nodes foo is replaced by its hashed value form. assert!(remote_proof.encode().len() < 1000); assert!(remote_proof.encoded_size() < 1000); - assert_eq!(remote_proof.encode().len(), - remote_proof.encoded_size()); + assert_eq!(remote_proof.encode().len(), remote_proof.encoded_size()); } #[test] @@ -1727,32 +1707,35 @@ mod tests { let (remote_root, transaction) = remote_backend.full_storage_root( std::iter::empty(), vec![ - (&child_info1, vec![ - // a inner hashable node - (&b"k"[..], Some(&long_vec[..])), - // need to ensure this is not an inline node - // otherwhise we do not know what is accessed when - // storing proof. - (&b"key1"[..], Some(&vec![5u8; 32][..])), - (&b"key2"[..], Some(&b"val3"[..])), - ].into_iter()), - (&child_info2, vec![ - (&b"key3"[..], Some(&b"val4"[..])), - (&b"key4"[..], Some(&b"val5"[..])), - ].into_iter()), - (&child_info3, vec![ - (&b"key5"[..], Some(&b"val6"[..])), - (&b"key6"[..], Some(&b"val7"[..])), - ].into_iter()), - ].into_iter(), + ( + &child_info1, + vec![ + // a inner hashable node + (&b"k"[..], Some(&long_vec[..])), + // need to ensure this is not an inline node + // otherwhise we do not know what is accessed when + // storing proof. 
+ (&b"key1"[..], Some(&vec![5u8; 32][..])), + (&b"key2"[..], Some(&b"val3"[..])), + ] + .into_iter(), + ), + ( + &child_info2, + vec![(&b"key3"[..], Some(&b"val4"[..])), (&b"key4"[..], Some(&b"val5"[..]))] + .into_iter(), + ), + ( + &child_info3, + vec![(&b"key5"[..], Some(&b"val6"[..])), (&b"key6"[..], Some(&b"val7"[..]))] + .into_iter(), + ), + ] + .into_iter(), ); remote_backend.backend_storage_mut().consolidate(transaction); remote_backend.essence.set_root(remote_root.clone()); - let remote_proof = prove_child_read( - remote_backend, - &child_info1, - &[b"key1"], - ).unwrap(); + let remote_proof = prove_child_read(remote_backend, &child_info1, &[b"key1"]).unwrap(); let size = remote_proof.encoded_size(); let remote_proof = test_compact(remote_proof, &remote_root); let local_result1 = read_child_proof_check::( diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 8713b3c44ab2f..c09e0bff075ce 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -27,17 +27,12 @@ use hash_db::{HashDB, Hasher, Prefix, EMPTY_PREFIX}; use log::debug; use parking_lot::RwLock; use sp_core::storage::ChildInfo; +pub use sp_trie::trie_types::TrieError; use sp_trie::{ - MemoryDB, empty_child_trie_root, read_trie_value_with, read_child_trie_value_with, - record_all_keys, StorageProof, Meta, Layout, Recorder, -}; -pub use sp_trie::{ - trie_types::TrieError, -}; -use std::{ - collections::HashMap, - sync::Arc, + empty_child_trie_root, read_child_trie_value_with, read_trie_value_with, record_all_keys, + Layout, MemoryDB, Meta, Recorder, StorageProof, }; +use std::{collections::HashMap, sync::Arc}; /// Patricia trie-based backend specialized in get value proofs. 
pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -144,23 +139,26 @@ impl ProofRecorder { pub fn access_from(&self, key: &Hash, hash_len: usize) { let mut inner = self.inner.write(); let ProofRecorderInner { encoded_size, records, .. } = &mut *inner; - records.entry(key.clone()) - .and_modify(|entry| { - if let Some(entry) = entry.as_mut() { - if !entry.2 { - let old_size = sp_trie::estimate_entry_size(entry, hash_len); - entry.2 = true; - let new_size = sp_trie::estimate_entry_size(entry, hash_len); - *encoded_size += new_size; - *encoded_size -= old_size; - } + records.entry(key.clone()).and_modify(|entry| { + if let Some(entry) = entry.as_mut() { + if !entry.2 { + let old_size = sp_trie::estimate_entry_size(entry, hash_len); + entry.2 = true; + let new_size = sp_trie::estimate_entry_size(entry, hash_len); + *encoded_size += new_size; + *encoded_size -= old_size; } - }); + } + }); } /// Returns the value at the given `key`. pub fn get(&self, key: &Hash) -> Option> { - self.inner.read().records.get(key).as_ref() + self.inner + .read() + .records + .get(key) + .as_ref() .map(|v| v.as_ref().map(|v| v.0.clone())) } @@ -175,19 +173,23 @@ impl ProofRecorder { /// Convert into a [`StorageProof`]. 
pub fn to_storage_proof(&self) -> StorageProof { - let trie_nodes = self.inner.read() + let trie_nodes = self + .inner + .read() .records .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| { - let mut meta = v.1.clone(); - if let Some(hashed) = sp_trie::to_hashed_variant::( - v.0.as_slice(), &mut meta, v.2, - ) { - hashed - } else { - v.0.clone() - } - })) + .filter_map(|(_k, v)| { + v.as_ref().map(|v| { + let mut meta = v.1.clone(); + if let Some(hashed) = + sp_trie::to_hashed_variant::(v.0.as_slice(), &mut meta, v.2) + { + hashed + } else { + v.0.clone() + } + }) + }) .collect(); StorageProof::new(trie_nodes) @@ -253,11 +255,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage { type Overlay = S::Overlay; - fn get( - &self, - key: &H::Out, - prefix: Prefix, - ) -> Result, String> { + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { if let Some(v) = self.proof_recorder.get(key) { return Ok(v) } @@ -482,26 +480,33 @@ mod tests { fn proof_recorded_and_checked_inner(flagged: bool) { let size_content = 34; // above hashable value treshold. 
let value_range = 0..64; - let contents = value_range.clone() - .map(|i| (vec![i], Some(vec![i; size_content]))).collect::>(); + let contents = value_range + .clone() + .map(|i| (vec![i], Some(vec![i; size_content]))) + .collect::>(); let mut in_memory = InMemoryBackend::::default(); if flagged { - in_memory = in_memory.update(vec![(None, vec![( - sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), - Some(sp_core::storage::trie_threshold_encode( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - )), - )])]); + in_memory = in_memory.update(vec![( + None, + vec![( + sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), + Some(sp_core::storage::trie_threshold_encode( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, + )), + )], + )]); } let mut in_memory = in_memory.update(vec![(None, contents)]); let in_memory_root = in_memory.storage_root(std::iter::empty()).0; - value_range.clone() - .for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); + value_range.clone().for_each(|i| { + assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content]) + }); let trie = in_memory.as_trie_backend().unwrap(); let trie_root = trie.storage_root(std::iter::empty()).0; assert_eq!(in_memory_root, trie_root); - value_range.clone() + value_range + .clone() .for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i; size_content])); let proving = ProvingBackend::new(trie); @@ -509,7 +514,8 @@ mod tests { let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42; size_content]); } @@ -525,38 +531,36 @@ mod tests { let child_info_2 = &child_info_2; let contents = vec![ (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>()), - (Some(child_info_1.clone()), - (28..65).map(|i| 
(vec![i], Some(vec![i]))).collect()), - (Some(child_info_2.clone()), - (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), + (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), + (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let mut in_memory = InMemoryBackend::::default(); if flagged { - in_memory = in_memory.update(vec![(None, vec![( - sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), - Some(sp_core::storage::trie_threshold_encode( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - )), - )])]); + in_memory = in_memory.update(vec![( + None, + vec![( + sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), + Some(sp_core::storage::trie_threshold_encode( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, + )), + )], + )]); } in_memory = in_memory.update(contents); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; - let in_memory_root = in_memory.full_storage_root( - std::iter::empty(), - child_storage_keys.iter().map(|k|(k, std::iter::empty())), - ).0; - (0..64).for_each(|i| assert_eq!( - in_memory.storage(&[i]).unwrap().unwrap(), - vec![i] - )); - (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), - vec![i] - )); - (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), - vec![i] - )); + let in_memory_root = in_memory + .full_storage_root( + std::iter::empty(), + child_storage_keys.iter().map(|k| (k, std::iter::empty())), + ) + .0; + (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); + (28..65).for_each(|i| { + assert_eq!(in_memory.child_storage(child_info_1, &[i]).unwrap().unwrap(), vec![i]) + }); + (10..15).for_each(|i| { + assert_eq!(in_memory.child_storage(child_info_2, &[i]).unwrap().unwrap(), vec![i]) + }); let trie = in_memory.as_trie_backend().unwrap(); let trie_root = 
trie.storage_root(std::iter::empty()).0; diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 42f447917f100..ce7aaee289c12 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -19,9 +19,9 @@ use std::{ any::{Any, TypeId}, + collections::{BTreeMap, HashMap}, panic::{AssertUnwindSafe, UnwindSafe}, }; -use std::collections::{HashMap, BTreeMap}; use crate::{ backend::Backend, @@ -39,7 +39,7 @@ use sp_core::{ offchain::testing::TestPersistentOffchainDB, storage::{ well_known_keys::{is_child_storage_key, CHANGES_TRIE_CONFIG, CODE}, - Storage, ChildInfo, + ChildInfo, Storage, }, testing::TaskExecutor, traits::TaskExecutorExt, @@ -97,7 +97,6 @@ where Self::new_with_code_inner(&[], storage, true) } - /// New empty test externalities. pub fn new_empty() -> Self { Self::new_with_code(&[], Storage::default()) @@ -130,15 +129,20 @@ where let mut backend: InMemoryBackend = { let mut storage = Storage::default(); storage.modify_trie_alt_hashing_threshold(Some( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, )); storage.into() }; - let mut inner: HashMap, BTreeMap> - = storage.children_default.into_iter().map(|(_k, c)| (Some(c.child_info), c.data)).collect(); + let mut inner: HashMap, BTreeMap> = storage + .children_default + .into_iter() + .map(|(_k, c)| (Some(c.child_info), c.data)) + .collect(); inner.insert(None, storage.top); backend.insert( - inner.into_iter().map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect())), + inner + .into_iter() + .map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect())), ); backend } else { @@ -274,9 +278,9 @@ where fn default() -> Self { // default to inner hashed. 
let mut storage = Storage::default(); - storage.modify_trie_alt_hashing_threshold( - Some(sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD), - ); + storage.modify_trie_alt_hashing_threshold(Some( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, + )); Self::new(storage) } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 3e9360efc6d9a..704ae0424ba3b 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -17,26 +17,28 @@ //! Trie-based state machine backend. -use crate::{warn, debug}; -use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, empty_child_trie_root, child_delta_trie_root, - Layout}; -use sp_trie::trie_types::{TrieDB, TrieError}; -use sp_core::storage::{ChildInfo, ChildType}; -use codec::{Codec, Decode}; use crate::{ + debug, trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}, - Backend, StorageKey, StorageValue, + warn, Backend, StorageKey, StorageValue, }; +use codec::{Codec, Decode}; +use hash_db::Hasher; +use sp_core::storage::{ChildInfo, ChildType}; use sp_std::{boxed::Box, vec::Vec}; +use sp_trie::{ + child_delta_trie_root, delta_trie_root, empty_child_trie_root, + trie_types::{TrieDB, TrieError}, + Layout, Trie, +}; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. pub struct TrieBackend, H: Hasher> { - pub (crate) essence: TrieBackendEssence, + pub(crate) essence: TrieBackendEssence, // Allows setting alt hashing at start for testing only // (mainly for in_memory_backend when it cannot read it from // state). - pub (crate) force_alt_hashing: Option>, + pub(crate) force_alt_hashing: Option>, } impl, H: Hasher> TrieBackend @@ -45,10 +47,7 @@ where { /// Create new trie-based backend. 
pub fn new(storage: S, root: H::Out) -> Self { - TrieBackend { - essence: TrieBackendEssence::new(storage, root), - force_alt_hashing: None, - } + TrieBackend { essence: TrieBackendEssence::new(storage, root), force_alt_hashing: None } } /// Get backend essence reference. @@ -195,8 +194,11 @@ where fn storage_root<'a>( &self, - delta: impl Iterator)>, - ) -> (H::Out, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, Self::Transaction) + where + H::Out: Ord, + { let use_inner_hash_value = if let Some(force) = self.force_alt_hashing.as_ref() { force.clone() } else { @@ -206,10 +208,7 @@ where let mut root = *self.essence.root(); { - let mut eph = Ephemeral::new( - self.essence.backend_storage(), - &mut write_overlay, - ); + let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); let res = || { let layout = if let Some(threshold) = use_inner_hash_value { sp_trie::Layout::with_alt_hashing(threshold) @@ -231,8 +230,11 @@ where fn child_storage_root<'a>( &self, child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord { + delta: impl Iterator)>, + ) -> (H::Out, bool, Self::Transaction) + where + H::Out: Ord, + { let use_inner_hash_value = if let Some(force) = self.force_alt_hashing.as_ref() { force.clone() } else { @@ -298,12 +300,11 @@ where #[cfg(test)] pub mod tests { use super::*; - use std::{collections::HashSet, iter}; - use sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD; use codec::Encode; - use sp_core::H256; + use sp_core::{storage::TEST_DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD, H256}; use sp_runtime::traits::BlakeTwo256; use sp_trie::{trie_types::TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut}; + use std::{collections::HashSet, iter}; const CHILD_KEY_1: &[u8] = b"sub1"; @@ -338,7 +339,8 @@ pub mod tests { trie.insert( sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG, sp_core::storage::trie_threshold_encode(TRESHOLD).as_slice(), - 
).unwrap(); + ) + .unwrap(); } for i in 128u8..255u8 { trie.insert(&[i], &[i]).unwrap(); @@ -421,9 +423,8 @@ pub mod tests { storage_root_transaction_is_non_empty_inner(true); } fn storage_root_transaction_is_non_empty_inner(flagged: bool) { - let (new_root, mut tx) = test_trie(flagged).storage_root( - iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), - ); + let (new_root, mut tx) = + test_trie(flagged).storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..])))); assert!(!tx.drain().is_empty()); assert!(new_root != test_trie(false).storage_root(iter::empty()).0); } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index ac775e1aafec6..0e3db15761d43 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -18,15 +18,16 @@ //! Trie-based state machine backend essence used to read values //! from storage. -use sp_std::{ops::Deref, boxed::Box, vec::Vec}; -use hash_db::{self, Hasher, Prefix, AsHashDB, HashDB, HashDBRef}; -use sp_trie::{Trie, PrefixedMemoryDB, DBValue, Layout, - empty_child_trie_root, read_trie_value, read_child_trie_value, - KeySpacedDB, TrieDBIterator, TrieDBKeyIterator}; -use sp_trie::trie_types::{TrieDB, TrieError}; -use codec::Encode; use crate::{backend::Consolidate, debug, warn, StorageKey, StorageValue}; +use codec::Encode; +use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix}; use sp_core::storage::ChildInfo; +use sp_std::{boxed::Box, ops::Deref, vec::Vec}; +use sp_trie::{ + empty_child_trie_root, read_child_trie_value, read_trie_value, + trie_types::{TrieDB, TrieError}, + DBValue, KeySpacedDB, Layout, PrefixedMemoryDB, Trie, TrieDBIterator, TrieDBKeyIterator, +}; #[cfg(feature = "std")] use std::sync::Arc; @@ -139,10 +140,9 @@ where dyn_eph = self; } - let trie = TrieDB::::new(dyn_eph, root) - .map_err(|e| format!("TrieDB creation error: {}", e))?; - let mut iter = 
trie.key_iter() - .map_err(|e| format!("TrieDB iteration error: {}", e))?; + let trie = + TrieDB::::new(dyn_eph, root).map_err(|e| format!("TrieDB creation error: {}", e))?; + let mut iter = trie.key_iter().map_err(|e| format!("TrieDB iteration error: {}", e))?; // The key just after the one given in input, basically `key++0`. // Note: We are sure this is the next key if: @@ -158,8 +158,8 @@ where let next_element = iter.next(); let next_key = if let Some(next_element) = next_element { - let next_key = next_element - .map_err(|e| format!("TrieDB iterator next error: {}", e))?; + let next_key = + next_element.map_err(|e| format!("TrieDB iterator next error: {}", e))?; Some(next_key) } else { None @@ -263,12 +263,28 @@ where }; let mut root = H::Out::default(); root.as_mut().copy_from_slice(&root_vec); - self.trie_iter_key_inner(&root, Some(prefix), |k| { f(k); true }, Some(child_info)) + self.trie_iter_key_inner( + &root, + Some(prefix), + |k| { + f(k); + true + }, + Some(child_info), + ) } /// Execute given closure for all keys starting with prefix. 
pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - self.trie_iter_key_inner(&self.root, Some(prefix), |k| { f(k); true }, None) + self.trie_iter_key_inner( + &self.root, + Some(prefix), + |k| { + f(k); + true + }, + None, + ) } fn trie_iter_key_inner bool>( @@ -289,10 +305,13 @@ where for x in iter { let key = x?; - debug_assert!(prefix.as_ref().map(|prefix| key.starts_with(prefix)).unwrap_or(true)); + debug_assert!(prefix + .as_ref() + .map(|prefix| key.starts_with(prefix)) + .unwrap_or(true)); if !f(&key) { - break; + break } } @@ -379,8 +398,12 @@ pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> AsHashDB for Ephemeral<'a, S, H> { - fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { + self + } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { + self + } } impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { @@ -433,9 +456,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB } } -impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDBRef - for Ephemeral<'a, S, H> -{ +impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDBRef for Ephemeral<'a, S, H> { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { HashDB::get(self, key, prefix) } @@ -476,9 +497,9 @@ impl TrieBackendStorage for Arc> { } impl TrieBackendStorage for sp_trie::GenericMemoryDB - where - H: Hasher, - KF: sp_trie::KeyFunction + Send + Sync, +where + H: Hasher, + KF: sp_trie::KeyFunction + Send + Sync, { type Overlay = Self; @@ -491,16 +512,16 @@ impl TrieBackendStorage for sp_trie::GenericMemoryDB } } -impl, H: Hasher> AsHashDB - for TrieBackendEssence -{ - fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { self } +impl, H: Hasher> AsHashDB 
for TrieBackendEssence { + fn as_hash_db<'b>(&'b self) -> &'b (dyn HashDB + 'b) { + self + } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { + self + } } -impl, H: Hasher> HashDB - for TrieBackendEssence -{ +impl, H: Hasher> HashDB for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { if *key == self.empty { return Some([0u8].to_vec()) @@ -541,9 +562,7 @@ impl, H: Hasher> HashDB } } -impl, H: Hasher> HashDBRef - for TrieBackendEssence -{ +impl, H: Hasher> HashDBRef for TrieBackendEssence { fn get(&self, key: &H::Out, prefix: Prefix) -> Option { HashDB::get(self, key, prefix) } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index ed0eac1f2d44b..b00a42d8aab18 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -244,8 +244,7 @@ pub fn trie_threshold_encode(threshold: u32) -> Vec { /// Configuration threshold from encoded, invalid encoded /// is same as no threshold. pub fn trie_threshold_decode(mut encoded: &[u8]) -> Option { - codec::Compact::::decode(&mut encoded).ok() - .map(|compact| compact.0) + codec::Compact::::decode(&mut encoded).ok().map(|compact| compact.0) } /// Default value to use as a threshold for testing. diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 10a1e62eff32d..179f44db1e7a0 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -26,30 +26,32 @@ mod storage_proof; mod trie_codec; mod trie_stream; -use sp_std::{boxed::Box, marker::PhantomData, vec, vec::Vec, borrow::Borrow, fmt}; -use trie_db::proof::{generate_proof, verify_proof}; -pub use trie_db::proof::VerifyError; -/// The Substrate format implementation of `TrieStream`. -pub use trie_stream::TrieStream; -/// The Substrate format implementation of `NodeCodec`. -pub use node_codec::NodeCodec; -pub use storage_proof::{StorageProof, CompactProof}; -/// Various re-exports from the `trie-db` crate. 
-pub use trie_db::{ - Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, - nibble_ops, TrieDBIterator, TrieDBKeyIterator, Meta, node::{NodePlan, ValuePlan}, -}; -/// Various re-exports from the `memory-db` crate. -pub use memory_db::KeyFunction; /// Our `NodeCodec`-specific error. pub use error::Error; /// Various re-exports from the `hash-db` crate. pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; use hash_db::{Hasher, Prefix}; pub use memory_db::prefixed_key; +/// Various re-exports from the `memory-db` crate. +pub use memory_db::KeyFunction; +/// The Substrate format implementation of `NodeCodec`. +pub use node_codec::NodeCodec; +use sp_std::{borrow::Borrow, boxed::Box, fmt, marker::PhantomData, vec, vec::Vec}; +pub use storage_proof::{CompactProof, StorageProof}; /// Trie codec reexport, mainly child trie support /// for trie compact proof. pub use trie_codec::{decode_compact, encode_compact, Error as CompactProofError}; +pub use trie_db::proof::VerifyError; +use trie_db::proof::{generate_proof, verify_proof}; +/// Various re-exports from the `trie-db` crate. +pub use trie_db::{ + nibble_ops, + node::{NodePlan, ValuePlan}, + CError, DBValue, Meta, Query, Recorder, Trie, TrieConfiguration, TrieDBIterator, + TrieDBKeyIterator, TrieLayout, TrieMut, +}; +/// The Substrate format implementation of `TrieStream`. 
+pub use trie_stream::TrieStream; /// substrate trie layout pub struct Layout(Option, sp_std::marker::PhantomData); @@ -81,8 +83,8 @@ impl Layout { } impl TrieLayout for Layout - where - H: Hasher, +where + H: Hasher, { const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; @@ -97,10 +99,11 @@ impl TrieLayout for Layout } impl TrieConfiguration for Layout - where - H: Hasher, +where + H: Hasher, { - fn trie_root(&self, input: I) -> ::Out where + fn trie_root(&self, input: I) -> ::Out + where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, @@ -108,7 +111,8 @@ impl TrieConfiguration for Layout trie_root::trie_root_no_extension::(input, self.alt_threshold()) } - fn trie_root_unhashed(&self, input: I) -> Vec where + fn trie_root_unhashed(&self, input: I) -> Vec + where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, @@ -136,17 +140,14 @@ pub type HashDB<'a, H> = dyn hash_db::HashDB + 'a; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. /// This uses a `KeyFunction` for prefixing keys internally (avoiding /// key conflict for non random keys). -pub type PrefixedMemoryDB = memory_db::MemoryDB< - H, memory_db::PrefixedKey, trie_db::DBValue, MemTracker, ->; +pub type PrefixedMemoryDB = + memory_db::MemoryDB, trie_db::DBValue, MemTracker>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. /// This uses a noops `KeyFunction` (key addressing must be hashed or using /// an encoding scheme that avoid key conflict). pub type MemoryDB = memory_db::MemoryDB, trie_db::DBValue, MemTracker>; /// Reexport from `hash_db`, with genericity set for `Hasher` trait. -pub type GenericMemoryDB = memory_db::MemoryDB< - H, KF, trie_db::DBValue, MemTracker, ->; +pub type GenericMemoryDB = memory_db::MemoryDB; /// Persistent trie database read-access interface for the a given hasher. 
pub type TrieDB<'a, L> = trie_db::TrieDB<'a, L>; @@ -182,9 +183,10 @@ pub fn generate_trie_proof<'a, L, I, K, DB>( db: &DB, root: TrieHash, keys: I, -) -> Result>, Box>> where +) -> Result>, Box>> +where L: TrieConfiguration, - I: IntoIterator, + I: IntoIterator, K: 'a + AsRef<[u8]>, DB: hash_db::HashDBRef, { @@ -205,9 +207,10 @@ pub fn verify_trie_proof<'a, L, I, K, V>( root: &TrieHash, proof: &[Vec], items: I, -) -> Result<(), VerifyError, error::Error>> where +) -> Result<(), VerifyError, error::Error>> +where L: TrieConfiguration, - I: IntoIterator)>, + I: IntoIterator)>, K: 'a + AsRef<[u8]>, V: 'a + AsRef<[u8]>, { @@ -222,7 +225,8 @@ pub fn delta_trie_root( mut root: TrieHash, delta: I, layout: L, -) -> Result, Box>> where +) -> Result, Box>> +where I: IntoIterator, A: Borrow<[u8]>, B: Borrow>, @@ -250,28 +254,30 @@ pub fn delta_trie_root( pub fn read_trie_value( db: &DB, root: &TrieHash, - key: &[u8] + key: &[u8], ) -> Result>, Box>> - where - L: TrieConfiguration, - DB: hash_db::HashDBRef, +where + L: TrieConfiguration, + DB: hash_db::HashDBRef, { Ok(TrieDB::::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) } /// Read a value from the trie with given Query. -pub fn read_trie_value_with ( +pub fn read_trie_value_with( db: &DB, root: &TrieHash, key: &[u8], - query: Q + query: Q, ) -> Result>, Box>> - where - L: TrieConfiguration, - Q: Query, - DB: hash_db::HashDBRef +where + L: TrieConfiguration, + Q: Query, + DB: hash_db::HashDBRef, { - Ok(TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) + Ok(TrieDB::::new(&*db, root)? + .get_with(key, query) + .map(|x| x.map(|val| val.to_vec()))?) } /// Determine the empty trie root. 
@@ -290,10 +296,10 @@ pub fn child_trie_root( layout: &L, input: I, ) -> ::Out - where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, +where + I: IntoIterator, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, { layout.trie_root(input) } @@ -307,33 +313,29 @@ pub fn child_delta_trie_root( delta: I, layout: L, ) -> Result<::Out, Box>> - where - I: IntoIterator, - A: Borrow<[u8]>, - B: Borrow>, - V: Borrow<[u8]>, - RD: AsRef<[u8]>, - DB: hash_db::HashDB, +where + I: IntoIterator, + A: Borrow<[u8]>, + B: Borrow>, + V: Borrow<[u8]>, + RD: AsRef<[u8]>, + DB: hash_db::HashDB, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. root.as_mut().copy_from_slice(root_data.as_ref()); let mut db = KeySpacedDBMut::new(&mut *db, keyspace); - delta_trie_root::( - &mut db, - root, - delta, - layout, - ) + delta_trie_root::(&mut db, root, delta, layout) } /// Record all keys for a given root. pub fn record_all_keys( db: &DB, root: &TrieHash, - recorder: &mut Recorder> -) -> Result<(), Box>> where + recorder: &mut Recorder>, +) -> Result<(), Box>> +where DB: hash_db::HashDBRef, { let trie = TrieDB::::new(&*db, root)?; @@ -358,8 +360,8 @@ pub fn read_child_trie_value( root_slice: &[u8], key: &[u8], ) -> Result>, Box>> - where - DB: hash_db::HashDBRef, +where + DB: hash_db::HashDBRef, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. @@ -377,10 +379,10 @@ pub fn read_child_trie_value_with( key: &[u8], query: Q, ) -> Result>, Box>> - where - L: TrieConfiguration, - Q: Query, - DB: hash_db::HashDBRef, +where + L: TrieConfiguration, + Q: Query, + DB: hash_db::HashDBRef, { let mut root = TrieHash::::default(); // root is fetched from DB, not writable by runtime, so it's always valid. 
@@ -518,7 +520,7 @@ fn inner_hashed_value(x: &[u8], range: Option<(usize, usize)>) -> Vec let mut buff = vec![0; x.len() + hash_end.as_ref().len() - (end - start)]; buff[..start].copy_from_slice(&x[..start]); buff[start..].copy_from_slice(hash_end.as_ref()); - return buff; + return buff } if start == 0 && end < len { // start inner hash @@ -527,7 +529,7 @@ fn inner_hashed_value(x: &[u8], range: Option<(usize, usize)>) -> Vec let mut buff = vec![0; x.len() + hash_len - (end - start)]; buff[..hash_len].copy_from_slice(hash_start.as_ref()); buff[hash_len..].copy_from_slice(&x[end..]); - return buff; + return buff } if start < len && end < len { // middle inner hash @@ -537,7 +539,7 @@ fn inner_hashed_value(x: &[u8], range: Option<(usize, usize)>) -> Vec buff[..start].copy_from_slice(&x[..start]); buff[start..start + hash_len].copy_from_slice(hash_middle.as_ref()); buff[start + hash_len..].copy_from_slice(&x[end..]); - return buff; + return buff } } // if anything wrong default to hash @@ -561,7 +563,7 @@ pub fn estimate_entry_size(entry: &(DBValue, Meta, bool), hash_len: usize) -> us } /// Switch to hashed value variant. -pub fn to_hashed_variant( +pub fn to_hashed_variant( value: &[u8], meta: &mut Meta, used_value: bool, @@ -576,7 +578,7 @@ pub fn to_hashed_variant( let value = inner_hashed_value::(value, Some((range.start, range.end))); stored.extend_from_slice(value.as_slice()); meta.contain_hash = true; - return Some(stored); + return Some(stored) } None } @@ -584,7 +586,7 @@ pub fn to_hashed_variant( /// Decode plan in order to update meta early (needed to register proofs). pub fn resolve_encoded_meta(entry: &mut (DBValue, Meta, bool)) { use trie_db::NodeCodec; - let _ = as TrieLayout>::Codec::decode_plan(entry.0.as_slice(), &mut entry.1); + let _ = as TrieLayout>::Codec::decode_plan(entry.0.as_slice(), &mut entry.1); } /// Constants used into trie simplification codec. 
@@ -604,19 +606,17 @@ mod trie_constants { #[cfg(test)] mod tests { use super::*; - use codec::{Encode, Decode, Compact}; - use sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD; + use codec::{Compact, Decode, Encode}; use hash_db::{HashDB, Hasher}; use hex_literal::hex; - use sp_core::Blake2Hasher; + use sp_core::{storage::TEST_DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD, Blake2Hasher}; use trie_db::{DBValue, NodeCodec as NodeCodecT, Trie, TrieMut}; use trie_standardmap::{Alphabet, StandardMap, ValueMode}; type Layout = super::Layout; - type MemoryDBMeta = memory_db::MemoryDB< - H, memory_db::HashKey, trie_db::DBValue, MemTracker, - >; + type MemoryDBMeta = + memory_db::MemoryDB, trie_db::DBValue, MemTracker>; fn hashed_null_node() -> TrieHash { ::hashed_null_node() @@ -677,9 +677,12 @@ mod tests { let mut empty = TrieDBMut::::new(&mut db, &mut root); empty.commit(); let root1 = empty.root().as_ref().to_vec(); - let root2: Vec = Layout::default().trie_root::<_, Vec, Vec>( - std::iter::empty(), - ).as_ref().iter().cloned().collect(); + let root2: Vec = Layout::default() + .trie_root::<_, Vec, Vec>(std::iter::empty()) + .as_ref() + .iter() + .cloned() + .collect(); assert_eq!(root1, root2); } @@ -698,19 +701,15 @@ mod tests { #[test] fn branch_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &[0x10][..]), - (&[0xba][..], &[0x11][..]), - ]; + let input: Vec<(&[u8], &[u8])> = + vec![(&[0xaa][..], &[0x10][..]), (&[0xba][..], &[0x11][..])]; check_input(&input); } #[test] fn extension_and_branch_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &[0x10][..]), - (&[0xab][..], &[0x11][..]), - ]; + let input: Vec<(&[u8], &[u8])> = + vec![(&[0xaa][..], &[0x10][..]), (&[0xab][..], &[0x11][..])]; check_input(&input); } @@ -785,8 +784,8 @@ mod tests { v: &[(Vec, Vec)], layout: T, ) -> TrieDBMut<'db, T> - where - T: TrieConfiguration, + where + T: TrieConfiguration, { let mut t = TrieDBMut::::new_with_layout(db, root, 
layout); for i in 0..v.len() { @@ -827,11 +826,7 @@ mod tests { } .make_with(seed.as_fixed_bytes_mut()); - let layout = if flag { - Layout::with_alt_hashing(TRESHOLD) - } else { - Layout::default() - }; + let layout = if flag { Layout::with_alt_hashing(TRESHOLD) } else { Layout::default() }; let real = layout.trie_root(x.clone()); let mut memdb = MemoryDB::default(); let mut root = Default::default(); @@ -879,9 +874,7 @@ mod tests { #[test] fn codec_trie_single_tuple() { let layout = Layout::default(); - let input = vec![ - (vec![0xaa], vec![0xbb]) - ]; + let input = vec![(vec![0xaa], vec![0xbb])]; let trie = layout.trie_root_unhashed(input); println!("trie: {:#x?}", trie); assert_eq!( @@ -927,11 +920,7 @@ mod tests { iterator_works_inner(false); } fn iterator_works_inner(flag: bool) { - let layout = if flag { - Layout::with_alt_hashing(TRESHOLD) - } else { - Layout::default() - }; + let layout = if flag { Layout::with_alt_hashing(TRESHOLD) } else { Layout::default() }; let pairs = vec![ (hex!("0103000000000000000464").to_vec(), hex!("0400000000").to_vec()), @@ -1058,13 +1047,15 @@ mod tests { storage_root, valid_delta, Default::default(), - ).unwrap(); + ) + .unwrap(); let second_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, invalid_delta, Default::default(), - ).unwrap(); + ) + .unwrap(); assert_eq!(first_storage_root, second_storage_root); } diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 29108d76e7778..1c6a2bec72924 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -17,16 +17,16 @@ //! `NodeCodec` implementation for Substrate's trie format. 
-use sp_std::marker::PhantomData; -use sp_std::ops::Range; -use sp_std::vec::Vec; -use sp_std::borrow::Borrow; +use super::node_header::{NodeHeader, NodeKind}; use crate::{error::Error, trie_constants}; use codec::{Compact, Decode, Encode, Input}; use hash_db::Hasher; -use trie_db::{self, node::{NibbleSlicePlan, NodePlan, Value, ValuePlan, NodeHandlePlan}, - ChildReference, nibble_ops, Partial, NodeCodec as NodeCodecT, Meta}; -use super::{node_header::{NodeHeader, NodeKind}}; +use sp_std::{borrow::Borrow, marker::PhantomData, ops::Range, vec::Vec}; +use trie_db::{ + self, nibble_ops, + node::{NibbleSlicePlan, NodeHandlePlan, NodePlan, Value, ValuePlan}, + ChildReference, Meta, NodeCodec as NodeCodecT, Partial, +}; /// Helper struct for trie node decoder. This implements `codec::Input` on a byte slice, while /// tracking the absolute position. This is similar to `std::io::Cursor` but does not implement @@ -81,10 +81,7 @@ impl<'a> Input for ByteSliceInput<'a> { pub struct NodeCodec(PhantomData); impl NodeCodec { - fn decode_plan_inner_hashed( - data: &[u8], - meta: &mut Meta, - ) -> Result { + fn decode_plan_inner_hashed(data: &[u8], meta: &mut Meta) -> Result { let mut input = ByteSliceInput::new(data); let header = NodeHeader::decode(&mut input)?; @@ -101,8 +98,7 @@ impl NodeCodec { match header { NodeHeader::Null => Ok(NodePlan::Empty), - NodeHeader::AltHashBranch(nibble_count, _) - | NodeHeader::Branch(_, nibble_count) => { + NodeHeader::AltHashBranch(nibble_count, _) | NodeHeader::Branch(_, nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { @@ -147,8 +143,7 @@ impl NodeCodec { children, }) }, - NodeHeader::AltHashLeaf(nibble_count, _) - | NodeHeader::Leaf(nibble_count) => { + NodeHeader::AltHashLeaf(nibble_count, _) | NodeHeader::Leaf(nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check 
that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { @@ -177,8 +172,8 @@ impl NodeCodec { } impl NodeCodecT for NodeCodec - where - H: Hasher, +where + H: Hasher, { const OFFSET_CONTAINS_HASH: usize = 1; type Error = Error; @@ -214,9 +209,12 @@ impl NodeCodecT for NodeCodec // With fix inner hashing alt hash can be use with all node, but // that is not better (encoding can use an additional nibble byte // sometime). - let mut output = if meta.try_inner_hashing.as_ref().map(|threshold| - value_do_hash(&value, threshold) - ).unwrap_or(meta.apply_inner_hashing) { + let mut output = if meta + .try_inner_hashing + .as_ref() + .map(|threshold| value_do_hash(&value, threshold)) + .unwrap_or(meta.apply_inner_hashing) + { if contains_hash { partial_encode(partial, NodeKind::AltHashLeafHash) } else { @@ -270,29 +268,37 @@ impl NodeCodecT for NodeCodec value: Value, meta: &mut Meta, ) -> Vec { - let contains_hash = matches!(&value, Value::HashedValue(..)); - let mut output = match (&value, meta.try_inner_hashing.as_ref().map(|threshold| - value_do_hash(&value, threshold) - ).unwrap_or(meta.apply_inner_hashing)) { - (&Value::NoValue, _) => { - partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) - }, - (_, false) => { - partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue) - }, - (_, true) => { + let mut output = match ( + &value, + meta.try_inner_hashing + .as_ref() + .map(|threshold| value_do_hash(&value, threshold)) + .unwrap_or(meta.apply_inner_hashing), + ) { + (&Value::NoValue, _) => + partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue), + (_, false) => + partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue), + (_, true) => if contains_hash { - partial_from_iterator_encode(partial, number_nibble, NodeKind::AltHashBranchWithValueHash) + partial_from_iterator_encode( + partial, + number_nibble, + 
NodeKind::AltHashBranchWithValueHash, + ) } else { - partial_from_iterator_encode(partial, number_nibble, NodeKind::AltHashBranchWithValue) - } - }, + partial_from_iterator_encode( + partial, + number_nibble, + NodeKind::AltHashBranchWithValue, + ) + }, }; let bitmap_index = output.len(); let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH]; - (0..BITMAP_LENGTH).for_each(|_|output.push(0)); + (0..BITMAP_LENGTH).for_each(|_| output.push(0)); match value { Value::Value(value) => { let with_len = output.len(); @@ -311,17 +317,20 @@ impl NodeCodecT for NodeCodec }, Value::NoValue => (), } - Bitmap::encode(children.map(|maybe_child| match maybe_child.borrow() { - Some(ChildReference::Hash(h)) => { - h.as_ref().encode_to(&mut output); - true - } - &Some(ChildReference::Inline(inline_data, len)) => { - inline_data.as_ref()[..len].encode_to(&mut output); - true - } - None => false, - }), bitmap.as_mut()); + Bitmap::encode( + children.map(|maybe_child| match maybe_child.borrow() { + Some(ChildReference::Hash(h)) => { + h.as_ref().encode_to(&mut output); + true + }, + &Some(ChildReference::Inline(inline_data, len)) => { + inline_data.as_ref()[..len].encode_to(&mut output); + true + }, + None => false, + }), + bitmap.as_mut(), + ); output[bitmap_index..bitmap_index + BITMAP_LENGTH] .copy_from_slice(&bitmap[..BITMAP_LENGTH]); output @@ -332,13 +341,9 @@ impl NodeCodecT for NodeCodec fn value_do_hash(val: &Value, threshold: &u32) -> bool { match val { - Value::Value(val) => { - val.encoded_size() >= *threshold as usize - }, + Value::Value(val) => val.encoded_size() >= *threshold as usize, Value::HashedValue(..) 
=> true, // can only keep hashed - Value::NoValue => { - false - }, + Value::NoValue => false, } } @@ -356,12 +361,14 @@ fn partial_from_iterator_encode>( NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), - NodeKind::AltHashLeaf => NodeHeader::AltHashLeaf(nibble_count, false).encode_to(&mut output), - NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(nibble_count, false) - .encode_to(&mut output), - NodeKind::AltHashLeafHash => NodeHeader::AltHashLeaf(nibble_count, true).encode_to(&mut output), - NodeKind::AltHashBranchWithValueHash => NodeHeader::AltHashBranch(nibble_count, true) - .encode_to(&mut output), + NodeKind::AltHashLeaf => + NodeHeader::AltHashLeaf(nibble_count, false).encode_to(&mut output), + NodeKind::AltHashBranchWithValue => + NodeHeader::AltHashBranch(nibble_count, false).encode_to(&mut output), + NodeKind::AltHashLeafHash => + NodeHeader::AltHashLeaf(nibble_count, true).encode_to(&mut output), + NodeKind::AltHashBranchWithValueHash => + NodeHeader::AltHashBranch(nibble_count, true).encode_to(&mut output), }; output.extend(partial); output @@ -380,12 +387,14 @@ fn partial_encode(partial: Partial, node_kind: NodeKind) -> Vec { NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), - NodeKind::AltHashLeaf => NodeHeader::AltHashLeaf(nibble_count, false).encode_to(&mut output), - NodeKind::AltHashBranchWithValue => NodeHeader::AltHashBranch(nibble_count, false) - .encode_to(&mut output), - NodeKind::AltHashLeafHash => NodeHeader::AltHashLeaf(nibble_count, true).encode_to(&mut output), - NodeKind::AltHashBranchWithValueHash => 
NodeHeader::AltHashBranch(nibble_count, true) - .encode_to(&mut output), + NodeKind::AltHashLeaf => + NodeHeader::AltHashLeaf(nibble_count, false).encode_to(&mut output), + NodeKind::AltHashBranchWithValue => + NodeHeader::AltHashBranch(nibble_count, false).encode_to(&mut output), + NodeKind::AltHashLeafHash => + NodeHeader::AltHashLeaf(nibble_count, true).encode_to(&mut output), + NodeKind::AltHashBranchWithValueHash => + NodeHeader::AltHashBranch(nibble_count, true).encode_to(&mut output), }; if number_nibble_encoded > 0 { output.push(nibble_ops::pad_right((partial.0).1)); diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index fd782981d4b5a..2443ad03dc53e 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -38,8 +38,7 @@ pub(crate) enum NodeHeader { impl NodeHeader { pub(crate) fn contains_hash_of_value(&self) -> bool { match self { - NodeHeader::AltHashBranch(_, true) - | NodeHeader::AltHashLeaf(_, true) => true, + NodeHeader::AltHashBranch(_, true) | NodeHeader::AltHashLeaf(_, true) => true, _ => false, } } @@ -63,16 +62,28 @@ impl Encode for NodeHeader { } match self { NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE), - NodeHeader::Branch(true, nibble_count) => + NodeHeader::Branch(true, nibble_count) => encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, 2, output), - NodeHeader::Branch(false, nibble_count) => - encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITHOUT_MASK, 2, output), + NodeHeader::Branch(false, nibble_count) => encode_size_and_prefix( + *nibble_count, + trie_constants::BRANCH_WITHOUT_MASK, + 2, + output, + ), NodeHeader::Leaf(nibble_count) => encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, 2, output), - NodeHeader::AltHashBranch(nibble_count, _) => - encode_size_and_prefix(*nibble_count, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4, output), - NodeHeader::AltHashLeaf(nibble_count, _) => - 
encode_size_and_prefix(*nibble_count, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3, output), + NodeHeader::AltHashBranch(nibble_count, _) => encode_size_and_prefix( + *nibble_count, + trie_constants::ALT_HASHING_BRANCH_WITH_MASK, + 4, + output, + ), + NodeHeader::AltHashLeaf(nibble_count, _) => encode_size_and_prefix( + *nibble_count, + trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, + 3, + output, + ), } } } @@ -81,11 +92,8 @@ impl NodeHeader { /// Is this header using alternate hashing scheme. pub(crate) fn alt_hashing(&self) -> bool { match self { - NodeHeader::Null - | NodeHeader::Leaf(..) - | NodeHeader::Branch(..) => false, - NodeHeader::AltHashBranch(..) - | NodeHeader::AltHashLeaf(..) => true, + NodeHeader::Null | NodeHeader::Leaf(..) | NodeHeader::Branch(..) => false, + NodeHeader::AltHashBranch(..) | NodeHeader::AltHashLeaf(..) => true, } } } @@ -106,8 +114,10 @@ impl Decode for NodeHeader { }; match i & (0b11 << 6) { trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input, 2)?)), - trie_constants::BRANCH_WITH_MASK => Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)), - trie_constants::BRANCH_WITHOUT_MASK => Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)), + trie_constants::BRANCH_WITH_MASK => + Ok(NodeHeader::Branch(true, decode_size(i, input, 2)?)), + trie_constants::BRANCH_WITHOUT_MASK => + Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)), trie_constants::EMPTY_TRIE => { if i & (0b111 << 5) == trie_constants::ALT_HASHING_LEAF_PREFIX_MASK { Ok(NodeHeader::AltHashLeaf(decode_size(i, input, 3)?, contain_hash)) @@ -159,7 +169,8 @@ pub(crate) fn size_and_prefix_iterator( /// Encodes size and prefix to a stream output (prefix on 2 first bit only). 
fn encode_size_and_prefix(size: usize, prefix: u8, prefix_mask: usize, out: &mut W) - where W: Output + ?Sized, +where + W: Output + ?Sized, { for b in size_and_prefix_iterator(size, prefix, prefix_mask) { out.push_byte(b) @@ -175,7 +186,7 @@ fn decode_size( let max_value = 255u8 >> prefix_mask; let mut result = (first & max_value) as usize; if result < max_value as usize { - return Ok(result); + return Ok(result) } result -= 1; while result <= trie_constants::NIBBLE_SIZE_BOUND { diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index e0344e9e02bdb..d9c57c7e8a95d 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -15,11 +15,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::{Layout, TrieLayout}; use codec::{Decode, Encode}; use hash_db::{HashDB, Hasher}; use sp_std::vec::Vec; use trie_db::NodeCodec; -use crate::{Layout, TrieLayout}; /// A proof that some set of key-value pairs are included in the storage trie. The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that @@ -178,11 +178,11 @@ impl From for crate::MemoryDB { for item in proof.trie_nodes.iter() { let mut meta = Default::default(); // Read meta from state (required for value layout). 
- let _ = as TrieLayout>::Codec::decode_plan(item.as_slice(), &mut meta); + let _ = as TrieLayout>::Codec::decode_plan(item.as_slice(), &mut meta); db.alt_insert( crate::EMPTY_PREFIX, item, - meta.resolve_alt_hashing::< as TrieLayout>::Codec>(), + meta.resolve_alt_hashing::< as TrieLayout>::Codec>(), ); } db diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index 52a6c550da976..ad6228b30c511 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -113,11 +113,8 @@ where let mut nodes_iter = encoded.into_iter(); // Layout does not change trie reading. let layout = L::default(); - let (top_root, _nb_used) = trie_db::decode_compact_from_iter::( - db, - &mut nodes_iter, - &layout, - )?; + let (top_root, _nb_used) = + trie_db::decode_compact_from_iter::(db, &mut nodes_iter, &layout)?; // Only check root if expected root is passed as argument. if let Some(expected_root) = expected_root { @@ -167,11 +164,8 @@ where let mut previous_extracted_child_trie = None; for child_root in child_tries.into_iter() { if previous_extracted_child_trie.is_none() { - let (top_root, _) = trie_db::decode_compact_from_iter::( - db, - &mut nodes_iter, - &layout, - )?; + let (top_root, _) = + trie_db::decode_compact_from_iter::(db, &mut nodes_iter, &layout)?; previous_extracted_child_trie = Some(top_root); } diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index 52851bdba933c..aba5ea3d4aa14 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -22,11 +22,10 @@ use crate::{ node_header::{size_and_prefix_iterator, NodeKind}, trie_constants, }; -use codec::{Encode, Compact}; +use codec::{Compact, Encode}; use hash_db::Hasher; -use sp_std::vec::Vec; +use sp_std::{ops::Range, vec::Vec}; use trie_root; -use sp_std::ops::Range; const BRANCH_NODE_NO_VALUE: u8 = 254; const BRANCH_NODE_WITH_VALUE: u8 = 255; @@ -69,14 +68,16 @@ fn fuse_nibbles_node<'a>(nibbles: &'a 
[u8], kind: NodeKind) -> impl Iterator size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK, 2), - NodeKind::BranchNoValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2), - NodeKind::BranchWithValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK, 2), + NodeKind::BranchNoValue => + size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK, 2), + NodeKind::BranchWithValue => + size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK, 2), NodeKind::AltHashLeaf => size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3), NodeKind::AltHashBranchWithValue => size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4), - NodeKind::AltHashBranchWithValueHash - | NodeKind::AltHashLeafHash => unreachable!("only added value that do not contain hash"), + NodeKind::AltHashBranchWithValueHash | NodeKind::AltHashLeafHash => + unreachable!("only added value that do not contain hash"), }; iter_start .chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None }) @@ -98,14 +99,12 @@ impl trie_root::TrieStream for TrieStream { } fn append_leaf(&mut self, key: &[u8], value: &[u8]) { - self.apply_inner_hashing = self.inner_value_hashing.as_ref().map(|threshold| - value_do_hash(value, threshold) - ).unwrap_or(false); - let kind = if self.apply_inner_hashing { - NodeKind::AltHashLeaf - } else { - NodeKind::Leaf - }; + self.apply_inner_hashing = self + .inner_value_hashing + .as_ref() + .map(|threshold| value_do_hash(value, threshold)) + .unwrap_or(false); + let kind = if self.apply_inner_hashing { NodeKind::AltHashLeaf } else { NodeKind::Leaf }; self.buffer.extend(fuse_nibbles_node(key, kind)); let start = self.buffer.len(); Compact(value.len() as u32).encode_to(&mut self.buffer); @@ -121,9 +120,11 @@ impl trie_root::TrieStream for TrieStream { ) { if let Some(partial) = maybe_partial { if let Some(value) = maybe_value { - self.apply_inner_hashing = 
self.inner_value_hashing.as_ref().map(|threshold| - value_do_hash(value, threshold) - ).unwrap_or(false); + self.apply_inner_hashing = self + .inner_value_hashing + .as_ref() + .map(|threshold| value_do_hash(value, threshold)) + .unwrap_or(false); let kind = if self.apply_inner_hashing { NodeKind::AltHashBranchWithValue } else { @@ -157,17 +158,18 @@ impl trie_root::TrieStream for TrieStream { let data = other.out(); match data.len() { 0..=31 => data.encode_to(&mut self.buffer), - _ => { + _ => if apply_inner_hashing { hash_db::AltHashing { encoded_offset: 0, value_range: range.map(|r| (r.start, r.end)), - }.alt_hash::(&data).as_ref() - .encode_to(&mut self.buffer); + } + .alt_hash::(&data) + .as_ref() + .encode_to(&mut self.buffer); } else { H::hash(&data).as_ref().encode_to(&mut self.buffer); - } - }, + }, } } @@ -176,10 +178,8 @@ impl trie_root::TrieStream for TrieStream { let range = self.current_value_range; let data = self.buffer; if apply_inner_hashing { - hash_db::AltHashing { - encoded_offset: 0, - value_range: range.map(|r| (r.start, r.end)), - }.alt_hash::(&data) + hash_db::AltHashing { encoded_offset: 0, value_range: range.map(|r| (r.start, r.end)) } + .alt_hash::(&data) } else { H::hash(&data) } diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index b1c0b9299ae0b..1c5135ff556ec 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -231,9 +231,9 @@ impl let storage = { let mut storage = self.genesis_init.genesis_storage(); if self.state_hashed_value { - storage.modify_trie_alt_hashing_threshold( - Some(sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD), - ); + storage.modify_trie_alt_hashing_threshold(Some( + sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, + )); } // Add some child storage keys. 
diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 29b7a423c350b..d5cd4d3ab1958 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -394,7 +394,9 @@ pub fn new(hashed_state: bool) -> Client { } /// Creates new light client instance used for tests. -pub fn new_light(hashed_state: bool) -> ( +pub fn new_light( + hashed_state: bool, +) -> ( client::Client< LightBackend, LightExecutor, @@ -420,10 +422,7 @@ pub fn new_light(hashed_state: bool) -> ( if hashed_state { builder = builder.state_hashed_value(); } - ( - builder.build_with_executor(call_executor).0, - backend, - ) + (builder.build_with_executor(call_executor).0, backend) } /// Creates new light client fetcher used for tests. diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index 2c8a9ba6f02d9..4126c1f4401fe 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -325,7 +325,7 @@ mod tests { sp_tracing::try_init_simple(); // given - let client = Arc::new(substrate_test_runtime_client::new(true,)); + let client = Arc::new(substrate_test_runtime_client::new(true)); let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); From 1e02c0166034df496167b1802eb25cef84fe1cc1 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Mon, 16 Aug 2021 11:56:23 +0200 Subject: [PATCH 066/188] rustfmt --- client/light/src/fetcher.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index 438d6b2fc114e..fba3d854b8d2b 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -169,7 +169,8 @@ impl> LightDataChecker { remote_roots_proof: StorageProof, ) -> ClientResult<()> { // all the checks are sharing the same storage - let storage: sp_state_machine::MemoryDB> = 
remote_roots_proof.into_memory_db_no_meta(); + let storage: sp_state_machine::MemoryDB> = + remote_roots_proof.into_memory_db_no_meta(); // remote_roots.keys() are sorted => we can use this to group changes tries roots // that are belongs to the same CHT From a48970fd8731e964823e7596ea5a387b72662f1e Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 27 Aug 2021 15:14:27 +0200 Subject: [PATCH 067/188] fix --- primitives/state-machine/src/proving_backend.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index ee097b8d89060..235f37b1c9c1e 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -484,7 +484,7 @@ mod tests { .clone() .map(|i| (vec![i], Some(vec![i; size_content]))) .collect::>(); - let in_memory = InMemoryBackend::::default(); + let mut in_memory = InMemoryBackend::::default(); if flagged { in_memory = in_memory.update(vec![( None, @@ -496,7 +496,7 @@ mod tests { )], )]); } - let mut in_memory = in_memory.update(vec![(None, contents)]); + let in_memory = in_memory.update(vec![(None, contents)]); let in_memory_root = in_memory.storage_root(std::iter::empty()).0; value_range.clone().for_each(|i| { assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content]) @@ -534,7 +534,7 @@ mod tests { (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; - let in_memory = InMemoryBackend::::default(); + let mut in_memory = InMemoryBackend::::default(); if flagged { in_memory = in_memory.update(vec![( None, From 92b9fcffd70ce6c41be382db42b87e410584d41e Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 27 Aug 2021 18:42:45 +0200 Subject: [PATCH 068/188] start host function driven versioning --- primitives/core/src/lib.rs | 1 + primitives/externalities/src/lib.rs | 
8 +-- primitives/io/src/lib.rs | 29 ++++++++++- primitives/runtime/src/lib.rs | 2 +- primitives/state-machine/src/backend.rs | 7 ++- primitives/state-machine/src/basic.rs | 28 +++++----- primitives/state-machine/src/ext.rs | 14 ++--- .../state-machine/src/in_memory_backend.rs | 38 +++++++++----- .../src/overlayed_changes/mod.rs | 11 ++-- .../state-machine/src/proving_backend.rs | 7 ++- primitives/state-machine/src/read_only.rs | 8 +-- primitives/state-machine/src/testing.rs | 51 ++++++------------- primitives/state-machine/src/trie_backend.rs | 18 ++----- primitives/storage/src/lib.rs | 8 +++ primitives/tasks/src/async_externalities.rs | 8 +-- test-utils/runtime/src/lib.rs | 4 +- 16 files changed, 138 insertions(+), 104 deletions(-) diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 0a61c90d71357..fc09b5cd466b7 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -86,6 +86,7 @@ pub use self::hasher::keccak::KeccakHasher; pub use hash_db::Hasher; pub use sp_storage as storage; +pub use sp_storage::{StateVersion, DEFAULT_STATE_HASHING}; #[doc(hidden)] pub use sp_std; diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index e6a8f8caa8d33..46beceb585784 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -31,7 +31,7 @@ use sp_std::{ vec::Vec, }; -use sp_storage::{ChildInfo, TrackedStorageKey}; +use sp_storage::{ChildInfo, TrackedStorageKey, StateVersion}; pub use extensions::{Extension, ExtensionStore, Extensions}; pub use scope_limited::{set_and_run_with_externalities, with_externalities}; @@ -157,7 +157,7 @@ pub trait Externalities: ExtensionStore { /// This will also update all child storage keys in the top-level storage map. /// /// The returned hash is defined by the `Block` and is SCALE encoded. 
- fn storage_root(&mut self) -> Vec; + fn storage_root(&mut self, state_hashing: StateVersion) -> Vec; /// Get the trie root of a child storage map. /// @@ -165,7 +165,7 @@ pub trait Externalities: ExtensionStore { /// /// If the storage root equals the default hash as defined by the trie, the key in the top-level /// storage map will be removed. - fn child_storage_root(&mut self, child_info: &ChildInfo) -> Vec; + fn child_storage_root(&mut self, child_info: &ChildInfo, state_hashing: StateVersion) -> Vec; /// Append storage item. /// @@ -227,7 +227,7 @@ pub trait Externalities: ExtensionStore { /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! /// /// Commits all changes to the database and clears all caches. - fn commit(&mut self); + fn commit(&mut self, state_hashing: StateVersion); /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! /// Benchmarking related functionality and shouldn't be used anywhere else! diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index fb300a7702e96..947a28e03e6a8 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -191,7 +191,17 @@ pub trait Storage { /// /// Returns a `Vec` that holds the SCALE encoded hash. fn root(&mut self) -> Vec { - self.storage_root() + self.storage_root(None) + } + + #[version(2)] + /// "Commit" all existing operations and compute the resulting storage root. + /// + /// The hashing algorithm is defined by the `Block`. + /// + /// Returns a `Vec` that holds the SCALE encoded hash. + fn root(&mut self) -> Vec { + self.storage_root(sp_core::DEFAULT_STATE_HASHING) } /// "Commit" all existing operations and get the resulting storage change root. @@ -379,7 +389,22 @@ pub trait DefaultChildStorage { /// Returns a `Vec` that holds the SCALE encoded hash. 
fn root(&mut self, storage_key: &[u8]) -> Vec { let child_info = ChildInfo::new_default(storage_key); - self.child_storage_root(&child_info) + self.child_storage_root(&child_info, None) + } + + /// Default child root calculation. + /// + /// "Commit" all existing operations and compute the resulting child storage root. + /// The hashing algorithm is defined by the `Block`. + /// + /// Returns a `Vec` that holds the SCALE encoded hash. + /// TODO this will be use by default for all new runtime that is an issue: we want it + /// to be call only when we choose to migrate, otherwhise lazy migration will apply too + /// soon. -> Maybe just name it differently. + #[version(2)] + fn root(&mut self, storage_key: &[u8]) -> Vec { + let child_info = ChildInfo::new_default(storage_key); + self.child_storage_root(&child_info, sp_core::DEFAULT_STATE_HASHING) } /// Child storage key iteration. diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 4a9c6087fa5cc..e631ec033991f 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -45,7 +45,7 @@ use sp_core::{ crypto::{self, Public}, ecdsa, ed25519, hash::{H256, H512}, - sr25519, + sr25519, StateVersion, DEFAULT_STATE_HASHING, }; use sp_std::{convert::TryFrom, prelude::*}; diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 0e4c1644d0f91..af615e0a16d5f 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -24,6 +24,7 @@ use crate::{ use codec::{Decode, Encode}; use hash_db::Hasher; use sp_core::storage::{well_known_keys, ChildInfo, TrackedStorageKey}; +use sp_core::StateVersion; #[cfg(feature = "std")] use sp_core::traits::RuntimeCode; use sp_std::vec::Vec; @@ -142,6 +143,7 @@ pub trait Backend: sp_std::fmt::Debug { fn storage_root<'a>( &self, delta: impl Iterator)>, + threshold: StateVersion, ) -> (H::Out, Self::Transaction) where H::Out: Ord; @@ -153,6 +155,7 @@ pub trait Backend: 
sp_std::fmt::Debug { &self, child_info: &ChildInfo, delta: impl Iterator)>, + threshold: StateVersion, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord; @@ -187,6 +190,7 @@ pub trait Backend: sp_std::fmt::Debug { child_deltas: impl Iterator< Item = (&'a ChildInfo, impl Iterator)>), >, + threshold: StateVersion, ) -> (H::Out, Self::Transaction) where H::Out: Ord + Encode, @@ -195,7 +199,7 @@ pub trait Backend: sp_std::fmt::Debug { let mut child_roots: Vec<_> = Default::default(); // child first for (child_info, child_delta) in child_deltas { - let (child_root, empty, child_txs) = self.child_storage_root(&child_info, child_delta); + let (child_root, empty, child_txs) = self.child_storage_root(&child_info, child_delta, threshold); let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { @@ -208,6 +212,7 @@ pub trait Backend: sp_std::fmt::Debug { delta .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) .chain(child_roots.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))), + threshold, ); txs.consolidate(parent_txs); (root, txs) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 3d7ac00c18184..98266a57ebb0b 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -26,7 +26,7 @@ use sp_core::{ well_known_keys::is_child_storage_key, ChildInfo, Storage, StorageChild, TrackedStorageKey, }, traits::Externalities, - Blake2Hasher, + Blake2Hasher, StateVersion, }; use sp_externalities::{Extension, Extensions}; use sp_trie::{empty_child_trie_root, Layout, TrieConfiguration}; @@ -111,8 +111,8 @@ impl BasicExternalities { impl PartialEq for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { - self.inner.top.eq(&other.inner.top) && - self.inner.children_default.eq(&other.inner.children_default) + self.inner.top.eq(&other.inner.top) + && self.inner.children_default.eq(&other.inner.children_default) } } @@ -180,16 
+180,16 @@ impl Externalities for BasicExternalities { fn place_storage(&mut self, key: StorageKey, maybe_value: Option) { if is_child_storage_key(&key) { warn!(target: "trie", "Refuse to set child storage key via main storage"); - return + return; } match maybe_value { Some(value) => { self.inner.top.insert(key, value); - }, + } None => { self.inner.top.remove(&key); - }, + } } } @@ -230,7 +230,7 @@ impl Externalities for BasicExternalities { target: "trie", "Refuse to clear prefix that is part of child storage key via main storage" ); - return (false, 0) + return (false, 0); } let to_remove = self @@ -279,7 +279,7 @@ impl Externalities for BasicExternalities { crate::ext::StorageAppend::new(current).append(value); } - fn storage_root(&mut self) -> Vec { + fn storage_root(&mut self, threshold: StateVersion) -> Vec { let mut top = self.inner.top.clone(); let prefixed_keys: Vec<_> = self .inner @@ -292,7 +292,7 @@ impl Externalities for BasicExternalities { // type of child trie support. let empty_hash = empty_child_trie_root::>(); for (prefixed_storage_key, child_info) in prefixed_keys { - let child_root = self.child_storage_root(&child_info); + let child_root = self.child_storage_root(&child_info, threshold); if &empty_hash[..] == &child_root[..] 
{ top.remove(prefixed_storage_key.as_slice()); } else { @@ -300,20 +300,20 @@ impl Externalities for BasicExternalities { } } - let layout = if let Some(threshold) = self.alt_hashing.as_ref() { - Layout::::with_alt_hashing(*threshold) + let layout = if let Some(threshold) = threshold { + Layout::::with_alt_hashing(threshold) } else { Layout::::default() }; layout.trie_root(self.inner.top.clone()).as_ref().into() } - fn child_storage_root(&mut self, child_info: &ChildInfo) -> Vec { + fn child_storage_root(&mut self, child_info: &ChildInfo, threshold: StateVersion) -> Vec { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); let mut in_mem = crate::in_memory_backend::new_in_mem::(); in_mem.force_alt_hashing(self.alt_hashing.clone()); - in_mem.child_storage_root(&child.child_info, delta).0 + in_mem.child_storage_root(&child.child_info, delta, threshold).0 } else { empty_child_trie_root::>() } @@ -338,7 +338,7 @@ impl Externalities for BasicExternalities { fn wipe(&mut self) {} - fn commit(&mut self) {} + fn commit(&mut self, _threshold: StateVersion) {} fn read_write_count(&self) -> (u32, u32, u32, u32) { unimplemented!("read_write_count is not supported in Basic") diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index c9693ca6a88c1..32c204b3a8689 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -24,7 +24,7 @@ use crate::{ use codec::{Decode, Encode, EncodeAppend}; use hash_db::Hasher; use sp_core::{ - hexdisplay::HexDisplay, + hexdisplay::HexDisplay, StateVersion, storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, }; use sp_externalities::{Extension, ExtensionStore, Extensions, Externalities}; @@ -521,7 +521,7 @@ where StorageAppend::new(current_value).append(value); } - fn storage_root(&mut self) -> Vec { + fn storage_root(&mut self, threshold: 
StateVersion) -> Vec { let _guard = guard(); if let Some(ref root) = self.storage_transaction_cache.transaction_storage_root { trace!( @@ -534,7 +534,7 @@ where return root.encode() } - let root = self.overlay.storage_root(self.backend, self.storage_transaction_cache); + let root = self.overlay.storage_root(self.backend, self.storage_transaction_cache, threshold); trace!( target: "state", method = "StorageRoot", @@ -545,7 +545,7 @@ where root.encode() } - fn child_storage_root(&mut self, child_info: &ChildInfo) -> Vec { + fn child_storage_root(&mut self, child_info: &ChildInfo, threshold: StateVersion) -> Vec { let _guard = guard(); let storage_key = child_info.storage_key(); let prefixed_storage_key = child_info.prefixed_storage_key(); @@ -566,7 +566,7 @@ where } else { let root = if let Some((changes, info)) = self.overlay.child_changes(storage_key) { let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); - Some(self.backend.child_storage_root(info, delta)) + Some(self.backend.child_storage_root(info, delta, threshold)) } else { None }; @@ -718,6 +718,7 @@ where None, Default::default(), self.storage_transaction_cache, + None, // using any state ) .expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL); @@ -727,7 +728,7 @@ where .expect("We have reset the overlay above, so we can not be in the runtime; qed"); } - fn commit(&mut self) { + fn commit(&mut self, state_threshold: StateVersion) { for _ in 0..self.overlay.transaction_depth() { self.overlay.commit_transaction().expect(BENCHMARKING_FN); } @@ -739,6 +740,7 @@ where None, Default::default(), self.storage_transaction_cache, + state_threshold, ) .expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 92fa00e6455a8..50a37b5ecfa14 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs 
@@ -23,6 +23,7 @@ use crate::{ use codec::Codec; use hash_db::Hasher; use sp_core::storage::{ChildInfo, Storage}; +use sp_core::StateVersion; use sp_trie::{empty_trie_root, Layout, MemoryDB}; use std::collections::{BTreeMap, HashMap}; @@ -43,9 +44,10 @@ where pub fn update, StorageCollection)>>( &self, changes: T, + state_threshold: StateVersion, ) -> Self { let mut clone = self.clone(); - clone.insert(changes); + clone.insert(changes, state_threshold); clone } @@ -53,6 +55,7 @@ where pub fn insert, StorageCollection)>>( &mut self, changes: T, + state_threshold: StateVersion, ) { let (top, child) = changes.into_iter().partition::, _>(|v| v.0.is_none()); let (root, transaction) = self.full_storage_root( @@ -60,6 +63,7 @@ where child.iter().filter_map(|v| { v.0.as_ref().map(|c| (c, v.1.iter().map(|(k, v)| (&k[..], v.as_deref())))) }), + state_threshold, ); self.apply_transaction(root, transaction); @@ -107,53 +111,63 @@ where } } -impl From, BTreeMap>> +impl From<(HashMap, BTreeMap>, StateVersion)> for TrieBackend, H> where H::Out: Codec + Ord, { - fn from(inner: HashMap, BTreeMap>) -> Self { + fn from( + (inner, state_hashing): ( + HashMap, BTreeMap>, + StateVersion, + ), + ) -> Self { let mut backend = new_in_mem(); backend.insert( inner .into_iter() .map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect())), + state_hashing, ); backend } } -impl From for TrieBackend, H> +impl From<(Storage, StateVersion)> for TrieBackend, H> where H::Out: Codec + Ord, { - fn from(inners: Storage) -> Self { + fn from((inners, state_hashing): (Storage, StateVersion)) -> Self { let mut inner: HashMap, BTreeMap> = inners .children_default .into_iter() .map(|(_k, c)| (Some(c.child_info), c.data)) .collect(); inner.insert(None, inners.top); - inner.into() + (inner, state_hashing).into() } } -impl From> for TrieBackend, H> +impl From<(BTreeMap, StateVersion)> + for TrieBackend, H> where H::Out: Codec + Ord, { - fn from(inner: BTreeMap) -> Self { + fn from((inner, 
state_hashing): (BTreeMap, StateVersion)) -> Self { let mut expanded = HashMap::new(); expanded.insert(None, inner); - expanded.into() + (expanded, state_hashing).into() } } -impl From, StorageCollection)>> for TrieBackend, H> +impl From<(Vec<(Option, StorageCollection)>, StateVersion)> + for TrieBackend, H> where H::Out: Codec + Ord, { - fn from(inner: Vec<(Option, StorageCollection)>) -> Self { + fn from( + (inner, state_hashing): (Vec<(Option, StorageCollection)>, StateVersion), + ) -> Self { let mut expanded: HashMap, BTreeMap> = HashMap::new(); for (child_info, key_values) in inner { @@ -164,7 +178,7 @@ where } } } - expanded.into() + (expanded, state_hashing).into() } } diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index a0558e06a380e..4823289e8383a 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -38,7 +38,7 @@ use crate::{ use codec::{Decode, Encode}; use hash_db::Hasher; use sp_core::{ - offchain::OffchainOverlayedChange, + offchain::OffchainOverlayedChange, StateVersion, storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}, }; use sp_externalities::{Extension, Extensions}; @@ -546,11 +546,12 @@ impl OverlayedChanges { changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: StorageTransactionCache, + state_threshold: StateVersion, ) -> Result, DefaultError> where H::Out: Ord + Encode + 'static, { - self.drain_storage_changes(backend, changes_trie_state, parent_hash, &mut cache) + self.drain_storage_changes(backend, changes_trie_state, parent_hash, &mut cache, state_threshold) } /// Drain all changes into a [`StorageChanges`] instance. Leave empty overlay in place. 
@@ -560,13 +561,14 @@ impl OverlayedChanges { #[cfg(feature = "std")] changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: &mut StorageTransactionCache, + state_threshold: StateVersion, ) -> Result, DefaultError> where H::Out: Ord + Encode + 'static, { // If the transaction does not exist, we generate it. if cache.transaction.is_none() { - self.storage_root(backend, &mut cache); + self.storage_root(backend, &mut cache, state_threshold); } let (transaction, transaction_storage_root) = cache @@ -642,6 +644,7 @@ impl OverlayedChanges { &self, backend: &B, cache: &mut StorageTransactionCache, + threshold: sp_core::StateVersion, ) -> H::Out where H::Out: Ord + Encode, @@ -651,7 +654,7 @@ impl OverlayedChanges { (info, changes.map(|(k, v)| (&k[..], v.value().map(|v| &v[..])))) }); - let (root, transaction) = backend.full_storage_root(delta, child_delta); + let (root, transaction) = backend.full_storage_root(delta, child_delta, threshold); cache.transaction = Some(transaction); cache.transaction_storage_root = Some(root); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 235f37b1c9c1e..b56bc776c4926 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -27,6 +27,7 @@ use hash_db::{HashDB, Hasher, Prefix, EMPTY_PREFIX}; use log::debug; use parking_lot::RwLock; use sp_core::storage::ChildInfo; +use sp_core::StateVersion; pub use sp_trie::trie_types::TrieError; use sp_trie::{ empty_child_trie_root, read_child_trie_value_with, read_trie_value_with, record_all_keys, @@ -364,22 +365,24 @@ where fn storage_root<'b>( &self, delta: impl Iterator)>, + threshold: StateVersion, ) -> (H::Out, Self::Transaction) where H::Out: Ord, { - self.0.storage_root(delta) + self.0.storage_root(delta, threshold) } fn child_storage_root<'b>( &self, child_info: &ChildInfo, delta: impl Iterator)>, + threshold: StateVersion, ) -> (H::Out, bool, 
Self::Transaction) where H::Out: Ord, { - self.0.child_storage_root(child_info, delta) + self.0.child_storage_root(child_info, delta, threshold) } fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) {} diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 5b7d568b0311e..510cf6b0a38d9 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -23,7 +23,7 @@ use hash_db::Hasher; use sp_core::{ storage::{ChildInfo, TrackedStorageKey}, traits::Externalities, - Blake2Hasher, + Blake2Hasher, StateVersion, }; use std::{ any::{Any, TypeId}, @@ -145,11 +145,11 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("storage_append is not supported in ReadOnlyExternalities") } - fn storage_root(&mut self) -> Vec { + fn storage_root(&mut self, _threshold: StateVersion) -> Vec { unimplemented!("storage_root is not supported in ReadOnlyExternalities") } - fn child_storage_root(&mut self, _child_info: &ChildInfo) -> Vec { + fn child_storage_root(&mut self, _child_info: &ChildInfo, _threshold: StateVersion) -> Vec { unimplemented!("child_storage_root is not supported in ReadOnlyExternalities") } @@ -171,7 +171,7 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< fn wipe(&mut self) {} - fn commit(&mut self) {} + fn commit(&mut self, _threshold: StateVersion) {} fn read_write_count(&self) -> (u32, u32, u32, u32) { unimplemented!("read_write_count is not supported in ReadOnlyExternalities") diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index ce7aaee289c12..fafe755c06885 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -43,6 +43,7 @@ use sp_core::{ }, testing::TaskExecutor, traits::TaskExecutorExt, + StateVersion, }; use sp_externalities::{Extension, ExtensionStore, Extensions}; @@ -62,6 +63,8 @@ 
where changes_trie_storage: ChangesTrieInMemoryStorage, /// Extensions. pub extensions: Extensions, + /// State hashing to apply during tests. + pub state_hashing: StateVersion, } impl TestExternalities @@ -91,12 +94,6 @@ where Self::new_with_code(&[], storage) } - /// Create a new instance of `TestExternalities` with storage - /// on a backend containing defined default alt hashing threshold. - pub fn new_with_alt_hashing(storage: Storage) -> Self { - Self::new_with_code_inner(&[], storage, true) - } - /// New empty test externalities. pub fn new_empty() -> Self { Self::new_with_code(&[], Storage::default()) @@ -104,10 +101,16 @@ where /// Create a new instance of `TestExternalities` with code and storage. pub fn new_with_code(code: &[u8], storage: Storage) -> Self { - Self::new_with_code_inner(code, storage, false) + Self::new_with_code_and_state(code, storage, sp_core::DEFAULT_STATE_HASHING) } - fn new_with_code_inner(code: &[u8], mut storage: Storage, force_alt_hashing: bool) -> Self { + /// Create a new instance of `TestExternalities` with code and storage for a given state + /// version. 
+ pub fn new_with_code_and_state( + code: &[u8], + mut storage: Storage, + state_hashing: StateVersion, + ) -> Self { let mut overlay = OverlayedChanges::default(); let changes_trie_config = storage .top @@ -125,38 +128,15 @@ where let offchain_db = TestPersistentOffchainDB::new(); - let backend = if force_alt_hashing { - let mut backend: InMemoryBackend = { - let mut storage = Storage::default(); - storage.modify_trie_alt_hashing_threshold(Some( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - )); - storage.into() - }; - let mut inner: HashMap, BTreeMap> = storage - .children_default - .into_iter() - .map(|(_k, c)| (Some(c.child_info), c.data)) - .collect(); - inner.insert(None, storage.top); - backend.insert( - inner - .into_iter() - .map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect())), - ); - backend - } else { - storage.into() - }; - TestExternalities { overlay, offchain_db, changes_trie_config, extensions, changes_trie_storage: ChangesTrieInMemoryStorage::new(), - backend, + backend: (storage, state_hashing).into(), storage_transaction_cache: Default::default(), + state_hashing, } } @@ -177,7 +157,7 @@ where /// Insert key/value into backend pub fn insert(&mut self, k: StorageKey, v: StorageValue) { - self.backend.insert(vec![(None, vec![(k, Some(v))])]); + self.backend.insert(vec![(None, vec![(k, Some(v))])], self.state_hashing); } /// Registers the given extension for this instance. @@ -206,7 +186,7 @@ where )) } - self.backend.update(transaction) + self.backend.update(transaction, self.state_hashing) } /// Commit all pending changes to the underlying backend. 
@@ -220,6 +200,7 @@ where None, Default::default(), &mut Default::default(), + self.state_hashing, )?; self.backend diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 285eede8a84f7..d8aa0facf46f8 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -25,6 +25,7 @@ use crate::{ use codec::{Codec, Decode}; use hash_db::Hasher; use sp_core::storage::{ChildInfo, ChildType}; +use sp_core::StateVersion; use sp_std::{boxed::Box, vec::Vec}; use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, @@ -195,22 +196,18 @@ where fn storage_root<'a>( &self, delta: impl Iterator)>, + threshold: StateVersion, ) -> (H::Out, Self::Transaction) where H::Out: Ord, { - let use_inner_hash_value = if let Some(force) = self.force_alt_hashing.as_ref() { - force.clone() - } else { - self.get_trie_alt_hashing_threshold() - }; let mut write_overlay = S::Overlay::default(); let mut root = *self.essence.root(); { let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); let res = || { - let layout = if let Some(threshold) = use_inner_hash_value { + let layout = if let Some(threshold) = threshold { sp_trie::Layout::with_alt_hashing(threshold) } else { sp_trie::Layout::default() @@ -231,20 +228,15 @@ where &self, child_info: &ChildInfo, delta: impl Iterator)>, + threshold: StateVersion, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord, { - let use_inner_hash_value = if let Some(force) = self.force_alt_hashing.as_ref() { - force.clone() - } else { - self.get_trie_alt_hashing_threshold() - }; - let default_root = match child_info.child_type() { ChildType::ParentKeyId => empty_child_trie_root::>(), }; - let layout = if let Some(threshold) = use_inner_hash_value { + let layout = if let Some(threshold) = threshold { sp_trie::Layout::with_alt_hashing(threshold) } else { sp_trie::Layout::default() diff --git 
a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index b00a42d8aab18..f3f8050e252d9 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -451,6 +451,14 @@ impl ChildTrieParentKeyId { } } +/// Different state that can be applied. TODO rename to StateValueHashing. +/// +/// When a value is define, apply inner hashing over the given threshold. +pub type StateVersion = Option; + +/// Default threshold value for activated inner hashing of trie state. +pub const DEFAULT_STATE_HASHING: StateVersion = Some(33); + #[cfg(test)] mod tests { use super::*; diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index 975a81af4f53d..e5409922ef2dc 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -19,7 +19,7 @@ //! Async externalities. use sp_core::{ - storage::{ChildInfo, TrackedStorageKey}, + storage::{ChildInfo, TrackedStorageKey}, StateVersion, traits::{Externalities, RuntimeSpawn, RuntimeSpawnExt, SpawnNamed, TaskExecutorExt}, }; use sp_externalities::{Extensions, ExternalitiesExt as _}; @@ -126,11 +126,11 @@ impl Externalities for AsyncExternalities { panic!("`storage_append`: should not be used in async externalities!") } - fn storage_root(&mut self) -> Vec { + fn storage_root(&mut self, _state_hashing: StateVersion) -> Vec { panic!("`storage_root`: should not be used in async externalities!") } - fn child_storage_root(&mut self, _child_info: &ChildInfo) -> Vec { + fn child_storage_root(&mut self, _child_info: &ChildInfo, _state_hashing: StateVersion) -> Vec { panic!("`child_storage_root`: should not be used in async externalities!") } @@ -152,7 +152,7 @@ impl Externalities for AsyncExternalities { fn wipe(&mut self) {} - fn commit(&mut self) {} + fn commit(&mut self, _state_hashing: StateVersion) {} fn read_write_count(&self) -> (u32, u32, u32, u32) { unimplemented!("read_write_count is not supported in AsyncExternalities") 
diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index bdb8724120813..bcf3bdd45c5fd 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -1239,9 +1239,9 @@ fn test_witness(proof: StorageProof, root: crate::Hash) { None, ); assert!(ext.storage(b"value3").is_some()); - assert!(ext.storage_root().as_slice() == &root[..]); + assert!(ext.storage_root(sp_core::DEFAULT_STATE_HASHING).as_slice() == &root[..]); ext.place_storage(vec![0], Some(vec![1])); - assert!(ext.storage_root().as_slice() != &root[..]); + assert!(ext.storage_root(sp_core::DEFAULT_STATE_HASHING).as_slice() != &root[..]); } #[cfg(test)] From 145406ae7eef4400041684429f936b522c358e67 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 30 Aug 2021 12:18:39 +0200 Subject: [PATCH 069/188] update state-machine part --- client/db/src/lib.rs | 11 +-- client/executor/runtime-test/src/lib.rs | 10 -- client/service/test/src/client/mod.rs | 27 ++--- primitives/io/src/lib.rs | 5 +- primitives/runtime/src/lib.rs | 2 +- primitives/state-machine/src/backend.rs | 9 -- primitives/state-machine/src/basic.rs | 13 +-- .../state-machine/src/changes_trie/build.rs | 4 +- primitives/state-machine/src/ext.rs | 22 ++--- .../state-machine/src/in_memory_backend.rs | 13 +-- primitives/state-machine/src/lib.rs | 98 ++++++++++--------- .../src/overlayed_changes/mod.rs | 5 +- .../state-machine/src/proving_backend.rs | 84 ++++++---------- primitives/state-machine/src/testing.rs | 14 +-- primitives/state-machine/src/trie_backend.rs | 81 +++++++-------- primitives/storage/src/lib.rs | 45 +-------- primitives/trie/src/lib.rs | 2 +- test-utils/runtime/src/system.rs | 4 - 18 files changed, 162 insertions(+), 287 deletions(-) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 0dfc998cd7d66..6dae84b967371 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -2586,16 +2586,7 @@ pub(crate) mod tests { extrinsics_root: Default::default(), }; - let mut storage = vec![(vec![1, 
3, 5], vec![2, 4, 6]), (vec![1, 2, 3], vec![9, 9, 9])]; - - if alt_hashing { - storage.push(( - sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), - sp_core::storage::trie_threshold_encode( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - ), - )); - } + let storage = vec![(vec![1, 3, 5], vec![2, 4, 6]), (vec![1, 2, 3], vec![9, 9, 9])]; header.state_root = op .old_state diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 436f87d88b1e0..c9f7d6b1e2970 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -54,20 +54,10 @@ static mut MUTABLE_STATIC: u64 = 32; static mut MUTABLE_STATIC_BSS: u64 = 0; sp_core::wasm_export_functions! { - fn test_switch_state() { - print("switch_state"); - storage::set( - sp_storage::well_known_keys::TRIE_HASHING_CONFIG, - sp_storage::trie_threshold_encode(sp_storage::TEST_DEFAULT_ALT_HASH_THRESHOLD).as_slice(), - ); - print("switched!"); - } - fn test_calling_missing_external() { unsafe { missing_external() } } - fn test_calling_yet_another_missing_external() { unsafe { yet_another_missing_external() } } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 2c53edb52177f..9ebf97c89821e 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -2064,25 +2064,14 @@ fn storage_keys_iter_works_inner(hashed_value: bool) { .take(3) .map(|x| x.0) .collect(); - if hashed_value { - assert_eq!( - res, - [ - hex!("3a686561707061676573").to_vec(), - sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), - hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), - ] - ); - } else { - assert_eq!( - res, - [ - hex!("3a686561707061676573").to_vec(), - hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), - hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), - ] - ); 
- } + assert_eq!( + res, + [ + hex!("3a686561707061676573").to_vec(), + hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), + hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), + ] + ); let res: Vec<_> = client .storage_keys_iter( diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 947a28e03e6a8..4bf655b1656fc 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -1577,11 +1577,8 @@ mod tests { }); let value = vec![7u8; 35]; - let mut storage = + let storage = Storage { top: map![b"foo00".to_vec() => value.clone()], children_default: map![] }; - storage.modify_trie_alt_hashing_threshold(Some( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - )); t = BasicExternalities::new(storage); t.execute_with(|| { diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index e631ec033991f..4a9c6087fa5cc 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -45,7 +45,7 @@ use sp_core::{ crypto::{self, Public}, ecdsa, ed25519, hash::{H256, H512}, - sr25519, StateVersion, DEFAULT_STATE_HASHING, + sr25519, }; use sp_std::{convert::TryFrom, prelude::*}; diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index af615e0a16d5f..8d68d47b97ce9 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -268,15 +268,6 @@ pub trait Backend: sp_std::fmt::Debug { unimplemented!() } - /// Read current trie hashing threshold. - /// Please do not change default implementation when implementing this trait. 
- fn get_trie_alt_hashing_threshold(&self) -> Option { - self.storage(sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG) - .ok() - .flatten() - .and_then(|encoded| sp_core::storage::trie_threshold_decode(&mut encoded.as_slice())) - } - /// Extend storage info for benchmarking db fn get_read_and_written_keys(&self) -> Vec<(Vec, u32, u32, bool)> { unimplemented!() diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 98266a57ebb0b..7a82fe7a1a697 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -42,14 +42,12 @@ use std::{ pub struct BasicExternalities { inner: Storage, extensions: Extensions, - alt_hashing: Option, } impl BasicExternalities { /// Create a new instance of `BasicExternalities` pub fn new(inner: Storage) -> Self { - let alt_hashing = inner.get_trie_alt_hashing_threshold(); - BasicExternalities { inner, extensions: Default::default(), alt_hashing } + BasicExternalities { inner, extensions: Default::default() } } /// New basic externalities with empty storage. 
@@ -74,14 +72,12 @@ impl BasicExternalities { storage: &mut sp_core::storage::Storage, f: impl FnOnce() -> R, ) -> R { - let alt_hashing = storage.get_trie_alt_hashing_threshold(); let mut ext = Self { inner: Storage { top: std::mem::take(&mut storage.top), children_default: std::mem::take(&mut storage.children_default), }, extensions: Default::default(), - alt_hashing, }; let r = ext.execute_with(f); @@ -132,11 +128,9 @@ impl Default for BasicExternalities { impl From> for BasicExternalities { fn from(hashmap: BTreeMap) -> Self { - let alt_hashing = sp_core::storage::alt_hashing::get_trie_alt_hashing_threshold(&hashmap); BasicExternalities { inner: Storage { top: hashmap, children_default: Default::default() }, extensions: Default::default(), - alt_hashing, } } } @@ -311,8 +305,7 @@ impl Externalities for BasicExternalities { fn child_storage_root(&mut self, child_info: &ChildInfo, threshold: StateVersion) -> Vec { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); - let mut in_mem = crate::in_memory_backend::new_in_mem::(); - in_mem.force_alt_hashing(self.alt_hashing.clone()); + let in_mem = crate::in_memory_backend::new_in_mem::(); in_mem.child_storage_root(&child.child_info, delta, threshold).0 } else { empty_child_trie_root::>() @@ -404,7 +397,7 @@ mod tests { const ROOT: [u8; 32] = hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); - assert_eq!(&ext.storage_root()[..], &ROOT); + assert_eq!(&ext.storage_root(None)[..], &ROOT); } #[test] diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index d3c6c12122c4f..732f2e6bfd5e9 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -382,7 +382,7 @@ mod test { ) { let child_info_1 = ChildInfo::new_default(b"storage_key1"); let child_info_2 = 
ChildInfo::new_default(b"storage_key2"); - let backend: InMemoryBackend<_> = vec![ + let backend: InMemoryBackend<_> = (vec![ (vec![100], vec![255]), (vec![101], vec![255]), (vec![102], vec![255]), @@ -391,7 +391,7 @@ mod test { (vec![105], vec![255]), ] .into_iter() - .collect::>() + .collect::>(), None) .into(); let prefixed_child_trie_key1 = child_info_1.prefixed_storage_key(); let storage = InMemoryStorage::with_inputs( diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 32c204b3a8689..2bcc0d92185ef 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -952,7 +952,7 @@ mod tests { use hex_literal::hex; use num_traits::Zero; use sp_core::{ - map, + map, DEFAULT_STATE_HASHING, storage::{well_known_keys::EXTRINSIC_INDEX, Storage, StorageChild}, Blake2Hasher, H256, }; @@ -1029,14 +1029,14 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_storage(vec![20], None); overlay.set_storage(vec![30], Some(vec![31])); - let backend = Storage { + let backend = (Storage { top: map![ vec![10] => vec![10], vec![20] => vec![20], vec![40] => vec![40] ], children_default: map![], - } + }, DEFAULT_STATE_HASHING) .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1075,12 +1075,12 @@ mod tests { overlay.set_storage(vec![27], None); overlay.set_storage(vec![28], None); overlay.set_storage(vec![29], None); - let backend = Storage { + let backend = (Storage { top: map![ vec![30] => vec![30] ], children_default: map![], - } + }, DEFAULT_STATE_HASHING) .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1099,7 +1099,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); - let backend = Storage { + let backend = (Storage { top: map![], children_default: map![ child_info.storage_key().to_vec() => 
StorageChild { @@ -1111,7 +1111,7 @@ mod tests { child_info: child_info.to_owned(), } ], - } + }, DEFAULT_STATE_HASHING) .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1144,7 +1144,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); - let backend = Storage { + let backend = (Storage { top: map![], children_default: map![ child_info.storage_key().to_vec() => StorageChild { @@ -1156,7 +1156,7 @@ mod tests { child_info: child_info.to_owned(), } ], - } + }, DEFAULT_STATE_HASHING) .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1183,7 +1183,7 @@ mod tests { let child_info = &child_info; let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); - let backend = Storage { + let backend = (Storage { top: map![], children_default: map![ child_info.storage_key().to_vec() => StorageChild { @@ -1193,7 +1193,7 @@ mod tests { child_info: child_info.to_owned(), } ], - } + }, DEFAULT_STATE_HASHING) .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 50a37b5ecfa14..785a7ae6879e3 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -86,11 +86,6 @@ where pub fn eq(&self, other: &Self) -> bool { self.root() == other.root() } - - /// Setting a alternate hashing threshold at start. - pub fn force_alt_hashing(&mut self, threshold: Option) { - self.force_alt_hashing = Some(threshold); - } } impl Clone for TrieBackend, H> @@ -191,11 +186,12 @@ mod tests { /// Assert in memory backend with only child trie keys works as trie backend. 
#[test] fn in_memory_with_child_trie_only() { + let state_hash = sp_core::DEFAULT_STATE_HASHING; let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; let storage = storage - .update(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])]); + .update(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], state_hash); let trie_backend = storage.as_trie_backend().unwrap(); assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); let storage_key = child_info.prefixed_storage_key(); @@ -204,13 +200,14 @@ mod tests { #[test] fn insert_multiple_times_child_data_works() { + let state_hash = sp_core::DEFAULT_STATE_HASHING; let mut storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); storage - .insert(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])]); + .insert(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], state_hash); storage - .insert(vec![(Some(child_info.clone()), vec![(b"1".to_vec(), Some(b"3".to_vec()))])]); + .insert(vec![(Some(child_info.clone()), vec![(b"1".to_vec(), Some(b"3".to_vec()))])], state_hash); assert_eq!(storage.child_storage(&child_info, &b"2"[..]), Ok(Some(b"3".to_vec()))); assert_eq!(storage.child_storage(&child_info, &b"1"[..]), Ok(Some(b"3".to_vec()))); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 857cecb29f2d2..f023f9613bfac 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1000,7 +1000,8 @@ mod tests { use codec::{Decode, Encode}; use sp_core::{ map, - storage::{ChildInfo, TEST_DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD}, + DEFAULT_STATE_HASHING, StateVersion, + storage::ChildInfo, testing::TaskExecutor, traits::{CodeExecutor, Externalities, RuntimeCode}, NativeOrEncoded, NeverNativeValue, @@ -1071,10 +1072,10 @@ mod tests { #[test] fn execute_works() { - 
execute_works_inner(false); - execute_works_inner(true); + execute_works_inner(None); + execute_works_inner(DEFAULT_STATE_HASHING); } - fn execute_works_inner(hashed: bool) { + fn execute_works_inner(hashed: StateVersion) { let backend = trie_backend::tests::test_trie(hashed); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1101,11 +1102,11 @@ mod tests { #[test] fn execute_works_with_native_else_wasm() { - execute_works_with_native_else_wasm_inner(false); - execute_works_with_native_else_wasm_inner(true); + execute_works_with_native_else_wasm_inner(None); + execute_works_with_native_else_wasm_inner(DEFAULT_STATE_HASHING); } - fn execute_works_with_native_else_wasm_inner(hashed: bool) { - let backend = trie_backend::tests::test_trie(hashed); + fn execute_works_with_native_else_wasm_inner(state_hash: StateVersion) { + let backend = trie_backend::tests::test_trie(state_hash); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1131,12 +1132,12 @@ mod tests { #[test] fn dual_execution_strategy_detects_consensus_failure() { - dual_execution_strategy_detects_consensus_failure_inner(false); - dual_execution_strategy_detects_consensus_failure_inner(true); + dual_execution_strategy_detects_consensus_failure_inner(None); + dual_execution_strategy_detects_consensus_failure_inner(DEFAULT_STATE_HASHING); } - fn dual_execution_strategy_detects_consensus_failure_inner(hashed: bool) { + fn dual_execution_strategy_detects_consensus_failure_inner(state_hash: StateVersion) { let mut consensus_failed = false; - let backend = trie_backend::tests::test_trie(hashed); + let backend = trie_backend::tests::test_trie(state_hash); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1171,10 +1172,10 @@ mod tests { #[test] fn prove_execution_and_proof_check_works() { - prove_execution_and_proof_check_works_inner(true); - prove_execution_and_proof_check_works_inner(false); + 
prove_execution_and_proof_check_works_inner(DEFAULT_STATE_HASHING); + prove_execution_and_proof_check_works_inner(None); } - fn prove_execution_and_proof_check_works_inner(flagged: bool) { + fn prove_execution_and_proof_check_works_inner(state_hash: StateVersion) { let executor = DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -1183,8 +1184,8 @@ mod tests { }; // fetch execution proof from 'remote' full node - let mut remote_backend = trie_backend::tests::test_trie(flagged); - let remote_root = remote_backend.storage_root(std::iter::empty()).0; + let mut remote_backend = trie_backend::tests::test_trie(state_hash); + let remote_root = remote_backend.storage_root(std::iter::empty(), state_hash).0; let (remote_result, remote_proof) = prove_execution::<_, _, u64, _, _>( &mut remote_backend, &mut Default::default(), @@ -1222,7 +1223,7 @@ mod tests { b"abc".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"3".to_vec() ]; - let state = InMemoryBackend::::from(initial); + let state = InMemoryBackend::::from((initial, DEFAULT_STATE_HASHING)); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -1303,7 +1304,7 @@ mod tests { b"d".to_vec() => b"3".to_vec() ], ]; - let backend = InMemoryBackend::::from(initial); + let backend = InMemoryBackend::::from((initial, DEFAULT_STATE_HASHING)); let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(&child_info, b"1".to_vec(), Some(b"1312".to_vec())); @@ -1351,7 +1352,7 @@ mod tests { b"d".to_vec() => b"3".to_vec() ], ]; - let backend = InMemoryBackend::::from(initial); + let backend = InMemoryBackend::::from((initial, DEFAULT_STATE_HASHING)); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( @@ -1539,15 +1540,15 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { - prove_read_and_proof_check_works_inner(false); - prove_read_and_proof_check_works_inner(true); 
+ prove_read_and_proof_check_works_inner(None); + prove_read_and_proof_check_works_inner(DEFAULT_STATE_HASHING); } - fn prove_read_and_proof_check_works_inner(flagged: bool) { + fn prove_read_and_proof_check_works_inner(state_hash: StateVersion) { let child_info = ChildInfo::new_default(b"sub1"); let child_info = &child_info; // fetch read proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(flagged); - let remote_root = remote_backend.storage_root(std::iter::empty()).0; + let remote_backend = trie_backend::tests::test_trie(state_hash); + let remote_root = remote_backend.storage_root(std::iter::empty(), state_hash).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); // check proof locally @@ -1564,8 +1565,8 @@ mod tests { ); assert_eq!(local_result2, false); // on child trie - let remote_backend = trie_backend::tests::test_trie(flagged); - let remote_root = remote_backend.storage_root(std::iter::empty()).0; + let remote_backend = trie_backend::tests::test_trie(state_hash); + let remote_root = remote_backend.storage_root(std::iter::empty(), state_hash).0; let remote_proof = prove_child_read(remote_backend, child_info, &[b"value3"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); let local_result1 = read_child_proof_check::( @@ -1591,16 +1592,16 @@ mod tests { #[test] fn prove_read_with_size_limit_works() { - let hashed_value = false; - let remote_backend = trie_backend::tests::test_trie(hashed_value); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let state_hash = None; + let remote_backend = trie_backend::tests::test_trie(state_hash); + let remote_root = remote_backend.storage_root(::std::iter::empty(), state_hash).0; let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); // Alwasys contains at least some nodes. 
assert_eq!(proof.into_memory_db::().drain().len(), 3); assert_eq!(count, 1); - let remote_backend = trie_backend::tests::test_trie(hashed_value); + let remote_backend = trie_backend::tests::test_trie(state_hash); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap(); assert_eq!(proof.clone().into_memory_db::().drain().len(), 9); @@ -1623,7 +1624,7 @@ mod tests { assert_eq!(results.len() as u32, 101); assert_eq!(completed, false); - let remote_backend = trie_backend::tests::test_trie(hashed_value); + let remote_backend = trie_backend::tests::test_trie(state_hash); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap(); assert_eq!(proof.clone().into_memory_db::().drain().len(), 11); @@ -1644,7 +1645,8 @@ mod tests { #[test] fn inner_state_hashing_switch_proofs() { let mut layout = Layout::default(); - let (mut mdb, mut root) = trie_backend::tests::test_db(false); + let mut state_hash = None; + let (mut mdb, mut root) = trie_backend::tests::test_db(state_hash); { let mut trie = TrieDBMut::from_existing_with_layout(&mut mdb, &mut root, layout.clone()).unwrap(); @@ -1656,9 +1658,9 @@ mod tests { .expect("insert failed"); } - let check_proof = |mdb, root| -> StorageProof { + let check_proof = |mdb, root, state_hash| -> StorageProof { let remote_backend = TrieBackend::new(mdb, root); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let remote_root = remote_backend.storage_root(::std::iter::empty(), state_hash).0; let remote_proof = prove_read(remote_backend, &[b"foo222"]).unwrap(); // check proof locally let local_result1 = @@ -1672,14 +1674,15 @@ mod tests { remote_proof }; - let remote_proof = check_proof(mdb.clone(), root.clone()); + let remote_proof = check_proof(mdb.clone(), root.clone(), state_hash); // check full values in proof assert!(remote_proof.encode().len() > 1_100); assert!(remote_proof.encoded_size() > 1_100); let root1 = root.clone(); 
// do switch - layout = Layout::with_alt_hashing(TRESHOLD); + layout = Layout::with_alt_hashing(sp_core::storage::DEFAULT_ALT_HASH_THRESHOLD); + state_hash = DEFAULT_STATE_HASHING; // update with same value do not change { let mut trie = @@ -1703,7 +1706,7 @@ mod tests { } let root3 = root.clone(); assert!(root1 != root3); - let remote_proof = check_proof(mdb.clone(), root.clone()); + let remote_proof = check_proof(mdb.clone(), root.clone(), state_hash); // nodes foo is replaced by its hashed value form. assert!(remote_proof.encode().len() < 1000); assert!(remote_proof.encoded_size() < 1000); @@ -1712,18 +1715,18 @@ mod tests { #[test] fn compact_multiple_child_trie() { - let size_inner_hash = compact_multiple_child_trie_inner(true); - let size_no_inner_hash = compact_multiple_child_trie_inner(false); + let size_inner_hash = compact_multiple_child_trie_inner(DEFAULT_STATE_HASHING); + let size_no_inner_hash = compact_multiple_child_trie_inner(None); assert!(size_inner_hash < size_no_inner_hash); } - fn compact_multiple_child_trie_inner(flagged: bool) -> usize { + fn compact_multiple_child_trie_inner(state_hash: StateVersion) -> usize { // this root will be queried let child_info1 = ChildInfo::new_default(b"sub1"); // this root will not be include in proof let child_info2 = ChildInfo::new_default(b"sub2"); // this root will be include in proof let child_info3 = ChildInfo::new_default(b"sub"); - let mut remote_backend = trie_backend::tests::test_trie(flagged); + let mut remote_backend = trie_backend::tests::test_trie(state_hash); let long_vec: Vec = (0..1024usize).map(|_| 8u8).collect(); let (remote_root, transaction) = remote_backend.full_storage_root( std::iter::empty(), @@ -1753,6 +1756,7 @@ mod tests { ), ] .into_iter(), + state_hash, ); remote_backend.backend_storage_mut().consolidate(transaction); remote_backend.essence.set_root(remote_root.clone()); @@ -1773,6 +1777,7 @@ mod tests { #[test] fn child_storage_uuid() { + let state_hash = None; let child_info_1 = 
ChildInfo::new_default(b"sub_test1"); let child_info_2 = ChildInfo::new_default(b"sub_test2"); @@ -1780,7 +1785,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); let mut transaction = { - let backend = test_trie(false); + let backend = test_trie(state_hash); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, @@ -1791,7 +1796,7 @@ mod tests { ); ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec()); - ext.storage_root(); + ext.storage_root(state_hash); cache.transaction.unwrap() }; let mut duplicate = false; @@ -1811,7 +1816,7 @@ mod tests { b"aaa".to_vec() => b"0".to_vec(), b"bbb".to_vec() => b"".to_vec() ]; - let state = InMemoryBackend::::from(initial); + let state = InMemoryBackend::::from((initial, DEFAULT_STATE_HASHING)); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -1843,12 +1848,13 @@ mod tests { #[test] fn runtime_registered_extensions_are_removed_after_execution() { + let state_hash = DEFAULT_STATE_HASHING; use sp_externalities::ExternalitiesExt; sp_externalities::decl_extension! 
{ struct DummyExt(u32); } - let backend = trie_backend::tests::test_trie(false); + let backend = trie_backend::tests::test_trie(state_hash); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 4823289e8383a..e97948709ccaa 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -926,6 +926,7 @@ mod tests { #[test] fn overlayed_storage_root_works() { + let state_hash = None; let initial: BTreeMap<_, _> = vec![ (b"doe".to_vec(), b"reindeer".to_vec()), (b"dog".to_vec(), b"puppyXXX".to_vec()), @@ -934,7 +935,7 @@ mod tests { ] .into_iter() .collect(); - let backend = InMemoryBackend::::from(initial); + let backend = InMemoryBackend::::from((initial, state_hash)); let mut overlay = OverlayedChanges::default(); overlay.set_collect_extrinsics(false); @@ -959,7 +960,7 @@ mod tests { const ROOT: [u8; 32] = hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); - assert_eq!(&ext.storage_root()[..], &ROOT); + assert_eq!(&ext.storage_root(state_hash)[..], &ROOT); } #[test] diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index b56bc776c4926..6af9c6a2aafe9 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -419,6 +419,7 @@ mod tests { }; use sp_runtime::traits::BlakeTwo256; use sp_trie::PrefixedMemoryDB; + use sp_core::DEFAULT_STATE_HASHING; fn test_proving<'a>( trie_backend: &'a TrieBackend, BlakeTwo256>, @@ -428,21 +429,21 @@ mod tests { #[test] fn proof_is_empty_until_value_is_read() { - proof_is_empty_until_value_is_read_inner(false); - proof_is_empty_until_value_is_read_inner(true); + proof_is_empty_until_value_is_read_inner(None); + 
proof_is_empty_until_value_is_read_inner(DEFAULT_STATE_HASHING); } - fn proof_is_empty_until_value_is_read_inner(flagged: bool) { - let trie_backend = test_trie(flagged); + fn proof_is_empty_until_value_is_read_inner(test_hash: StateVersion) { + let trie_backend = test_trie(test_hash); assert!(test_proving(&trie_backend).extract_proof().is_empty()); } #[test] fn proof_is_non_empty_after_value_is_read() { - proof_is_non_empty_after_value_is_read_inner(false); - proof_is_non_empty_after_value_is_read_inner(true); + proof_is_non_empty_after_value_is_read_inner(None); + proof_is_non_empty_after_value_is_read_inner(DEFAULT_STATE_HASHING); } - fn proof_is_non_empty_after_value_is_read_inner(flagged: bool) { - let trie_backend = test_trie(flagged); + fn proof_is_non_empty_after_value_is_read_inner(test_hash: StateVersion) { + let trie_backend = test_trie(test_hash); let backend = test_proving(&trie_backend); assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); assert!(!backend.extract_proof().is_empty()); @@ -460,53 +461,42 @@ mod tests { #[test] fn passes_through_backend_calls() { - passes_through_backend_calls_inner(false); - passes_through_backend_calls_inner(true); + passes_through_backend_calls_inner(None); + passes_through_backend_calls_inner(DEFAULT_STATE_HASHING); } - fn passes_through_backend_calls_inner(flagged: bool) { - let trie_backend = test_trie(flagged); + fn passes_through_backend_calls_inner(state_hash: StateVersion) { + let trie_backend = test_trie(state_hash); let proving_backend = test_proving(&trie_backend); assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - let (trie_root, mut trie_mdb) = trie_backend.storage_root(std::iter::empty()); - let (proving_root, mut proving_mdb) = proving_backend.storage_root(std::iter::empty()); + let (trie_root, mut trie_mdb) = trie_backend.storage_root(std::iter::empty(), state_hash); + let 
(proving_root, mut proving_mdb) = proving_backend.storage_root(std::iter::empty(), state_hash); assert_eq!(trie_root, proving_root); assert_eq!(trie_mdb.drain(), proving_mdb.drain()); } #[test] fn proof_recorded_and_checked_top() { - proof_recorded_and_checked_inner(true); - proof_recorded_and_checked_inner(false); + proof_recorded_and_checked_inner(DEFAULT_STATE_HASHING); + proof_recorded_and_checked_inner(None); } - fn proof_recorded_and_checked_inner(flagged: bool) { + fn proof_recorded_and_checked_inner(state_hash: StateVersion) { let size_content = 34; // above hashable value treshold. let value_range = 0..64; let contents = value_range .clone() .map(|i| (vec![i], Some(vec![i; size_content]))) .collect::>(); - let mut in_memory = InMemoryBackend::::default(); - if flagged { - in_memory = in_memory.update(vec![( - None, - vec![( - sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), - Some(sp_core::storage::trie_threshold_encode( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - )), - )], - )]); - } - let in_memory = in_memory.update(vec![(None, contents)]); - let in_memory_root = in_memory.storage_root(std::iter::empty()).0; + let in_memory = InMemoryBackend::::default(); + let in_memory = in_memory.update(vec![(None, contents)], state_hash); + let in_memory_root = in_memory.storage_root(std::iter::empty(), state_hash).0; value_range.clone().for_each(|i| { assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content]) }); let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(std::iter::empty()).0; + let trie_root = trie.storage_root(std::iter::empty(), state_hash).0; assert_eq!(in_memory_root, trie_root); value_range .clone() @@ -524,10 +514,10 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { - proof_recorded_and_checked_with_child_inner(false); - proof_recorded_and_checked_with_child_inner(true); + proof_recorded_and_checked_with_child_inner(None); + 
proof_recorded_and_checked_with_child_inner(DEFAULT_STATE_HASHING); } - fn proof_recorded_and_checked_with_child_inner(flagged: bool) { + fn proof_recorded_and_checked_with_child_inner(state_hash: StateVersion) { let child_info_1 = ChildInfo::new_default(b"sub1"); let child_info_2 = ChildInfo::new_default(b"sub2"); let child_info_1 = &child_info_1; @@ -538,23 +528,13 @@ mod tests { (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; let mut in_memory = InMemoryBackend::::default(); - if flagged { - in_memory = in_memory.update(vec![( - None, - vec![( - sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec(), - Some(sp_core::storage::trie_threshold_encode( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - )), - )], - )]); - } - in_memory = in_memory.update(contents); + in_memory = in_memory.update(contents, state_hash); let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory .full_storage_root( std::iter::empty(), child_storage_keys.iter().map(|k| (k, std::iter::empty())), + state_hash, ) .0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -566,7 +546,7 @@ mod tests { }); let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(std::iter::empty()).0; + let trie_root = trie.storage_root(std::iter::empty(), state_hash).0; assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -594,11 +574,11 @@ mod tests { #[test] fn storage_proof_encoded_size_estimation_works() { - storage_proof_encoded_size_estimation_works_inner(false); - storage_proof_encoded_size_estimation_works_inner(true); + storage_proof_encoded_size_estimation_works_inner(None); + storage_proof_encoded_size_estimation_works_inner(DEFAULT_STATE_HASHING); } - fn storage_proof_encoded_size_estimation_works_inner(flagged: bool) { - let trie_backend = test_trie(flagged); + fn 
storage_proof_encoded_size_estimation_works_inner(state_hash: StateVersion) { + let trie_backend = test_trie(state_hash); let backend = test_proving(&trie_backend); let check_estimation = diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index fafe755c06885..313e8c99514a2 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -19,7 +19,6 @@ use std::{ any::{Any, TypeId}, - collections::{BTreeMap, HashMap}, panic::{AssertUnwindSafe, UnwindSafe}, }; @@ -39,7 +38,7 @@ use sp_core::{ offchain::testing::TestPersistentOffchainDB, storage::{ well_known_keys::{is_child_storage_key, CHANGES_TRIE_CONFIG, CODE}, - ChildInfo, Storage, + Storage, }, testing::TaskExecutor, traits::TaskExecutorExt, @@ -257,12 +256,7 @@ where H::Out: Ord + 'static + codec::Codec, { fn default() -> Self { - // default to inner hashed. - let mut storage = Storage::default(); - storage.modify_trie_alt_hashing_threshold(Some( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - )); - Self::new(storage) + Self::new(Default::default()) } } @@ -328,7 +322,7 @@ where mod tests { use super::*; use hex_literal::hex; - use sp_core::{storage::ChildInfo, traits::Externalities, H256}; + use sp_core::{storage::ChildInfo, traits::Externalities, H256, DEFAULT_STATE_HASHING}; use sp_runtime::traits::BlakeTwo256; #[test] @@ -341,7 +335,7 @@ mod tests { ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); let root = H256::from(hex!("ed4d8c799d996add422395a6abd7545491d40bd838d738afafa1b8a4de625489")); - assert_eq!(H256::from_slice(ext.storage_root().as_slice()), root); + assert_eq!(H256::from_slice(ext.storage_root(DEFAULT_STATE_HASHING).as_slice()), root); } #[test] diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index d8aa0facf46f8..e2b17b14a25f3 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ 
-36,10 +36,6 @@ use sp_trie::{ /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. pub struct TrieBackend, H: Hasher> { pub(crate) essence: TrieBackendEssence, - // Allows setting alt hashing at start for testing only - // (mainly for in_memory_backend when it cannot read it from - // state). - pub(crate) force_alt_hashing: Option>, } impl, H: Hasher> TrieBackend @@ -48,7 +44,7 @@ where { /// Create new trie-based backend. pub fn new(storage: S, root: H::Out) -> Self { - TrieBackend { essence: TrieBackendEssence::new(storage, root), force_alt_hashing: None } + TrieBackend { essence: TrieBackendEssence::new(storage, root) } } /// Get backend essence reference. @@ -293,14 +289,14 @@ where pub mod tests { use super::*; use codec::Encode; - use sp_core::{storage::TEST_DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD, H256}; + use sp_core::{H256, DEFAULT_STATE_HASHING}; use sp_runtime::traits::BlakeTwo256; use sp_trie::{trie_types::TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut}; use std::{collections::HashSet, iter}; const CHILD_KEY_1: &[u8] = b"sub1"; - pub(crate) fn test_db(hashed_value: bool) -> (PrefixedMemoryDB, H256) { + pub(crate) fn test_db(hashed_value: StateVersion) -> (PrefixedMemoryDB, H256) { let child_info = ChildInfo::new_default(CHILD_KEY_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); @@ -314,8 +310,8 @@ pub mod tests { { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); - let mut trie = if hashed_value { - let layout = Layout::with_alt_hashing(TRESHOLD); + let mut trie = if let Some(hash) = hashed_value { + let layout = Layout::with_alt_hashing(hash); TrieDBMut::new_with_layout(&mut mdb, &mut root, layout) } else { TrieDBMut::new(&mut mdb, &mut root) @@ -327,13 +323,6 @@ pub mod tests { trie.insert(b"value1", &[42]).expect("insert failed"); trie.insert(b"value2", &[24]).expect("insert failed"); trie.insert(b":code", b"return 42").expect("insert failed"); - if hashed_value { - 
trie.insert( - sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG, - sp_core::storage::trie_threshold_encode(TRESHOLD).as_slice(), - ) - .unwrap(); - } for i in 128u8..255u8 { trie.insert(&[i], &[i]).unwrap(); } @@ -342,7 +331,7 @@ pub mod tests { } pub(crate) fn test_trie( - hashed_value: bool, + hashed_value: StateVersion, ) -> TrieBackend, BlakeTwo256> { let (mdb, root) = test_db(hashed_value); TrieBackend::new(mdb, root) @@ -350,20 +339,20 @@ pub mod tests { #[test] fn read_from_storage_returns_some() { - read_from_storage_returns_some_inner(false); - read_from_storage_returns_some_inner(true); + read_from_storage_returns_some_inner(None); + read_from_storage_returns_some_inner(DEFAULT_STATE_HASHING); } - fn read_from_storage_returns_some_inner(flagged: bool) { - assert_eq!(test_trie(flagged).storage(b"key").unwrap(), Some(b"value".to_vec())); + fn read_from_storage_returns_some_inner(state_hash: StateVersion) { + assert_eq!(test_trie(state_hash).storage(b"key").unwrap(), Some(b"value".to_vec())); } #[test] fn read_from_child_storage_returns_some() { - read_from_child_storage_returns_some_inner(false); - read_from_child_storage_returns_some_inner(true); + read_from_child_storage_returns_some_inner(None); + read_from_child_storage_returns_some_inner(DEFAULT_STATE_HASHING); } - fn read_from_child_storage_returns_some_inner(flagged: bool) { - let test_trie = test_trie(flagged); + fn read_from_child_storage_returns_some_inner(state_hash: StateVersion) { + let test_trie = test_trie(state_hash); assert_eq!( test_trie .child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3") @@ -390,20 +379,20 @@ pub mod tests { #[test] fn read_from_storage_returns_none() { - read_from_storage_returns_none_inner(false); - read_from_storage_returns_none_inner(true); + read_from_storage_returns_none_inner(None); + read_from_storage_returns_none_inner(DEFAULT_STATE_HASHING); } - fn read_from_storage_returns_none_inner(flagged: bool) { - 
assert_eq!(test_trie(flagged).storage(b"non-existing-key").unwrap(), None); + fn read_from_storage_returns_none_inner(state_hash: StateVersion) { + assert_eq!(test_trie(state_hash).storage(b"non-existing-key").unwrap(), None); } #[test] fn pairs_are_not_empty_on_non_empty_storage() { - pairs_are_not_empty_on_non_empty_storage_inner(false); - pairs_are_not_empty_on_non_empty_storage_inner(true); + pairs_are_not_empty_on_non_empty_storage_inner(None); + pairs_are_not_empty_on_non_empty_storage_inner(DEFAULT_STATE_HASHING); } - fn pairs_are_not_empty_on_non_empty_storage_inner(flagged: bool) { - assert!(!test_trie(flagged).pairs().is_empty()); + fn pairs_are_not_empty_on_non_empty_storage_inner(state_hash: StateVersion) { + assert!(!test_trie(state_hash).pairs().is_empty()); } #[test] @@ -418,32 +407,32 @@ pub mod tests { #[test] fn storage_root_is_non_default() { - storage_root_is_non_default_inner(false); - storage_root_is_non_default_inner(true); + storage_root_is_non_default_inner(None); + storage_root_is_non_default_inner(DEFAULT_STATE_HASHING); } - fn storage_root_is_non_default_inner(flagged: bool) { - assert!(test_trie(flagged).storage_root(iter::empty()).0 != H256::repeat_byte(0)); + fn storage_root_is_non_default_inner(state_hash: StateVersion) { + assert!(test_trie(state_hash).storage_root(iter::empty(), state_hash).0 != H256::repeat_byte(0)); } #[test] fn storage_root_transaction_is_non_empty() { - storage_root_transaction_is_non_empty_inner(false); - storage_root_transaction_is_non_empty_inner(true); + storage_root_transaction_is_non_empty_inner(None); + storage_root_transaction_is_non_empty_inner(DEFAULT_STATE_HASHING); } - fn storage_root_transaction_is_non_empty_inner(flagged: bool) { + fn storage_root_transaction_is_non_empty_inner(state_hash: StateVersion) { let (new_root, mut tx) = - test_trie(flagged).storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..])))); + test_trie(state_hash).storage_root(iter::once((&b"new-key"[..], 
Some(&b"new-value"[..]))), state_hash); assert!(!tx.drain().is_empty()); - assert!(new_root != test_trie(false).storage_root(iter::empty()).0); + assert!(new_root != test_trie(state_hash).storage_root(iter::empty(), state_hash).0); } #[test] fn prefix_walking_works() { - prefix_walking_works_inner(false); - prefix_walking_works_inner(true); + prefix_walking_works_inner(None); + prefix_walking_works_inner(DEFAULT_STATE_HASHING); } - fn prefix_walking_works_inner(flagged: bool) { - let trie = test_trie(flagged); + fn prefix_walking_works_inner(state_hash: StateVersion) { + let trie = test_trie(state_hash); let mut seen = HashSet::new(); trie.for_keys_with_prefix(b"value", |key| { diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index f3f8050e252d9..df216f7c4a723 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -204,10 +204,6 @@ pub mod well_known_keys { /// Current extrinsic index (u32) is stored under this key. pub const EXTRINSIC_INDEX: &'static [u8] = b":extrinsic_index"; - /// Configuration for trie internal hashing of value is stored - /// under this key. - pub const TRIE_HASHING_CONFIG: &'static [u8] = b":trie_hashing_conf"; - /// Changes trie configuration is stored under this key. pub const CHANGES_TRIE_CONFIG: &'static [u8] = b":changes_trie"; @@ -247,43 +243,8 @@ pub fn trie_threshold_decode(mut encoded: &[u8]) -> Option { codec::Compact::::decode(&mut encoded).ok().map(|compact| compact.0) } -/// Default value to use as a threshold for testing. -pub const TEST_DEFAULT_ALT_HASH_THRESHOLD: u32 = 34; - -#[cfg(feature = "std")] -impl Storage { - /// Utility function to get trie inner value hash threshold from - /// backend state or pending changes. - pub fn get_trie_alt_hashing_threshold(&self) -> Option { - alt_hashing::get_trie_alt_hashing_threshold(&self.top) - } - - /// Utility function to modify trie inner value hash threshold. 
- pub fn modify_trie_alt_hashing_threshold(&mut self, threshold: Option) { - match threshold { - Some(threshold) => { - let encoded = trie_threshold_encode(threshold); - self.top.insert(well_known_keys::TRIE_HASHING_CONFIG.to_vec(), encoded); - }, - None => { - self.top.remove(well_known_keys::TRIE_HASHING_CONFIG); - }, - } - } -} - -/// alt hashing related utils. -#[cfg(feature = "std")] -pub mod alt_hashing { - use super::*; - - /// Utility function to get trie inner value hash threshold from - /// backend state or pending changes. - pub fn get_trie_alt_hashing_threshold(map: &StorageMap) -> Option { - map.get(well_known_keys::TRIE_HASHING_CONFIG) - .and_then(|encoded| trie_threshold_decode(&mut encoded.as_slice())) - } -} +/// Default value to use as a threshold for inner hashing. +pub const DEFAULT_ALT_HASH_THRESHOLD: u32 = 33; /// Information related to a child state. #[derive(Debug, Clone)] @@ -457,7 +418,7 @@ impl ChildTrieParentKeyId { pub type StateVersion = Option; /// Default threshold value for activated inner hashing of trie state. 
-pub const DEFAULT_STATE_HASHING: StateVersion = Some(33); +pub const DEFAULT_STATE_HASHING: StateVersion = Some(DEFAULT_ALT_HASH_THRESHOLD); #[cfg(test)] mod tests { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 179f44db1e7a0..594fb4c11de19 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -609,7 +609,7 @@ mod tests { use codec::{Compact, Decode, Encode}; use hash_db::{HashDB, Hasher}; use hex_literal::hex; - use sp_core::{storage::TEST_DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD, Blake2Hasher}; + use sp_core::{storage::DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD, Blake2Hasher}; use trie_db::{DBValue, NodeCodec as NodeCodecT, Trie, TrieMut}; use trie_standardmap::{Alphabet, StandardMap, ValueMode}; diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 14dbddb695a70..16bf0cfff7f4b 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -393,10 +393,6 @@ mod tests { blake2_256(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => { vec![111u8, 0, 0, 0, 0, 0, 0, 0] }, - sp_core::storage::well_known_keys::TRIE_HASHING_CONFIG.to_vec() => - sp_core::storage::trie_threshold_encode( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - ), ], children_default: map![], }, From 00460da1f926860a5fe88c564dae2bbd7d85a97f Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 31 Aug 2021 10:03:12 +0200 Subject: [PATCH 070/188] still need access to state version from runtime --- client/api/src/cht.rs | 2 +- frame/system/src/lib.rs | 5 +---- primitives/api/src/lib.rs | 5 ++++- primitives/externalities/src/lib.rs | 2 +- primitives/state-machine/src/basic.rs | 2 +- primitives/state-machine/src/ext.rs | 8 +++++--- primitives/state-machine/src/read_only.rs | 2 +- primitives/storage/src/lib.rs | 2 ++ primitives/tasks/src/async_externalities.rs | 2 +- 9 files changed, 17 insertions(+), 13 deletions(-) diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs 
index 14e1c35ba284e..35c161bed9e0c 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -117,7 +117,7 @@ where .into_iter() .map(|(k, v)| (k, Some(v))) .collect::>(); - let storage = InMemoryBackend::::default().update(vec![(None, transaction)]); + let storage = InMemoryBackend::::default().update(vec![(None, transaction)], None); let trie_storage = storage .as_trie_backend() .expect("InMemoryState::as_trie_backend always returns Some; qed"); diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index af4920d711e21..2c8d23f614397 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1438,7 +1438,7 @@ impl Pallet { /// Get the basic externalities for this pallet, useful for tests. #[cfg(any(feature = "std", test))] pub fn externalities() -> TestExternalities { - let mut storage = sp_core::storage::Storage { + let storage = sp_core::storage::Storage { top: map![ >::hashed_key_for(T::BlockNumber::zero()) => [69u8; 32].encode(), >::hashed_key().to_vec() => T::BlockNumber::one().encode(), @@ -1446,9 +1446,6 @@ impl Pallet { ], children_default: map![], }; - storage.modify_trie_alt_hashing_threshold(Some( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - )); TestExternalities::new(storage) } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 82954d193e605..3aa45072fa715 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -92,7 +92,7 @@ pub use sp_runtime::{ Header as HeaderT, NumberFor, }, transaction_validity::TransactionValidity, - RuntimeString, TransactionOutcome, + RuntimeString, TransactionOutcome, StateVersion, }; #[doc(hidden)] #[cfg(feature = "std")] @@ -507,6 +507,9 @@ pub trait ApiExt { /// Returns the current active proof recorder. fn proof_recorder(&self) -> Option>; + /// Returns the current state trie inner hashing configuration. 
+ fn state_hash(&self) -> StateVersion; + /// Convert the api object into the storage changes that were done while executing runtime /// api functions. /// diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 46beceb585784..01621ddc72cef 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -227,7 +227,7 @@ pub trait Externalities: ExtensionStore { /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! /// /// Commits all changes to the database and clears all caches. - fn commit(&mut self, state_hashing: StateVersion); + fn commit(&mut self); /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! /// Benchmarking related functionality and shouldn't be used anywhere else! diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 7a82fe7a1a697..0043ccc90f2c3 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -331,7 +331,7 @@ impl Externalities for BasicExternalities { fn wipe(&mut self) {} - fn commit(&mut self, _threshold: StateVersion) {} + fn commit(&mut self) {} fn read_write_count(&self) -> (u32, u32, u32, u32) { unimplemented!("read_write_count is not supported in Basic") diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 2bcc0d92185ef..193cdd6fd558b 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -24,7 +24,7 @@ use crate::{ use codec::{Decode, Encode, EncodeAppend}; use hash_db::Hasher; use sp_core::{ - hexdisplay::HexDisplay, StateVersion, + hexdisplay::HexDisplay, StateVersion, DEFAULT_STATE_HASHING, storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, }; use sp_externalities::{Extension, ExtensionStore, Extensions, Externalities}; @@ -728,7 +728,9 @@ where .expect("We have reset the overlay above, so we can not be in the runtime; qed"); } 
- fn commit(&mut self, state_threshold: StateVersion) { + fn commit(&mut self) { + // Bench always use latest state. + let state_threshold = DEFAULT_STATE_HASHING; for _ in 0..self.overlay.transaction_depth() { self.overlay.commit_transaction().expect(BENCHMARKING_FN); } @@ -952,7 +954,7 @@ mod tests { use hex_literal::hex; use num_traits::Zero; use sp_core::{ - map, DEFAULT_STATE_HASHING, + map, storage::{well_known_keys::EXTRINSIC_INDEX, Storage, StorageChild}, Blake2Hasher, H256, }; diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 510cf6b0a38d9..45e903b4ececf 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -171,7 +171,7 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< fn wipe(&mut self) {} - fn commit(&mut self, _threshold: StateVersion) {} + fn commit(&mut self) {} fn read_write_count(&self) -> (u32, u32, u32, u32) { unimplemented!("read_write_count is not supported in ReadOnlyExternalities") diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index df216f7c4a723..c269116f8dbbf 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -176,6 +176,8 @@ pub struct Storage { /// trie kind, so this is exclusively for the `ChildType::ParentKeyId` /// tries. pub children_default: std::collections::HashMap, StorageChild>, + /// State hash to apply on storage. 
+ pub state_hash: StateVersion, } /// Storage change set diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index e5409922ef2dc..ef124533da2b2 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -152,7 +152,7 @@ impl Externalities for AsyncExternalities { fn wipe(&mut self) {} - fn commit(&mut self, _state_hashing: StateVersion) {} + fn commit(&mut self) {} fn read_write_count(&self) -> (u32, u32, u32, u32) { unimplemented!("read_write_count is not supported in AsyncExternalities") From bad0723ba48121a3400b01e5be32dc58666e4700 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 31 Aug 2021 10:41:45 +0200 Subject: [PATCH 071/188] state hash in mem: wrong --- client/api/src/backend.rs | 3 ++- client/api/src/in_mem.rs | 12 +++++++++++- client/light/src/backend.rs | 14 ++++++++++---- frame/support/test/tests/instance.rs | 4 +--- primitives/runtime/src/lib.rs | 1 + primitives/storage/src/lib.rs | 2 -- 6 files changed, 25 insertions(+), 11 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 8b5bd50ffa614..afab3533326bb 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -28,7 +28,7 @@ use sp_blockchain; use sp_consensus::BlockOrigin; use sp_core::{offchain::OffchainStorage, ChangesTrieConfigurationRange}; use sp_runtime::{ - generic::BlockId, + generic::BlockId, StateVersion, traits::{Block as BlockT, HashFor, NumberFor}, Justification, Justifications, Storage, }; @@ -472,6 +472,7 @@ pub trait Backend: AuxStore + Send + Sync { &self, operation: &mut Self::BlockImportOperation, block: BlockId, + state_hash: StateVersion, ) -> sp_blockchain::Result<()>; /// Commit block insertion. 
diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index e8fce19f8124e..4ad42a11a7073 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -24,7 +24,7 @@ use sp_core::{ offchain::storage::InMemOffchainStorage as OffchainStorage, storage::well_known_keys, }; use sp_runtime::{ - generic::BlockId, + generic::BlockId, StateVersion, traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor, Zero}, Justification, Justifications, Storage, }; @@ -583,6 +583,7 @@ pub struct BlockImportOperation { aux: Vec<(Vec, Option>)>, finalized_blocks: Vec<(BlockId, Option)>, set_head: Option>, + state_hash: Option, } impl BlockImportOperation @@ -593,9 +594,14 @@ where &mut self, storage: Storage, commit: bool, + state_hash: Option, ) -> sp_blockchain::Result { check_genesis_storage(&storage)?; + let state_hash = match self.state_hash { + Some(state_hash) => state_hash, + None => return Err(sp_blockchain::Error::Application(Box::from(format!("{:?}", "Missing call to begin state operation")))), + }; let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| { ( &child_content.child_info, @@ -606,6 +612,7 @@ where let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, + state_hash, ); if commit { @@ -775,6 +782,7 @@ where aux: Default::default(), finalized_blocks: Default::default(), set_head: None, + state_hash: None, }) } @@ -782,8 +790,10 @@ where &self, operation: &mut Self::BlockImportOperation, block: BlockId, + state_hash: StateVersion, ) -> sp_blockchain::Result<()> { operation.old_state = self.state_at(block)?; + operation.state_hash = Some(state_hash); Ok(()) } diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 3091dce625a3f..3b6315052a78d 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -46,7 +46,7 @@ use sp_core::{ ChangesTrieConfiguration, }; use sp_runtime::{ - 
generic::BlockId, + generic::BlockId, StateVersion, traits::{Block as BlockT, HashFor, Header, NumberFor, Zero}, Justification, Justifications, Storage, }; @@ -75,6 +75,7 @@ pub struct ImportOperation { set_head: Option>, storage_update: Option>>, changes_trie_config_update: Option>, + state_hash: Option, _phantom: std::marker::PhantomData, } @@ -140,15 +141,18 @@ where set_head: None, storage_update: None, changes_trie_config_update: None, + state_hash: None, _phantom: Default::default(), }) } fn begin_state_operation( &self, - _operation: &mut Self::BlockImportOperation, + operation: &mut Self::BlockImportOperation, _block: BlockId, + state_hash: StateVersion, ) -> ClientResult<()> { + operation.state_hash = Some(state_hash); Ok(()) } @@ -522,12 +526,13 @@ where fn storage_root<'a>( &self, delta: impl Iterator)>, + state_hash: StateVersion, ) -> (H::Out, Self::Transaction) where H::Out: Ord, { match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.storage_root(delta), + GenesisOrUnavailableState::Genesis(ref state) => state.storage_root(delta, state_hash), GenesisOrUnavailableState::Unavailable => Default::default(), } } @@ -536,13 +541,14 @@ where &self, child_info: &ChildInfo, delta: impl Iterator)>, + state_hash: StateVersion, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord, { match *self { GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(child_info, delta); + let (root, is_equal, _) = state.child_storage_root(child_info, delta, state_hash); (root, is_equal, Default::default()) }, GenesisOrUnavailableState::Unavailable => (H::Out::default(), true, Default::default()), diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index ba0569145df85..841b69dafed1a 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -345,8 +345,6 @@ fn storage_instance_independence() { top: std::collections::BTreeMap::new(), 
children_default: std::collections::HashMap::new(), }; - storage - .modify_trie_alt_hashing_threshold(Some(sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD)); sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || { module2::Value::::put(0); module2::Value::::put(0); @@ -362,7 +360,7 @@ fn storage_instance_independence() { module2::DoubleMap::::insert(&0, &0, &0); }); // 12 storage values and threshold. - assert_eq!(storage.top.len(), 13); + assert_eq!(storage.top.len(), 12); } #[test] diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 4a9c6087fa5cc..3b6e6d75103fc 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -40,6 +40,7 @@ pub use sp_application_crypto as app_crypto; #[cfg(feature = "std")] pub use sp_core::storage::{Storage, StorageChild}; +pub use sp_core::{StateVersion, DEFAULT_STATE_HASHING}; use sp_core::{ crypto::{self, Public}, diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index c269116f8dbbf..df216f7c4a723 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -176,8 +176,6 @@ pub struct Storage { /// trie kind, so this is exclusively for the `ChildType::ParentKeyId` /// tries. pub children_default: std::collections::HashMap, StorageChild>, - /// State hash to apply on storage. - pub state_hash: StateVersion, } /// Storage change set From 537c04f0060d17020f6c487be8d6c04745fb0bea Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 31 Aug 2021 12:21:57 +0200 Subject: [PATCH 072/188] direction likely correct, but passing call to code exec for genesis init seem awkward. 
--- client/api/src/backend.rs | 4 ++-- client/api/src/in_mem.rs | 17 +++++------------ client/db/src/bench.rs | 10 +++++++--- client/db/src/lib.rs | 18 +++++++++++------- client/db/src/storage_cache.rs | 13 +++++++++---- client/light/src/backend.rs | 14 +++++--------- client/service/src/client/client.rs | 11 +++++++++-- .../api/proc-macro/src/impl_runtime_apis.rs | 6 ++++++ primitives/api/src/lib.rs | 10 ++++++---- 9 files changed, 60 insertions(+), 43 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index afab3533326bb..77af4fb2ca045 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -171,10 +171,11 @@ pub trait BlockImportOperation { &mut self, storage: Storage, commit: bool, + state_hash: StateVersion, ) -> sp_blockchain::Result; /// Inject storage data into the database replacing any existing data. - fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result; + fn reset_storage(&mut self, storage: Storage, state_hash: StateVersion) -> sp_blockchain::Result; /// Set storage changes. fn update_storage( @@ -472,7 +473,6 @@ pub trait Backend: AuxStore + Send + Sync { &self, operation: &mut Self::BlockImportOperation, block: BlockId, - state_hash: StateVersion, ) -> sp_blockchain::Result<()>; /// Commit block insertion. 
diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 4ad42a11a7073..3b06030d8c71a 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -583,7 +583,6 @@ pub struct BlockImportOperation { aux: Vec<(Vec, Option>)>, finalized_blocks: Vec<(BlockId, Option)>, set_head: Option>, - state_hash: Option, } impl BlockImportOperation @@ -594,14 +593,10 @@ where &mut self, storage: Storage, commit: bool, - state_hash: Option, + state_hash: StateVersion, ) -> sp_blockchain::Result { check_genesis_storage(&storage)?; - let state_hash = match self.state_hash { - Some(state_hash) => state_hash, - None => return Err(sp_blockchain::Error::Application(Box::from(format!("{:?}", "Missing call to begin state operation")))), - }; let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| { ( &child_content.child_info, @@ -667,12 +662,13 @@ where &mut self, storage: Storage, commit: bool, + state_hash: StateVersion, ) -> sp_blockchain::Result { - self.apply_storage(storage, commit) + self.apply_storage(storage, commit, state_hash) } - fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result { - self.apply_storage(storage, true) + fn reset_storage(&mut self, storage: Storage, state_hash: StateVersion) -> sp_blockchain::Result { + self.apply_storage(storage, true, state_hash) } fn insert_aux(&mut self, ops: I) -> sp_blockchain::Result<()> @@ -782,7 +778,6 @@ where aux: Default::default(), finalized_blocks: Default::default(), set_head: None, - state_hash: None, }) } @@ -790,10 +785,8 @@ where &self, operation: &mut Self::BlockImportOperation, block: BlockId, - state_hash: StateVersion, ) -> sp_blockchain::Result<()> { operation.old_state = self.state_at(block)?; - operation.state_hash = Some(state_hash); Ok(()) } diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 291af553b3608..062faf6b9ce0c 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -34,7 +34,7 @@ use sp_core::{ }; use 
sp_runtime::{ traits::{Block as BlockT, HashFor}, - Storage, + Storage, StateVersion, }; use sp_state_machine::{ backend::Backend as StateBackend, ChildStorageCollection, DBValue, ProofRecorder, @@ -111,6 +111,7 @@ impl BenchmarkingState { record_proof: bool, enable_tracking: bool, ) -> Result { + let state_hash = sp_runtime::DEFAULT_STATE_HASHING; let mut root = B::Hash::default(); let mut mdb = MemoryDB::>::default(); sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); @@ -144,6 +145,7 @@ impl BenchmarkingState { state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, + state_hash, ); state.genesis = transaction.clone().drain(); state.genesis_root = root.clone(); @@ -421,6 +423,7 @@ impl StateBackend> for BenchmarkingState { fn storage_root<'a>( &self, delta: impl Iterator)>, + state_hash: StateVersion, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord, @@ -428,13 +431,14 @@ impl StateBackend> for BenchmarkingState { self.state .borrow() .as_ref() - .map_or(Default::default(), |s| s.storage_root(delta)) + .map_or(Default::default(), |s| s.storage_root(delta, state_hash)) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, + state_hash: StateVersion, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord, @@ -442,7 +446,7 @@ impl StateBackend> for BenchmarkingState { self.state .borrow() .as_ref() - .map_or(Default::default(), |s| s.child_storage_root(child_info, delta)) + .map_or(Default::default(), |s| s.child_storage_root(child_info, delta, state_hash)) } fn pairs(&self) -> Vec<(Vec, Vec)> { diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 6dae84b967371..16403046978cc 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -88,7 +88,7 @@ use sp_runtime::{ Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, }, - Justification, Justifications, Storage, + Justification, 
Justifications, Storage, StateVersion, }; use sp_state_machine::{ backend::Backend as StateBackend, ChangesTrieCacheAction, ChangesTrieTransaction, @@ -243,22 +243,24 @@ impl StateBackend> for RefTrackingState { fn storage_root<'a>( &self, delta: impl Iterator)>, + state_hash: StateVersion, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord, { - self.state.storage_root(delta) + self.state.storage_root(delta, state_hash) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, + state_hash: StateVersion, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord, { - self.state.child_storage_root(child_info, delta) + self.state.child_storage_root(child_info, delta, state_hash) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -824,7 +826,7 @@ impl BlockImportOperation { } } - fn apply_new_state(&mut self, storage: Storage) -> ClientResult { + fn apply_new_state(&mut self, storage: Storage, state_hash: StateVersion) -> ClientResult { if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(&k)) { return Err(sp_blockchain::Error::InvalidState.into()) } @@ -845,6 +847,7 @@ impl BlockImportOperation { (&k[..], Some(&v[..])) }), child_delta, + state_hash, ); let changes_trie_config = match changes_trie_config { @@ -896,7 +899,7 @@ impl sc_client_api::backend::BlockImportOperation Ok(()) } - fn reset_storage(&mut self, storage: Storage) -> ClientResult { + fn reset_storage(&mut self, storage: Storage, state_hash: StateVersion) -> ClientResult { if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(&k)) { return Err(sp_blockchain::Error::GenesisInvalid.into()) } @@ -920,6 +923,7 @@ impl sc_client_api::backend::BlockImportOperation (&k[..], Some(&v[..])) }), child_delta, + state_hash, ); self.db_updates = transaction; @@ -928,8 +932,8 @@ impl sc_client_api::backend::BlockImportOperation Ok(root) } - fn set_genesis_state(&mut self, storage: Storage, commit: bool) -> ClientResult { - let root = self.apply_new_state(storage)?; + fn 
set_genesis_state(&mut self, storage: Storage, commit: bool, state_hash: StateVersion) -> ClientResult { + let root = self.apply_new_state(storage, state_hash)?; self.commit_state = commit; Ok(root) } diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index a895324a2e7b9..868bb2ea9d6e4 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -27,6 +27,7 @@ use log::trace; use parking_lot::{RwLock, RwLockUpgradableReadGuard}; use sp_core::{hexdisplay::HexDisplay, storage::ChildInfo}; use sp_runtime::traits::{Block as BlockT, HashFor, Header, NumberFor}; +use sp_runtime::StateVersion; use sp_state_machine::{ backend::Backend as StateBackend, ChildStorageCollection, StorageCollection, StorageKey, StorageValue, TrieBackend, @@ -673,22 +674,24 @@ impl>, B: BlockT> StateBackend> for Cachin fn storage_root<'a>( &self, delta: impl Iterator)>, + state_hash: StateVersion, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord, { - self.state.storage_root(delta) + self.state.storage_root(delta, state_hash) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, + state_hash: StateVersion, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord, { - self.state.child_storage_root(child_info, delta) + self.state.child_storage_root(child_info, delta, state_hash) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -871,22 +874,24 @@ impl>, B: BlockT> StateBackend> fn storage_root<'a>( &self, delta: impl Iterator)>, + state_hash: StateVersion, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord, { - self.caching_state().storage_root(delta) + self.caching_state().storage_root(delta, state_hash) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, + state_hash: StateVersion, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord, { - self.caching_state().child_storage_root(child_info, delta) + self.caching_state().child_storage_root(child_info, delta, state_hash) } fn 
pairs(&self) -> Vec<(Vec, Vec)> { diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 3b6315052a78d..be4f80592f5e8 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -75,7 +75,6 @@ pub struct ImportOperation { set_head: Option>, storage_update: Option>>, changes_trie_config_update: Option>, - state_hash: Option, _phantom: std::marker::PhantomData, } @@ -141,18 +140,15 @@ where set_head: None, storage_update: None, changes_trie_config_update: None, - state_hash: None, _phantom: Default::default(), }) } fn begin_state_operation( &self, - operation: &mut Self::BlockImportOperation, + _operation: &mut Self::BlockImportOperation, _block: BlockId, - state_hash: StateVersion, ) -> ClientResult<()> { - operation.state_hash = Some(state_hash); Ok(()) } @@ -330,7 +326,7 @@ where Ok(()) } - fn set_genesis_state(&mut self, input: Storage, commit: bool) -> ClientResult { + fn set_genesis_state(&mut self, input: Storage, commit: bool, state_hash: StateVersion) -> ClientResult { check_genesis_storage(&input)?; // changes trie configuration @@ -359,8 +355,8 @@ where storage.insert(Some(storage_child.child_info), storage_child.data); } - let storage_update = InMemoryBackend::from(storage); - let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); + let storage_update = InMemoryBackend::from((storage, state_hash)); + let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta, state_hash); if commit { self.storage_update = Some(storage_update); } @@ -368,7 +364,7 @@ where Ok(storage_root) } - fn reset_storage(&mut self, _input: Storage) -> ClientResult { + fn reset_storage(&mut self, _input: Storage, _state_hash: StateVersion) -> ClientResult { Err(ClientError::NotAvailableOnLightClient) } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 9439a06a5af95..e81e312feea95 100644 --- a/client/service/src/client/client.rs +++ 
b/client/service/src/client/client.rs @@ -74,6 +74,7 @@ use sp_runtime::{ Block as BlockT, DigestFor, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, }, + StateVersion, BuildStorage, Justification, Justifications, }; use sp_state_machine::{ @@ -326,6 +327,7 @@ where backend: Arc, executor: E, build_genesis_storage: &dyn BuildStorage, + genesis_state_hash: StateVersion, fork_blocks: ForkBlocks, bad_blocks: BadBlocks, execution_extensions: ExecutionExtensions, @@ -338,7 +340,7 @@ where let genesis_storage = build_genesis_storage.build_storage().map_err(sp_blockchain::Error::Storage)?; let mut op = backend.begin_operation()?; - let state_root = op.set_genesis_state(genesis_storage, !config.no_genesis)?; + let state_root = op.set_genesis_state(genesis_storage, !config.no_genesis, genesis_state_hash)?; let genesis_block = genesis::construct_genesis_block::(state_root.into()); info!( "🔨 Initializing Genesis block/state (state: {}, header-hash: {})", @@ -825,7 +827,8 @@ where children_default: Default::default(), }; - let state_root = operation.op.reset_storage(storage)?; + let state_hash = self.state_hash_at(BlockId::Hash(parent_hash))?; + let state_root = operation.op.reset_storage(storage, state_hash)?; if state_root != *import_headers.post().state_root() { // State root mismatch when importing state. This should not happen in // safe fast sync mode, but may happen in unsafe mode. 
@@ -1849,6 +1852,10 @@ where fn runtime_version_at(&self, at: &BlockId) -> Result { self.runtime_version_at(at).map_err(Into::into) } + + fn state_hash_at(&self, at: &BlockId) -> Result { + unimplemented!("TODO") + } } /// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 81287b1fac64b..ba079f80ccd25 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -297,11 +297,17 @@ fn generate_runtime_api_base_structures() -> Result { #crate_::StorageChanges, String > where Self: Sized { + let at = #crate_::BlockId::Hash(parent_hash.clone()); + let state_hash = self.call + .state_hash_at(&at) + .map_err(|e| format!("{:?}", e))?; + self.changes.replace(Default::default()).into_storage_changes( backend, changes_trie_state, parent_hash, self.storage_transaction_cache.replace(Default::default()), + state_hash, ) } } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 3aa45072fa715..9ba2a52d65006 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -92,8 +92,10 @@ pub use sp_runtime::{ Header as HeaderT, NumberFor, }, transaction_validity::TransactionValidity, - RuntimeString, TransactionOutcome, StateVersion, + RuntimeString, TransactionOutcome, }; +#[cfg(feature = "std")] +pub use sp_runtime::StateVersion; #[doc(hidden)] #[cfg(feature = "std")] pub use sp_state_machine::{ @@ -507,9 +509,6 @@ pub trait ApiExt { /// Returns the current active proof recorder. fn proof_recorder(&self) -> Option>; - /// Returns the current state trie inner hashing configuration. - fn state_hash(&self) -> StateVersion; - /// Convert the api object into the storage changes that were done while executing runtime /// api functions. /// @@ -567,6 +566,9 @@ pub trait CallApiAt { /// Returns the runtime version at the given block. 
fn runtime_version_at(&self, at: &BlockId) -> Result; + + /// Returns the state version at the given block. + fn state_hash_at(&self, at: &BlockId) -> Result; } /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime. From b537440a2f489f243d0504561bcbe7b4503848e3 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 8 Sep 2021 13:08:34 +0200 Subject: [PATCH 073/188] state version serialize in runtime, wrong approach, just initialize it with no threshold for core api < 4 seems more proper. --- bin/node-template/runtime/src/lib.rs | 2 + bin/node/bench/src/trie.rs | 4 +- bin/node/runtime/src/lib.rs | 1 + client/service/src/builder.rs | 2 + client/service/src/client/client.rs | 11 ++++- client/service/src/client/light.rs | 2 + primitives/api/src/lib.rs | 14 +++++- .../proc-macro/src/decl_runtime_version.rs | 46 ++++++++++++++++++- primitives/version/src/lib.rs | 6 ++- test-utils/client/src/lib.rs | 8 +--- test-utils/runtime/src/lib.rs | 1 + 11 files changed, 84 insertions(+), 13 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 63d79e604791d..7e8f1af9ec819 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -86,6 +86,7 @@ pub mod opaque { } } + // To learn more about runtime versioning and what each of the following value means: // https://substrate.dev/docs/en/knowledgebase/runtime/upgrades#runtime-versioning #[sp_version::runtime_version] @@ -102,6 +103,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, + state_version: Some(33), }; /// This determines the average expected block time that we are targeting. 
diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index 77ca3e85b8b05..e7b917870773a 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -145,7 +145,7 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { let root = generate_trie( database.open(self.database_type), key_values, - Some(sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD), + Some(sp_core::storage::DEFAULT_ALT_HASH_THRESHOLD), ); Box::new(TrieReadBenchmark { @@ -257,7 +257,7 @@ impl core::BenchmarkDescription for TrieWriteBenchmarkDescription { let root = generate_trie( database.open(self.database_type), key_values, - Some(sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD), + Some(sp_core::storage::DEFAULT_ALT_HASH_THRESHOLD), ); Box::new(TrieWriteBenchmark { diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 909ff931756ad..58270be6521ed 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -121,6 +121,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, + state_version: Some(33), }; /// The BABE epoch configuration at genesis. diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index a1fb1b909773f..612c11d67f1b6 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -450,10 +450,12 @@ where spawn_handle, config.clone(), )?; + let genesis_state_version = Some(33); // TODO resolve from genesis_storage wasm. 
Ok(crate::client::Client::new( backend, executor, genesis_storage, + genesis_state_version, fork_blocks, bad_blocks, execution_extensions, diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index e81e312feea95..5422f567a55fe 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -240,10 +240,12 @@ where keystore, sc_offchain::OffchainDb::factory_from_backend(&*backend), ); + let genesis_state_version = Some(33); // TODO resolve from genesis storage wasm Client::new( backend, call_executor, build_genesis_storage, + genesis_state_version, Default::default(), Default::default(), extensions, @@ -411,6 +413,11 @@ where self.executor.runtime_version(id) } + /// Get the StateVersion at a given block. + pub fn state_hash_at(&self, id: &BlockId) -> sp_blockchain::Result { + Ok(self.executor.runtime_version(id)?.state_version) + } + /// Reads given header and generates CHT-based header proof for CHT of given size. pub fn header_proof_with_cht_size( &self, @@ -827,7 +834,7 @@ where children_default: Default::default(), }; - let state_hash = self.state_hash_at(BlockId::Hash(parent_hash))?; + let state_hash = self.state_hash_at(&BlockId::Hash(parent_hash))?; let state_root = operation.op.reset_storage(storage, state_hash)?; if state_root != *import_headers.post().state_root() { // State root mismatch when importing state. 
This should not happen in @@ -1854,7 +1861,7 @@ where } fn state_hash_at(&self, at: &BlockId) -> Result { - unimplemented!("TODO") + Ok(self.runtime_version_at(at)?.state_version) } } diff --git a/client/service/src/client/light.rs b/client/service/src/client/light.rs index 7c13b98843e05..01c586c719472 100644 --- a/client/service/src/client/light.rs +++ b/client/service/src/client/light.rs @@ -68,10 +68,12 @@ where ClientConfig::default(), )?; let executor = GenesisCallExecutor::new(backend.clone(), local_executor); + let genesis_state_version = Some(33); // TODO resolve from genesis_storage wasm. Client::new( backend, executor, genesis_storage, + genesis_state_version, Default::default(), Default::default(), Default::default(), diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 9ba2a52d65006..960206646e3e8 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -674,6 +674,17 @@ pub struct OldRuntimeVersion { pub apis: ApisVec, } +#[derive(codec::Encode, codec::Decode)] // TODO same use as OldRuntimeVersion +pub struct OldRuntimeVersion2 { + pub spec_name: RuntimeString, + pub impl_name: RuntimeString, + pub authoring_version: u32, + pub spec_version: u32, + pub impl_version: u32, + pub apis: ApisVec, + pub transaction_version: u32, +} + impl From for RuntimeVersion { fn from(x: OldRuntimeVersion) -> Self { Self { @@ -684,6 +695,7 @@ impl From for RuntimeVersion { impl_version: x.impl_version, apis: x.apis, transaction_version: 1, + state_version: None, } } } @@ -704,7 +716,7 @@ impl From for OldRuntimeVersion { decl_runtime_apis! { /// The `Core` runtime api that every Substrate runtime needs to implement. #[core_trait] - #[api_version(3)] + #[api_version(3)] // TODO version to 4 to be able to runtime version decode properly pub trait Core { /// Returns the version of the runtime. 
fn version() -> RuntimeVersion; diff --git a/primitives/version/proc-macro/src/decl_runtime_version.rs b/primitives/version/proc-macro/src/decl_runtime_version.rs index eef6314be4c81..db70d2a08551c 100644 --- a/primitives/version/proc-macro/src/decl_runtime_version.rs +++ b/primitives/version/proc-macro/src/decl_runtime_version.rs @@ -22,7 +22,7 @@ use syn::{ parse::{Error, Result}, parse_macro_input, spanned::Spanned as _, - Expr, ExprLit, FieldValue, ItemConst, Lit, + Expr, ExprLit, FieldValue, ItemConst, Lit, ExprCall, ExprPath, }; /// This macro accepts a `const` item that has a struct initializer expression of @@ -63,6 +63,7 @@ struct RuntimeVersion { impl_version: u32, apis: u8, transaction_version: u32, + state_version: Option, } #[derive(Default, Debug)] @@ -73,6 +74,7 @@ struct ParseRuntimeVersion { spec_version: Option, impl_version: Option, transaction_version: Option, + state_version: Option>, // TODO would actually make sense to force default from core 4 and no declaration here } impl ParseRuntimeVersion { @@ -122,6 +124,8 @@ impl ParseRuntimeVersion { parse_once(&mut self.impl_version, field_value, Self::parse_num_literal)?; } else if field_name == "transaction_version" { parse_once(&mut self.transaction_version, field_value, Self::parse_num_literal)?; + } else if field_name == "state_version" { + parse_once(&mut self.state_version, field_value, Self::parse_state_verison_literal)?; } else if field_name == "apis" { // Intentionally ignored // @@ -147,6 +151,44 @@ impl ParseRuntimeVersion { lit.base10_parse::() } + fn parse_state_verison_literal(expr: &Expr) -> Result> { + let lit = match &*expr { + Expr::Path(ExprPath { path, .. }) if path.is_ident("None") => { + return Ok(None); + }, + Expr::Call(ExprCall { func, args, .. }) => { + match &**func { + Expr::Path(ExprPath { path, .. 
}) if path.is_ident("Some") => { + &args[0] + }, + _ => { + return Err(Error::new( + expr.span(), + "state version Option is expected here.", + )) + }, + } + }, + _ => { + return Err(Error::new( + expr.span(), + "state version Option is expected here.", + )) + }, + }; + + let lit = match &*lit { + Expr::Lit(ExprLit { lit: Lit::Int(lit), .. }) => lit, + _ => + return Err(Error::new( + expr.span(), + "state version Option is expected here.", + )), + }; + let threshold = lit.base10_parse::()?; + Ok(Some(threshold)) + } + fn parse_str_literal(expr: &Expr) -> Result { let mac = match *expr { Expr::Macro(syn::ExprMacro { ref mac, .. }) => mac, @@ -182,6 +224,7 @@ impl ParseRuntimeVersion { spec_version, impl_version, transaction_version, + state_version, } = self; Ok(RuntimeVersion { @@ -191,6 +234,7 @@ impl ParseRuntimeVersion { spec_version: required!(spec_version), impl_version: required!(impl_version), transaction_version: required!(transaction_version), + state_version: required!(state_version), apis: 0, }) } diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 65b22436a5ba1..63de23ebb3002 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -27,7 +27,7 @@ use std::collections::HashSet; use std::fmt; use codec::{Decode, Encode}; -pub use sp_runtime::create_runtime_str; +pub use sp_runtime::{create_runtime_str, StateVersion}; use sp_runtime::RuntimeString; #[doc(hidden)] pub use sp_std; @@ -176,6 +176,10 @@ pub struct RuntimeVersion { /// /// It need *not* change when a new module is added or when a dispatchable is added. pub transaction_version: u32, + + /// Trie state version to use when runing updates. + /// TODO manage the versioning and encode/decode. 
+ pub state_version: StateVersion, } #[cfg(feature = "std")] diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 95fddb592d34a..dfb95ffc015e8 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -230,12 +230,6 @@ impl { let storage = { let mut storage = self.genesis_init.genesis_storage(); - if self.state_hashed_value { - storage.modify_trie_alt_hashing_threshold(Some( - sp_core::storage::TEST_DEFAULT_ALT_HASH_THRESHOLD, - )); - } - // Add some child storage keys. for (key, child_content) in self.child_storage_extension { storage.children_default.insert( @@ -250,10 +244,12 @@ impl storage }; + let genesis_state_version = Some(33); // TODO get from genesis wasm let client = client::Client::new( self.backend.clone(), executor, &storage, + genesis_state_version, self.fork_blocks, self.bad_blocks, ExecutionExtensions::new( diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index bcf3bdd45c5fd..e7298e8a52d12 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -100,6 +100,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 2, apis: RUNTIME_API_VERSIONS, transaction_version: 1, + state_version: Some(33), }; fn version() -> RuntimeVersion { From b717611bc9c7ed85140abae20995a6c3d9cbbefb Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 8 Sep 2021 14:21:08 +0200 Subject: [PATCH 074/188] stateversion from runtime version (core api >= 4). 
--- bin/node-template/runtime/src/lib.rs | 1 - bin/node/runtime/src/lib.rs | 1 - client/service/src/client/client.rs | 4 +- primitives/api/src/lib.rs | 3 +- primitives/runtime/src/lib.rs | 2 + .../proc-macro/src/decl_runtime_version.rs | 46 +------------------ primitives/version/src/lib.rs | 16 +++++-- test-utils/runtime/src/lib.rs | 1 - 8 files changed, 17 insertions(+), 57 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 7e8f1af9ec819..830cff3216920 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -103,7 +103,6 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: Some(33), }; /// This determines the average expected block time that we are targeting. diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 58270be6521ed..909ff931756ad 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -121,7 +121,6 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, - state_version: Some(33), }; /// The BABE epoch configuration at genesis. diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 5422f567a55fe..7db4982644f07 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -415,7 +415,7 @@ where /// Get the StateVersion at a given block. pub fn state_hash_at(&self, id: &BlockId) -> sp_blockchain::Result { - Ok(self.executor.runtime_version(id)?.state_version) + Ok(self.executor.runtime_version(id)?.state_version()) } /// Reads given header and generates CHT-based header proof for CHT of given size. 
@@ -1861,7 +1861,7 @@ where } fn state_hash_at(&self, at: &BlockId) -> Result { - Ok(self.runtime_version_at(at)?.state_version) + Ok(self.runtime_version_at(at)?.state_version()) } } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 960206646e3e8..ba9f7e9e5cafa 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -695,7 +695,6 @@ impl From for RuntimeVersion { impl_version: x.impl_version, apis: x.apis, transaction_version: 1, - state_version: None, } } } @@ -716,7 +715,7 @@ impl From for OldRuntimeVersion { decl_runtime_apis! { /// The `Core` runtime api that every Substrate runtime needs to implement. #[core_trait] - #[api_version(3)] // TODO version to 4 to be able to runtime version decode properly + #[api_version(4)] pub trait Core { /// Returns the version of the runtime. fn version() -> RuntimeVersion; diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 3b6e6d75103fc..ba7ea9e9e667f 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -41,6 +41,8 @@ pub use sp_application_crypto as app_crypto; #[cfg(feature = "std")] pub use sp_core::storage::{Storage, StorageChild}; pub use sp_core::{StateVersion, DEFAULT_STATE_HASHING}; +#[cfg(feature = "std")] +pub use sp_core::hashing; use sp_core::{ crypto::{self, Public}, diff --git a/primitives/version/proc-macro/src/decl_runtime_version.rs b/primitives/version/proc-macro/src/decl_runtime_version.rs index db70d2a08551c..eef6314be4c81 100644 --- a/primitives/version/proc-macro/src/decl_runtime_version.rs +++ b/primitives/version/proc-macro/src/decl_runtime_version.rs @@ -22,7 +22,7 @@ use syn::{ parse::{Error, Result}, parse_macro_input, spanned::Spanned as _, - Expr, ExprLit, FieldValue, ItemConst, Lit, ExprCall, ExprPath, + Expr, ExprLit, FieldValue, ItemConst, Lit, }; /// This macro accepts a `const` item that has a struct initializer expression of @@ -63,7 +63,6 @@ struct RuntimeVersion { impl_version: u32, apis: 
u8, transaction_version: u32, - state_version: Option, } #[derive(Default, Debug)] @@ -74,7 +73,6 @@ struct ParseRuntimeVersion { spec_version: Option, impl_version: Option, transaction_version: Option, - state_version: Option>, // TODO would actually make sense to force default from core 4 and no declaration here } impl ParseRuntimeVersion { @@ -124,8 +122,6 @@ impl ParseRuntimeVersion { parse_once(&mut self.impl_version, field_value, Self::parse_num_literal)?; } else if field_name == "transaction_version" { parse_once(&mut self.transaction_version, field_value, Self::parse_num_literal)?; - } else if field_name == "state_version" { - parse_once(&mut self.state_version, field_value, Self::parse_state_verison_literal)?; } else if field_name == "apis" { // Intentionally ignored // @@ -151,44 +147,6 @@ impl ParseRuntimeVersion { lit.base10_parse::() } - fn parse_state_verison_literal(expr: &Expr) -> Result> { - let lit = match &*expr { - Expr::Path(ExprPath { path, .. }) if path.is_ident("None") => { - return Ok(None); - }, - Expr::Call(ExprCall { func, args, .. }) => { - match &**func { - Expr::Path(ExprPath { path, .. }) if path.is_ident("Some") => { - &args[0] - }, - _ => { - return Err(Error::new( - expr.span(), - "state version Option is expected here.", - )) - }, - } - }, - _ => { - return Err(Error::new( - expr.span(), - "state version Option is expected here.", - )) - }, - }; - - let lit = match &*lit { - Expr::Lit(ExprLit { lit: Lit::Int(lit), .. }) => lit, - _ => - return Err(Error::new( - expr.span(), - "state version Option is expected here.", - )), - }; - let threshold = lit.base10_parse::()?; - Ok(Some(threshold)) - } - fn parse_str_literal(expr: &Expr) -> Result { let mac = match *expr { Expr::Macro(syn::ExprMacro { ref mac, .. 
}) => mac, @@ -224,7 +182,6 @@ impl ParseRuntimeVersion { spec_version, impl_version, transaction_version, - state_version, } = self; Ok(RuntimeVersion { @@ -234,7 +191,6 @@ impl ParseRuntimeVersion { spec_version: required!(spec_version), impl_version: required!(impl_version), transaction_version: required!(transaction_version), - state_version: required!(state_version), apis: 0, }) } diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 63de23ebb3002..0bfa13c95b850 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -27,7 +27,7 @@ use std::collections::HashSet; use std::fmt; use codec::{Decode, Encode}; -pub use sp_runtime::{create_runtime_str, StateVersion}; +pub use sp_runtime::{create_runtime_str, StateVersion, DEFAULT_STATE_HASHING}; use sp_runtime::RuntimeString; #[doc(hidden)] pub use sp_std; @@ -176,10 +176,6 @@ pub struct RuntimeVersion { /// /// It need *not* change when a new module is added or when a dispatchable is added. pub transaction_version: u32, - - /// Trie state version to use when runing updates. - /// TODO manage the versioning and encode/decode. - pub state_version: StateVersion, } #[cfg(feature = "std")] @@ -217,6 +213,16 @@ impl RuntimeVersion { pub fn api_version(&self, id: &ApiId) -> Option { self.apis.iter().find_map(|a| (a.0 == *id).then(|| a.1)) } + + /// Returns state version to use for update. 
+ pub fn state_version(&self) -> StateVersion { + let core_api_id = sp_runtime::hashing::blake2_64(b"Core"); + if self.has_api_with(&core_api_id, |v| v >= 4) { + DEFAULT_STATE_HASHING + } else { + None + } + } } #[cfg(feature = "std")] diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index e7298e8a52d12..bcf3bdd45c5fd 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -100,7 +100,6 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 2, apis: RUNTIME_API_VERSIONS, transaction_version: 1, - state_version: Some(33), }; fn version() -> RuntimeVersion { From 846fef007516494dd774a0e7a5826377e331caa9 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 8 Sep 2021 17:18:40 +0200 Subject: [PATCH 075/188] update trie, fix tests --- Cargo.lock | 24 +-- Cargo.toml | 10 +- bin/node/bench/src/generator.rs | 7 +- bin/node/bench/src/simple_trie.rs | 5 - bin/node/bench/src/trie.rs | 6 +- client/api/src/cht.rs | 2 +- .../basic-authorship/src/basic_authorship.rs | 8 +- client/basic-authorship/src/lib.rs | 2 +- client/block-builder/src/lib.rs | 2 +- client/consensus/aura/src/lib.rs | 2 +- client/consensus/babe/src/aux_schema.rs | 2 +- client/consensus/slots/src/aux_schema.rs | 2 +- client/db/src/bench.rs | 13 +- client/db/src/lib.rs | 35 ++--- client/db/src/storage_cache.rs | 2 +- client/executor/runtime-test/Cargo.toml | 2 +- client/executor/src/integration_tests/mod.rs | 60 +------- client/finality-grandpa/src/aux_schema.rs | 8 +- client/light/src/fetcher.rs | 3 +- client/network/src/light_client_requests.rs | 26 +--- client/network/test/src/block_import.rs | 21 ++- client/network/test/src/lib.rs | 10 +- client/offchain/src/lib.rs | 2 +- client/rpc/src/chain/tests.rs | 18 +-- client/rpc/src/state/tests.rs | 26 +--- client/service/src/builder.rs | 2 +- client/service/src/client/client.rs | 2 +- client/service/src/client/light.rs | 2 +- client/service/test/src/client/light.rs | 48 +++--- 
client/service/test/src/client/mod.rs | 44 ++---- client/transaction-pool/tests/pool.rs | 4 +- frame/support/test/tests/instance.rs | 2 +- frame/system/src/lib.rs | 5 +- .../api/proc-macro/src/impl_runtime_apis.rs | 2 +- primitives/api/src/lib.rs | 11 -- primitives/api/test/benches/bench.rs | 8 +- primitives/io/src/lib.rs | 8 +- primitives/state-machine/src/backend.rs | 12 +- primitives/state-machine/src/basic.rs | 14 +- .../state-machine/src/changes_trie/build.rs | 25 +-- .../state-machine/src/changes_trie/mod.rs | 2 - .../state-machine/src/changes_trie/storage.rs | 2 - primitives/state-machine/src/ext.rs | 16 +- .../state-machine/src/in_memory_backend.rs | 12 +- primitives/state-machine/src/lib.rs | 46 +++--- .../src/overlayed_changes/mod.rs | 2 +- .../state-machine/src/proving_backend.rs | 101 ++++--------- primitives/state-machine/src/testing.rs | 49 ++++-- primitives/state-machine/src/trie_backend.rs | 46 +++--- .../state-machine/src/trie_backend_essence.rs | 42 ------ primitives/storage/src/lib.rs | 27 +++- primitives/trie/src/lib.rs | 140 ++++------------- primitives/trie/src/node_codec.rs | 140 ++++------------- primitives/trie/src/node_header.rs | 45 ++---- primitives/trie/src/storage_proof.rs | 24 +-- primitives/trie/src/trie_codec.rs | 6 +- primitives/trie/src/trie_stream.rs | 142 +++++------------- primitives/version/src/lib.rs | 4 +- test-utils/client/src/lib.rs | 10 +- test-utils/runtime/client/src/lib.rs | 23 ++- test-utils/runtime/src/lib.rs | 4 +- utils/frame/rpc/system/src/lib.rs | 8 +- 62 files changed, 465 insertions(+), 913 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5efaf7b3d5b98..a41ac4d4030f8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2441,7 +2441,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#618beec2d30931fb2aa7ee371012be5970ff20a6" +source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" [[package]] name = "hash256-std-hasher" @@ -2455,7 +2455,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#618beec2d30931fb2aa7ee371012be5970ff20a6" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" dependencies = [ "crunchy", ] @@ -3108,10 +3108,10 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#618beec2d30931fb2aa7ee371012be5970ff20a6" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" dependencies = [ "hash-db", - "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", + "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", "tiny-keccak", ] @@ -3969,7 +3969,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#618beec2d30931fb2aa7ee371012be5970ff20a6" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10427,22 +10427,22 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.28.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#618beec2d30931fb2aa7ee371012be5970ff20a6" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" dependencies = [ "criterion", "hash-db", - "keccak-hasher 0.15.3 
(git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", "memory-db", "parity-scale-codec", "trie-db", "trie-root", - "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", + "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", ] [[package]] name = "trie-db" version = "0.22.6" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#618beec2d30931fb2aa7ee371012be5970ff20a6" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10454,7 +10454,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#618beec2d30931fb2aa7ee371012be5970ff20a6" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" dependencies = [ "hash-db", ] @@ -10472,10 +10472,10 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4#618beec2d30931fb2aa7ee371012be5970ff20a6" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" dependencies = [ "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4)", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index cefa5893813d6..4f7f27a3d36ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -267,8 +267,8 @@ zeroize = { opt-level = 3 } panic = "unwind" [patch.crates-io] -hash-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } 
-memory-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } -trie-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } -trie-root = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } -trie-bench = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4" } +hash-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } +memory-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } +trie-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } +trie-root = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } +trie-bench = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index f95811c40ebee..4a8cc88edf3b7 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -23,6 +23,7 @@ use node_primitives::Hash; use sp_trie::{trie_types::TrieDBMut, TrieMut}; use crate::simple_trie::SimpleTrie; +use sp_core::StateVersion; /// Generate trie from given `key_values`. 
/// @@ -31,7 +32,7 @@ use crate::simple_trie::SimpleTrie; pub fn generate_trie( db: Arc, key_values: impl IntoIterator, Vec)>, - alt_hashing: Option, + state_version: StateVersion, ) -> Hash { let mut root = Hash::default(); @@ -44,8 +45,8 @@ pub fn generate_trie( ); let mut trie = SimpleTrie { db, overlay: &mut overlay }; { - let mut trie_db = if let Some(threshold) = alt_hashing { - let layout = sp_trie::Layout::with_alt_hashing(threshold); + let mut trie_db = if let Some(threshold) = state_version.state_value_threshold() { + let layout = sp_trie::Layout::with_max_inline_value(threshold); TrieDBMut::::new_with_layout( &mut trie, &mut root, layout, ) diff --git a/bin/node/bench/src/simple_trie.rs b/bin/node/bench/src/simple_trie.rs index b33a588b463f7..651772c71575f 100644 --- a/bin/node/bench/src/simple_trie.rs +++ b/bin/node/bench/src/simple_trie.rs @@ -65,11 +65,6 @@ impl<'a> HashDB for SimpleTrie<'a> { self.overlay.insert(key, Some(value)); } - fn emplace_ref(&mut self, key: &Hash, prefix: Prefix, value: &[u8]) { - let key = sp_trie::prefixed_key::(key, prefix); - self.overlay.insert(key, Some(value.into())); - } - fn remove(&mut self, key: &Hash, prefix: Prefix) { let key = sp_trie::prefixed_key::(key, prefix); self.overlay.insert(key, None); diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index e7b917870773a..eb8cbdbbac845 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -145,7 +145,7 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { let root = generate_trie( database.open(self.database_type), key_values, - Some(sp_core::storage::DEFAULT_ALT_HASH_THRESHOLD), + sp_core::StateVersion::default(), ); Box::new(TrieReadBenchmark { @@ -175,8 +175,6 @@ impl sp_state_machine::Storage for Storage { let key = sp_trie::prefixed_key::(key, prefix); self.0.get(0, &key).map_err(|e| format!("Database backend error: {:?}", e)) } - - fn access_from(&self, _key: &Hash) {} } impl core::Benchmark for 
TrieReadBenchmark { @@ -257,7 +255,7 @@ impl core::BenchmarkDescription for TrieWriteBenchmarkDescription { let root = generate_trie( database.open(self.database_type), key_values, - Some(sp_core::storage::DEFAULT_ALT_HASH_THRESHOLD), + sp_core::StateVersion::default(), ); Box::new(TrieWriteBenchmark { diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 35c161bed9e0c..cbf527f2d8af2 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -117,7 +117,7 @@ where .into_iter() .map(|(k, v)| (k, Some(v))) .collect::>(); - let storage = InMemoryBackend::::default().update(vec![(None, transaction)], None); + let storage = InMemoryBackend::::default().update(vec![(None, transaction)], sp_runtime::StateVersion::V0); let trie_storage = storage .as_trie_backend() .expect("InMemoryState::as_trie_backend always returns Some; qed"); diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index c4e617de9a76a..144a3ab6850ff 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -518,7 +518,7 @@ mod tests { #[test] fn should_cease_building_block_when_deadline_is_reached() { // given - let client = Arc::new(substrate_test_runtime_client::new(true)); + let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), @@ -574,7 +574,7 @@ mod tests { #[test] fn should_not_panic_when_deadline_is_reached() { - let client = Arc::new(substrate_test_runtime_client::new(true)); + let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), @@ -671,7 +671,7 @@ mod tests { #[test] fn should_not_remove_invalid_transactions_when_skipping() { // given - let mut client = Arc::new(substrate_test_runtime_client::new(true)); + let mut client = 
Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), @@ -761,7 +761,7 @@ mod tests { #[test] fn should_cease_building_block_when_block_limit_is_reached() { - let client = Arc::new(substrate_test_runtime_client::new(true)); + let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); let txpool = BasicPool::new_full( Default::default(), diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index cb83719c119d2..2b2fe554efdff 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -30,7 +30,7 @@ //! # DefaultTestClientBuilderExt, TestClientBuilderExt, //! # }; //! # use sc_transaction_pool::{BasicPool, FullChainApi}; -//! # let client = Arc::new(substrate_test_runtime_client::new(true)); +//! # let client = Arc::new(substrate_test_runtime_client::new()); //! # let spawner = sp_core::testing::TaskExecutor::new(); //! # let txpool = BasicPool::new_full( //! 
# Default::default(), diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index ae8665ba4cfea..e89421edfb168 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -301,7 +301,7 @@ mod tests { #[test] fn block_building_storage_proof_does_not_include_runtime_by_default() { - let builder = substrate_test_runtime_client::TestClientBuilder::new().state_hashed_value(); + let builder = substrate_test_runtime_client::TestClientBuilder::new(); let backend = builder.backend(); let client = builder.build(); diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index b79f5b7db5680..d038db97cb473 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -788,7 +788,7 @@ mod tests { #[test] fn authorities_call_works() { - let client = substrate_test_runtime_client::new(true); + let client = substrate_test_runtime_client::new(); assert_eq!(client.chain_info().best_number, 0); assert_eq!( diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index fd62794f1021e..b18220c3e360a 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -149,7 +149,7 @@ mod test { epoch_index: 1, duration: 100, }; - let client = substrate_test_runtime_client::new(true); + let client = substrate_test_runtime_client::new(); let mut v0_tree = ForkTree::, _>::new(); v0_tree .import::<_, ConsensusError>( diff --git a/client/consensus/slots/src/aux_schema.rs b/client/consensus/slots/src/aux_schema.rs index 3a8dbe4f51049..c2fe3f6f4e6bb 100644 --- a/client/consensus/slots/src/aux_schema.rs +++ b/client/consensus/slots/src/aux_schema.rs @@ -157,7 +157,7 @@ mod test { #[test] fn check_equivocation_works() { - let client = substrate_test_runtime_client::new(true); + let client = substrate_test_runtime_client::new(); let (pair, _seed) = sr25519::Pair::generate(); let public = pair.public(); diff --git 
a/client/db/src/bench.rs b/client/db/src/bench.rs index 57a18da3a5e58..b87eca5990087 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -64,7 +64,7 @@ impl sp_state_machine::Storage> for StorageDb>(key.clone(), backend_value.clone()); + recorder.record(key.clone(), backend_value.clone()); Ok(backend_value) } else { self.db @@ -72,13 +72,8 @@ impl sp_state_machine::Storage> for StorageDb::LENGTH); - } - } } + /// State that manages the backend database reference. Allows runtime to control the database. pub struct BenchmarkingState { root: Cell, @@ -111,7 +106,7 @@ impl BenchmarkingState { record_proof: bool, enable_tracking: bool, ) -> Result { - let state_hash = sp_runtime::DEFAULT_STATE_HASHING; + let state_hash = sp_runtime::StateVersion::default(); let mut root = B::Hash::default(); let mut mdb = MemoryDB::>::default(); sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); @@ -612,7 +607,7 @@ impl StateBackend> for BenchmarkingState { fn proof_size(&self) -> Option { self.proof_recorder.as_ref().map(|recorder| { let proof_size = recorder.estimate_encoded_size() as u32; - let proof = recorder.to_storage_proof::>(); + let proof = recorder.to_storage_proof(); let proof_recorder_root = self.proof_recorder_root.get(); if proof_recorder_root == Default::default() || proof_size == 1 { // empty trie diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 16403046978cc..2d7071bba5e20 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1010,8 +1010,6 @@ impl sp_state_machine::Storage> for StorageDb sc_state_db::NodeDb for StorageDb { @@ -1039,7 +1037,6 @@ impl sp_state_machine::Storage> for DbGenesisStora use hash_db::HashDB; Ok(self.storage.get(key, prefix)) } - fn access_from(&self, _key: &Block::Hash) {} } struct EmptyStorage(pub Block::Hash); @@ -1057,8 +1054,6 @@ impl sp_state_machine::Storage> for EmptyStorage Result, String> { Ok(None) } - - fn access_from(&self, _key: &Block::Hash) {} } /// Frozen `value` at time `at`. 
@@ -2575,10 +2570,10 @@ pub(crate) mod tests { #[test] fn set_state_data() { - set_state_data_inner(true); - set_state_data_inner(false); + set_state_data_inner(StateVersion::V0); + set_state_data_inner(StateVersion::V1); } - fn set_state_data_inner(alt_hashing: bool) { + fn set_state_data_inner(state_version: StateVersion) { let db = Backend::::new_test(2, 0); let hash = { let mut op = db.begin_operation().unwrap(); @@ -2594,7 +2589,7 @@ pub(crate) mod tests { header.state_root = op .old_state - .storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..])))) + .storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version) .0 .into(); let hash = header.hash(); @@ -2602,7 +2597,7 @@ pub(crate) mod tests { op.reset_storage(Storage { top: storage.into_iter().collect(), children_default: Default::default(), - }) + }, state_version) .unwrap(); op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) .unwrap(); @@ -2633,7 +2628,7 @@ pub(crate) mod tests { let (root, overlay) = op .old_state - .storage_root(storage.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))); + .storage_root(storage.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))), state_version); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); @@ -2654,6 +2649,7 @@ pub(crate) mod tests { #[test] fn delete_only_when_negative_rc() { sp_tracing::try_init_simple(); + let state_version = StateVersion::default(); let key; let backend = Backend::::new_test(1, 0); @@ -2670,13 +2666,13 @@ pub(crate) mod tests { extrinsics_root: Default::default(), }; - header.state_root = op.old_state.storage_root(std::iter::empty()).0.into(); + header.state_root = op.old_state.storage_root(std::iter::empty(), state_version).0.into(); let hash = header.hash(); op.reset_storage(Storage { top: Default::default(), children_default: Default::default(), - }) + }, state_version) .unwrap(); key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); @@ -2710,7 +2706,7 
@@ pub(crate) mod tests { header.state_root = op .old_state - .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version) .0 .into(); let hash = header.hash(); @@ -2747,7 +2743,7 @@ pub(crate) mod tests { header.state_root = op .old_state - .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version) .0 .into(); let hash = header.hash(); @@ -2781,7 +2777,7 @@ pub(crate) mod tests { header.state_root = op .old_state - .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y))), state_version) .0 .into(); @@ -3116,6 +3112,7 @@ pub(crate) mod tests { #[test] fn storage_hash_is_cached_correctly() { + let state_version = StateVersion::default(); let backend = Backend::::new_test(10, 10); let hash0 = { @@ -3135,7 +3132,7 @@ pub(crate) mod tests { header.state_root = op .old_state - .storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..])))) + .storage_root(storage.iter().map(|(x, y)| (&x[..], Some(&y[..]))), state_version) .0 .into(); let hash = header.hash(); @@ -3143,7 +3140,7 @@ pub(crate) mod tests { op.reset_storage(Storage { top: storage.into_iter().collect(), children_default: Default::default(), - }) + }, state_version) .unwrap(); op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) .unwrap(); @@ -3174,7 +3171,7 @@ pub(crate) mod tests { let (root, overlay) = op .old_state - .storage_root(storage.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))); + .storage_root(storage.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))), state_version); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); let hash = header.hash(); diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 868bb2ea9d6e4..ac41464d366c1 100644 --- 
a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -1187,7 +1187,7 @@ mod tests { let shared = new_shared_cache::(256 * 1024, (0, 1)); let mut backend = InMemoryBackend::::default(); - backend.insert(std::iter::once((None, vec![(key.clone(), Some(vec![1]))]))); + backend.insert(std::iter::once((None, vec![(key.clone(), Some(vec![1]))])), Default::default()); let mut s = CachingState::new(backend.clone(), shared.clone(), Some(root_parent)); s.cache.sync_cache( diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index 64cf1410495dc..e3da31461fca4 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -19,7 +19,7 @@ sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../.. sp-sandbox = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/sandbox" } sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } sp-tasks = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/tasks" } -sp-storage = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/storage" } +sp-storage = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/storage" } # TODO used? 
[build-dependencies] substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 357e3ea972a37..cade4450de2c2 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -190,15 +190,14 @@ fn storage_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); } - let storage = sp_core::storage::Storage { + let expected = TestExternalities::new(sp_core::storage::Storage { top: map![ b"input".to_vec() => b"Hello worldHello worldHello worldHello world".to_vec(), b"foo".to_vec() => b"bar".to_vec(), b"baz".to_vec() => b"bar".to_vec() ], children_default: map![], - }; - let expected = TestExternalities::new_with_alt_hashing(storage); + }); assert_eq!(ext, expected); } @@ -221,16 +220,14 @@ fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { assert_eq!(output, b"all ok!".to_vec().encode()); } - let storage = sp_core::storage::Storage { + let expected = TestExternalities::new(sp_core::storage::Storage { top: map![ b"aaa".to_vec() => b"1".to_vec(), b"aab".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"5".to_vec() ], children_default: map![], - }; - - let expected = TestExternalities::new_with_alt_hashing(storage); + }); assert_eq!(expected, ext); } @@ -662,52 +659,3 @@ fn panic_in_spawned_instance_panics_on_joining_its_result(wasm_method: WasmExecu assert!(format!("{}", error_result).contains("Spawned task")); } - -test_wasm_execution!(state_hashing_update); -fn state_hashing_update(wasm_method: WasmExecutionMethod) { - // use externalities without storage flag. 
- let mut ext = TestExternalities::new(Default::default()); - - let root1 = { - let mut ext = ext.ext(); - ext.set_storage(b"foo".to_vec(), b"bar".to_vec()); - let output = - call_in_wasm("test_data_in", &vec![1u8; 100].encode(), wasm_method, &mut ext).unwrap(); - - assert_eq!(output, b"all ok!".to_vec().encode()); - ext.storage_root() - }; - - ext.commit_all().unwrap(); - let root2 = { - let mut ext = ext.ext(); - // flag state. - let _ = - call_in_wasm("test_switch_state", Default::default(), wasm_method, &mut ext).unwrap(); - ext.storage_root() - }; - - assert!(root1 != root2); - - ext.commit_all().unwrap(); - let root3 = { - let mut ext = ext.ext(); - let _ = - call_in_wasm("test_data_in", &vec![2u8; 100].to_vec().encode(), wasm_method, &mut ext) - .unwrap(); - ext.storage_root() - }; - assert!(root2 != root3); - - ext.commit_all().unwrap(); - let root3 = { - let mut ext = ext.ext(); - // revert to root 2 state, but this time - // inner hashing should apply - let _ = - call_in_wasm("test_data_in", &vec![1u8; 100].to_vec().encode(), wasm_method, &mut ext) - .unwrap(); - ext.storage_root() - }; - assert!(root2 != root3); -} diff --git a/client/finality-grandpa/src/aux_schema.rs b/client/finality-grandpa/src/aux_schema.rs index 53167bd633897..bad01e6dfc62f 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -504,7 +504,7 @@ mod test { #[test] fn load_decode_from_v0_migrates_data_format() { - let client = substrate_test_runtime_client::new(true); + let client = substrate_test_runtime_client::new(); let authorities = vec![(AuthorityId::default(), 100)]; let set_id = 3; @@ -593,7 +593,7 @@ mod test { #[test] fn load_decode_from_v1_migrates_data_format() { - let client = substrate_test_runtime_client::new(true); + let client = substrate_test_runtime_client::new(); let authorities = vec![(AuthorityId::default(), 100)]; let set_id = 3; @@ -686,7 +686,7 @@ mod test { #[test] fn 
load_decode_from_v2_migrates_data_format() { - let client = substrate_test_runtime_client::new(true); + let client = substrate_test_runtime_client::new(); let authorities = vec![(AuthorityId::default(), 100)]; let set_id = 3; @@ -754,7 +754,7 @@ mod test { #[test] fn write_read_concluded_rounds() { - let client = substrate_test_runtime_client::new(true); + let client = substrate_test_runtime_client::new(); let hash = H256::random(); let round_state = RoundState::genesis((hash, 0)); diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index fba3d854b8d2b..fb16e83c67ab3 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -169,8 +169,7 @@ impl> LightDataChecker { remote_roots_proof: StorageProof, ) -> ClientResult<()> { // all the checks are sharing the same storage - let storage: sp_state_machine::MemoryDB> = - remote_roots_proof.into_memory_db_no_meta(); + let storage: sp_state_machine::MemoryDB> = remote_roots_proof.into_memory_db(); // remote_roots.keys() are sorted => we can use this to group changes tries roots // that are belongs to the same CHT diff --git a/client/network/src/light_client_requests.rs b/client/network/src/light_client_requests.rs index f3c3597378247..e18b783f219be 100644 --- a/client/network/src/light_client_requests.rs +++ b/client/network/src/light_client_requests.rs @@ -178,8 +178,8 @@ mod tests { type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; - fn send_receive(request: sender::Request, pool: &LocalPool, hashed_value: bool) { - let client = Arc::new(substrate_test_runtime_client::new(hashed_value)); + fn send_receive(request: sender::Request, pool: &LocalPool) { + let client = Arc::new(substrate_test_runtime_client::new()); let (handler, protocol_config) = handler::LightClientRequestHandler::new(&protocol_id(), client); pool.spawner().spawn_obj(handler.run().boxed().into()).unwrap(); @@ -222,10 +222,6 @@ mod tests { #[test] fn send_receive_call() { - 
send_receive_call_inner(true); - send_receive_call_inner(false); - } - fn send_receive_call_inner(hashed_value: bool) { let chan = oneshot::channel(); let request = light::RemoteCallRequest { block: Default::default(), @@ -236,17 +232,13 @@ mod tests { }; let mut pool = LocalPool::new(); - send_receive(sender::Request::Call { request, sender: chan.0 }, &pool, hashed_value); + send_receive(sender::Request::Call { request, sender: chan.0 }, &pool); assert_eq!(vec![42], pool.run_until(chan.1).unwrap().unwrap()); // ^--- from `DummyFetchChecker::check_execution_proof` } #[test] fn send_receive_read() { - send_receive_read_inner(true); - send_receive_read_inner(false); - } - fn send_receive_read_inner(hashed_value: bool) { let chan = oneshot::channel(); let request = light::RemoteReadRequest { header: dummy_header(), @@ -255,7 +247,7 @@ mod tests { retry_count: None, }; let mut pool = LocalPool::new(); - send_receive(sender::Request::Read { request, sender: chan.0 }, &pool, hashed_value); + send_receive(sender::Request::Read { request, sender: chan.0 }, &pool); assert_eq!( Some(vec![42]), pool.run_until(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap() @@ -265,10 +257,6 @@ mod tests { #[test] fn send_receive_read_child() { - send_receive_read_child_inner(true); - send_receive_read_child_inner(false); - } - fn send_receive_read_child_inner(hashed_value: bool) { let chan = oneshot::channel(); let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); let request = light::RemoteReadChildRequest { @@ -279,7 +267,7 @@ mod tests { retry_count: None, }; let mut pool = LocalPool::new(); - send_receive(sender::Request::ReadChild { request, sender: chan.0 }, &pool, hashed_value); + send_receive(sender::Request::ReadChild { request, sender: chan.0 }, &pool); assert_eq!( Some(vec![42]), pool.run_until(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap() @@ -297,7 +285,7 @@ mod tests { retry_count: None, }; let mut pool = LocalPool::new(); - 
send_receive(sender::Request::Header { request, sender: chan.0 }, &pool, true); + send_receive(sender::Request::Header { request, sender: chan.0 }, &pool); // The remote does not know block 1: assert_matches!(pool.run_until(chan.1).unwrap(), Err(ClientError::RemoteFetchFailed)); } @@ -320,7 +308,7 @@ mod tests { retry_count: None, }; let mut pool = LocalPool::new(); - send_receive(sender::Request::Changes { request, sender: chan.0 }, &pool, true); + send_receive(sender::Request::Changes { request, sender: chan.0 }, &pool); assert_eq!(vec![(100, 2)], pool.run_until(chan.1).unwrap().unwrap()); // ^--- from `DummyFetchChecker::check_changes_proof` } diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index 53d7bf0c9c552..1c9b218fc1a1f 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -33,8 +33,8 @@ use substrate_test_runtime_client::{ runtime::{Block, Hash}, }; -fn prepare_good_block(hashed_value: bool) -> (TestClient, Hash, u64, PeerId, IncomingBlock) { - let mut client = substrate_test_runtime_client::new(hashed_value); +fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) { + let mut client = substrate_test_runtime_client::new(); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; block_on(client.import(BlockOrigin::File, block)).unwrap(); @@ -64,17 +64,14 @@ fn prepare_good_block(hashed_value: bool) -> (TestClient, Hash, u64, PeerId, Inc #[test] fn import_single_good_block_works() { - import_single_good_block_works_inner(true); - import_single_good_block_works_inner(false); -} -fn import_single_good_block_works_inner(hashed_value: bool) { - let (_, _hash, number, peer_id, block) = prepare_good_block(hashed_value); + let (_, _hash, number, peer_id, block) = prepare_good_block(); let mut expected_aux = ImportedAux::default(); expected_aux.is_new_best = true; + let mut client = substrate_test_runtime_client::new(); match 
block_on(import_single_block( - &mut substrate_test_runtime_client::new(hashed_value), + &mut client, BlockOrigin::File, block, &mut PassThroughVerifier::new(true), @@ -87,7 +84,7 @@ fn import_single_good_block_works_inner(hashed_value: bool) { #[test] fn import_single_good_known_block_is_ignored() { - let (mut client, _hash, number, _, block) = prepare_good_block(true); + let (mut client, _hash, number, _, block) = prepare_good_block(); match block_on(import_single_block( &mut client, BlockOrigin::File, @@ -101,10 +98,10 @@ fn import_single_good_known_block_is_ignored() { #[test] fn import_single_good_block_without_header_fails() { - let (_, _, _, peer_id, mut block) = prepare_good_block(true); + let (_, _, _, peer_id, mut block) = prepare_good_block(); block.header = None; match block_on(import_single_block( - &mut substrate_test_runtime_client::new(true), + &mut substrate_test_runtime_client::new(), BlockOrigin::File, block, &mut PassThroughVerifier::new(true), @@ -123,7 +120,7 @@ fn async_import_queue_drops() { let queue = BasicQueue::new( verifier, - Box::new(substrate_test_runtime_client::new(true)), + Box::new(substrate_test_runtime_client::new()), None, &executor, None, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index dfd45028108b8..89d3d5095a26b 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -731,11 +731,11 @@ where fn add_full_peer_with_config(&mut self, config: FullPeerConfig) { let mut test_client_builder = match (config.keep_blocks, config.storage_chain) { (Some(keep_blocks), true) => - TestClientBuilder::with_tx_storage(keep_blocks).state_hashed_value(), - (None, true) => TestClientBuilder::with_tx_storage(u32::MAX).state_hashed_value(), + TestClientBuilder::with_tx_storage(keep_blocks), + (None, true) => TestClientBuilder::with_tx_storage(u32::MAX), (Some(keep_blocks), false) => - TestClientBuilder::with_pruning_window(keep_blocks).state_hashed_value(), - (None, false) => 
TestClientBuilder::with_default_backend().state_hashed_value(), + TestClientBuilder::with_pruning_window(keep_blocks), + (None, false) => TestClientBuilder::with_default_backend(), }; if matches!(config.sync_mode, SyncMode::Fast { .. }) { test_client_builder = test_client_builder.set_no_genesis(); @@ -870,7 +870,7 @@ where /// Add a light peer. fn add_light_peer(&mut self) { - let (c, backend) = substrate_test_runtime_client::new_light(true); + let (c, backend) = substrate_test_runtime_client::new_light(); let client = Arc::new(c); let (block_import, justification_import, data) = self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index a859d4327daf2..be6e4238ca5f1 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -279,7 +279,7 @@ mod tests { fn should_call_into_runtime_and_produce_extrinsic() { sp_tracing::try_init_simple(); - let client = Arc::new(substrate_test_runtime_client::new(true)); + let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); let pool = TestPool(BasicPool::new_full( Default::default(), diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index ec2fc82e1a3c1..caa9f33138b86 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -30,7 +30,7 @@ use substrate_test_runtime_client::{ #[test] fn should_return_header() { - let client = Arc::new(substrate_test_runtime_client::new(true)); + let client = Arc::new(substrate_test_runtime_client::new()); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); assert_matches!( @@ -65,11 +65,7 @@ fn should_return_header() { #[test] fn should_return_a_block() { - should_return_a_block_inner(true); - should_return_a_block_inner(false); -} -fn should_return_a_block_inner(hashed_value: bool) { - let mut client = 
Arc::new(substrate_test_runtime_client::new(hashed_value)); + let mut client = Arc::new(substrate_test_runtime_client::new()); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -117,7 +113,7 @@ fn should_return_a_block_inner(hashed_value: bool) { #[test] fn should_return_block_hash() { - let mut client = Arc::new(substrate_test_runtime_client::new(true)); + let mut client = Arc::new(substrate_test_runtime_client::new()); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); assert_matches!( @@ -159,7 +155,7 @@ fn should_return_block_hash() { #[test] fn should_return_finalized_hash() { - let mut client = Arc::new(substrate_test_runtime_client::new(true)); + let mut client = Arc::new(substrate_test_runtime_client::new()); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); assert_matches!( @@ -189,7 +185,7 @@ fn should_notify_about_latest_block() { let (subscriber, id, mut transport) = Subscriber::new_test("test"); { - let mut client = Arc::new(substrate_test_runtime_client::new(true)); + let mut client = Arc::new(substrate_test_runtime_client::new()); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); api.subscribe_all_heads(Default::default(), subscriber); @@ -211,7 +207,7 @@ fn should_notify_about_best_block() { let (subscriber, id, mut transport) = Subscriber::new_test("test"); { - let mut client = Arc::new(substrate_test_runtime_client::new(true)); + let mut client = Arc::new(substrate_test_runtime_client::new()); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); api.subscribe_new_heads(Default::default(), subscriber); @@ -233,7 +229,7 @@ fn should_notify_about_finalized_block() { let (subscriber, id, mut transport) = Subscriber::new_test("test"); { - let mut client = 
Arc::new(substrate_test_runtime_client::new(true)); + let mut client = Arc::new(substrate_test_runtime_client::new()); let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); api.subscribe_finalized_heads(Default::default(), subscriber); diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 5cf81e4449c6b..ef13b37ce42fe 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -132,7 +132,7 @@ fn should_return_child_storage() { #[test] fn should_call_contract() { - let client = Arc::new(substrate_test_runtime_client::new(true)); + let client = Arc::new(substrate_test_runtime_client::new()); let genesis_hash = client.genesis_hash(); let (client, _child) = new_full(client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, None); @@ -152,7 +152,7 @@ fn should_notify_about_storage_changes() { let (subscriber, id, mut transport) = Subscriber::new_test("test"); { - let mut client = Arc::new(substrate_test_runtime_client::new(true)); + let mut client = Arc::new(substrate_test_runtime_client::new()); let (api, _child) = new_full( client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), @@ -188,7 +188,7 @@ fn should_send_initial_storage_changes_and_notifications() { let (subscriber, id, mut transport) = Subscriber::new_test("test"); { - let mut client = Arc::new(substrate_test_runtime_client::new(true)); + let mut client = Arc::new(substrate_test_runtime_client::new()); let (api, _child) = new_full( client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), @@ -411,8 +411,7 @@ fn should_query_storage() { ); } - run_tests(Arc::new(substrate_test_runtime_client::new(true)), false); - run_tests(Arc::new(substrate_test_runtime_client::new(false)), false); + run_tests(Arc::new(substrate_test_runtime_client::new()), false); run_tests( Arc::new( TestClientBuilder::new() @@ -421,15 +420,6 @@ fn should_query_storage() { ), true, ); - run_tests( - Arc::new( - 
TestClientBuilder::new() - .state_hashed_value() - .changes_trie_config(Some(ChangesTrieConfiguration::new(4, 2))) - .build(), - ), - true, - ); } #[test] @@ -443,11 +433,7 @@ fn should_split_ranges() { #[test] fn should_return_runtime_version() { - should_return_runtime_version_inner(true); - should_return_runtime_version_inner(false); -} -fn should_return_runtime_version_inner(hashed_value: bool) { - let client = Arc::new(substrate_test_runtime_client::new(hashed_value)); + let client = Arc::new(substrate_test_runtime_client::new()); let (api, _child) = new_full( client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), @@ -475,7 +461,7 @@ fn should_notify_on_runtime_version_initially() { let (subscriber, id, mut transport) = Subscriber::new_test("test"); { - let client = Arc::new(substrate_test_runtime_client::new(true)); + let client = Arc::new(substrate_test_runtime_client::new()); let (api, _child) = new_full( client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor)), diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 379e61cecf0c0..93da822a43c7e 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -450,7 +450,7 @@ where spawn_handle, config.clone(), )?; - let genesis_state_version = Some(33); // TODO resolve from genesis_storage wasm. + let genesis_state_version = sp_runtime::StateVersion::default(); // TODO resolve from genesis_storage wasm. Ok(crate::client::Client::new( backend, executor, diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index b21a7d5d041d7..de15b031360ce 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -240,7 +240,7 @@ where keystore, sc_offchain::OffchainDb::factory_from_backend(&*backend), ); - let genesis_state_version = Some(33); // TODO resolve from genesis storage wasm + let genesis_state_version = sp_runtime::StateVersion::default(); // TODO resolve from genesis_storage wasm. 
Client::new( backend, call_executor, diff --git a/client/service/src/client/light.rs b/client/service/src/client/light.rs index 01c586c719472..672bb94e90509 100644 --- a/client/service/src/client/light.rs +++ b/client/service/src/client/light.rs @@ -68,7 +68,7 @@ where ClientConfig::default(), )?; let executor = GenesisCallExecutor::new(backend.clone(), local_executor); - let genesis_state_version = Some(33); // TODO resolve from genesis_storage wasm. + let genesis_state_version = sp_runtime::StateVersion::default(); // TODO resolve from genesis_storage wasm. Client::new( backend, executor, diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 1d0ffae5bf5ff..f5075456ab3f0 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -43,7 +43,7 @@ use sp_blockchain::{ Result as ClientResult, }; use sp_consensus::BlockOrigin; -use sp_core::{testing::TaskExecutor, NativeOrEncoded, H256}; +use sp_core::{testing::TaskExecutor, NativeOrEncoded, H256, StateVersion}; use sp_externalities::Extensions; use sp_runtime::{ generic::BlockId, @@ -273,7 +273,7 @@ fn local_state_is_created_when_genesis_state_is_available() { Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); let mut op = backend.begin_operation().unwrap(); op.set_block_data(header0, None, None, None, NewBlockState::Final).unwrap(); - op.set_genesis_state(Default::default(), true).unwrap(); + op.set_genesis_state(Default::default(), true, Default::default()).unwrap(); backend.commit_operation(op).unwrap(); match backend.state_at(BlockId::Number(0)).unwrap() { @@ -295,7 +295,8 @@ fn unavailable_state_is_created_when_genesis_state_is_unavailable() { #[test] fn light_aux_store_is_updated_via_non_importing_op() { - let backend = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + let backend = + Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); let mut op = 
ClientBackend::::begin_operation(&backend).unwrap(); BlockImportOperation::::insert_aux(&mut op, vec![(vec![1], Some(vec![2]))]).unwrap(); ClientBackend::::commit_operation(&backend, op).unwrap(); @@ -305,10 +306,6 @@ fn light_aux_store_is_updated_via_non_importing_op() { #[test] fn execution_proof_is_generated_and_checked() { - execution_proof_is_generated_and_checked_inner(true); - execution_proof_is_generated_and_checked_inner(false); -} -fn execution_proof_is_generated_and_checked_inner(hashed_value: bool) { fn execute(remote_client: &TestClient, at: u64, method: &'static str) -> (Vec, Vec) { let remote_block_id = BlockId::Number(at); let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); @@ -382,7 +379,8 @@ fn execution_proof_is_generated_and_checked_inner(hashed_value: bool) { } // prepare remote client - let mut remote_client = substrate_test_runtime_client::new(hashed_value); + let mut remote_client = substrate_test_runtime_client::new(); + for i in 1u32..3u32 { let mut digest = Digest::default(); digest.push(sp_runtime::generic::DigestItem::Other::(i.to_le_bytes().to_vec())); @@ -456,16 +454,16 @@ type TestChecker = LightDataChecker< DummyStorage, >; -fn prepare_for_read_proof_check(hashed_value: bool) -> (TestChecker, Header, StorageProof, u32) { +fn prepare_for_read_proof_check(state_version: StateVersion) -> (TestChecker, Header, StorageProof, u32) { // prepare remote client - let remote_client = substrate_test_runtime_client::new(hashed_value); + let remote_client = substrate_test_runtime_client::new(); let remote_block_id = BlockId::Number(0); let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); remote_block_header.state_root = remote_client .state_at(&remote_block_id) .unwrap() - .storage_root(std::iter::empty()) + .storage_root(std::iter::empty(), state_version) .0 .into(); @@ -492,7 +490,7 @@ fn 
prepare_for_read_proof_check(hashed_value: bool) -> (TestChecker, Header, Sto (local_checker, remote_block_header, remote_read_proof, heap_pages) } -fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { +fn prepare_for_read_child_proof_check(state_version: StateVersion) -> (TestChecker, Header, StorageProof, Vec) { use substrate_test_runtime_client::{DefaultTestClientBuilderExt, TestClientBuilderExt}; let child_info = ChildInfo::new_default(b"child1"); let child_info = &child_info; @@ -506,7 +504,7 @@ fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, V remote_block_header.state_root = remote_client .state_at(&remote_block_id) .unwrap() - .storage_root(std::iter::empty()) + .storage_root(std::iter::empty(), state_version) .0 .into(); @@ -536,10 +534,9 @@ fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, V fn prepare_for_header_proof_check( insert_cht: bool, - hashed_value: bool, ) -> (TestChecker, Hash, Header, StorageProof) { // prepare remote client - let mut remote_client = substrate_test_runtime_client::new(hashed_value); + let mut remote_client = substrate_test_runtime_client::new(); let mut local_headers_hashes = Vec::new(); for i in 0..4 { let block = remote_client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -582,12 +579,12 @@ fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { #[test] fn storage_read_proof_is_generated_and_checked() { - storage_read_proof_is_generated_and_checked_inner(true); - storage_read_proof_is_generated_and_checked_inner(false); + storage_read_proof_is_generated_and_checked_inner(StateVersion::V0); + storage_read_proof_is_generated_and_checked_inner(StateVersion::V1); } -fn storage_read_proof_is_generated_and_checked_inner(hashed_value: bool) { +fn storage_read_proof_is_generated_and_checked_inner(state_version: StateVersion) { let (local_checker, remote_block_header, remote_read_proof, heap_pages) = - 
prepare_for_read_proof_check(hashed_value); + prepare_for_read_proof_check(state_version); assert_eq!( (&local_checker as &dyn FetchChecker) .check_read_proof( @@ -609,9 +606,10 @@ fn storage_read_proof_is_generated_and_checked_inner(hashed_value: bool) { #[test] fn storage_child_read_proof_is_generated_and_checked() { + let state_version = StateVersion::default(); let child_info = ChildInfo::new_default(&b"child1"[..]); let (local_checker, remote_block_header, remote_read_proof, result) = - prepare_for_read_child_proof_check(); + prepare_for_read_child_proof_check(state_version); assert_eq!( (&local_checker as &dyn FetchChecker) .check_read_child_proof( @@ -634,12 +632,8 @@ fn storage_child_read_proof_is_generated_and_checked() { #[test] fn header_proof_is_generated_and_checked() { - header_proof_is_generated_and_checked_inner(true); - header_proof_is_generated_and_checked_inner(false); -} -fn header_proof_is_generated_and_checked_inner(hashed: bool) { let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = - prepare_for_header_proof_check(true, hashed); + prepare_for_header_proof_check(true); assert_eq!( (&local_checker as &dyn FetchChecker) .check_header_proof( @@ -659,7 +653,7 @@ fn header_proof_is_generated_and_checked_inner(hashed: bool) { #[test] fn check_header_proof_fails_if_cht_root_is_invalid() { let (local_checker, _, mut remote_block_header, remote_header_proof) = - prepare_for_header_proof_check(true, true); + prepare_for_header_proof_check(true); remote_block_header.number = 100; assert!((&local_checker as &dyn FetchChecker) .check_header_proof( @@ -677,7 +671,7 @@ fn check_header_proof_fails_if_cht_root_is_invalid() { #[test] fn check_header_proof_fails_if_invalid_header_provided() { let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = - prepare_for_header_proof_check(true, true); + prepare_for_header_proof_check(true); remote_block_header.number = 100; assert!((&local_checker as &dyn 
FetchChecker) .check_header_proof( diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 9ebf97c89821e..de0c7923201a0 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -32,7 +32,7 @@ use sp_api::ProvideRuntimeApi; use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError, SelectChain}; use sp_core::{blake2_256, testing::TaskExecutor, ChangesTrieConfiguration, H256}; use sp_runtime::{ - generic::BlockId, + generic::BlockId, StateVersion, traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}, ConsensusEngineId, DigestItem, Justifications, }; @@ -261,7 +261,7 @@ fn construct_genesis_should_work_with_native() { .genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); - let backend = InMemoryBackend::from(storage); + let backend = InMemoryBackend::from((storage, StateVersion::default())); let (b1data, _b1hash) = block1(genesis_hash, &backend); let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); @@ -296,7 +296,7 @@ fn construct_genesis_should_work_with_wasm() { .genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); - let backend = InMemoryBackend::from(storage); + let backend = InMemoryBackend::from((storage, StateVersion::default())); // TODO state version from runtime code?? 
let (b1data, _b1hash) = block1(genesis_hash, &backend); let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); @@ -331,7 +331,7 @@ fn construct_genesis_with_bad_transaction_should_panic() { .genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); - let backend = InMemoryBackend::from(storage); + let backend = InMemoryBackend::from((storage, StateVersion::default())); let (b1data, _b1hash) = block1(genesis_hash, &backend); let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); @@ -355,11 +355,7 @@ fn construct_genesis_with_bad_transaction_should_panic() { #[test] fn client_initializes_from_genesis_ok() { - client_initializes_from_genesis_ok_inner(false); - client_initializes_from_genesis_ok_inner(true); -} -fn client_initializes_from_genesis_ok_inner(hashed_value: bool) { - let client = substrate_test_runtime_client::new(hashed_value); + let client = substrate_test_runtime_client::new(); assert_eq!( client @@ -385,7 +381,7 @@ fn client_initializes_from_genesis_ok_inner(hashed_value: bool) { #[test] fn block_builder_works_with_no_transactions() { - let mut client = substrate_test_runtime_client::new(true); + let mut client = substrate_test_runtime_client::new(); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -396,11 +392,7 @@ fn block_builder_works_with_no_transactions() { #[test] fn block_builder_works_with_transactions() { - block_builder_works_with_transactions_inner(true); - block_builder_works_with_transactions_inner(false); -} -fn block_builder_works_with_transactions_inner(hashed_value: bool) { - let mut client = substrate_test_runtime_client::new(hashed_value); + let mut client = substrate_test_runtime_client::new(); let mut builder = 
client.new_block(Default::default()).unwrap(); @@ -445,7 +437,7 @@ fn block_builder_works_with_transactions_inner(hashed_value: bool) { #[test] fn block_builder_does_not_include_invalid() { - let mut client = substrate_test_runtime_client::new(true); + let mut client = substrate_test_runtime_client::new(); let mut builder = client.new_block(Default::default()).unwrap(); @@ -515,7 +507,7 @@ fn best_containing_with_hash_not_found() { fn uncles_with_only_ancestors() { // block tree: // G -> A1 -> A2 - let mut client = substrate_test_runtime_client::new(true); + let mut client = substrate_test_runtime_client::new(); // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -535,7 +527,7 @@ fn uncles_with_multiple_forks() { // A1 -> B2 -> B3 -> B4 // B2 -> C3 // A1 -> D2 - let mut client = substrate_test_runtime_client::new(true); + let mut client = substrate_test_runtime_client::new(); // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -1210,7 +1202,7 @@ fn key_changes_works() { #[test] fn import_with_justification() { - let mut client = substrate_test_runtime_client::new(true); + let mut client = substrate_test_runtime_client::new(); // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -1247,7 +1239,7 @@ fn import_with_justification() { #[test] fn importing_diverged_finalized_block_should_trigger_reorg() { - let mut client = substrate_test_runtime_client::new(true); + let mut client = substrate_test_runtime_client::new(); // G -> A1 -> A2 // \ @@ -1295,7 +1287,7 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { #[test] fn finalizing_diverged_block_should_trigger_reorg() { let (mut client, select_chain) = - TestClientBuilder::new().state_hashed_value().build_with_longest_chain(); + TestClientBuilder::new().build_with_longest_chain(); // G -> A1 -> A2 // \ @@ -1368,7 +1360,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { #[test] 
fn get_header_by_block_number_doesnt_panic() { - let client = substrate_test_runtime_client::new(true); + let client = substrate_test_runtime_client::new(); // backend uses u32 for block numbers, make sure we don't panic when // trying to convert @@ -1379,7 +1371,7 @@ fn get_header_by_block_number_doesnt_panic() { #[test] fn state_reverted_on_reorg() { sp_tracing::try_init_simple(); - let mut client = substrate_test_runtime_client::new(true); + let mut client = substrate_test_runtime_client::new(); let current_balance = |client: &substrate_test_runtime_client::TestClient| { client @@ -2035,11 +2027,7 @@ fn storage_keys_iter_prefix_and_start_key_works() { #[test] fn storage_keys_iter_works() { - storage_keys_iter_works_inner(true); - storage_keys_iter_works_inner(false); -} -fn storage_keys_iter_works_inner(hashed_value: bool) { - let client = substrate_test_runtime_client::new(hashed_value); + let client = substrate_test_runtime_client::new(); let prefix = StorageKey(hex!("").to_vec()); diff --git a/client/transaction-pool/tests/pool.rs b/client/transaction-pool/tests/pool.rs index 4d1edf5b04818..6c34d05cd5dcb 100644 --- a/client/transaction-pool/tests/pool.rs +++ b/client/transaction-pool/tests/pool.rs @@ -854,7 +854,7 @@ fn ready_set_should_eventually_resolve_when_block_update_arrives() { fn should_not_accept_old_signatures() { use std::convert::TryFrom; - let client = Arc::new(substrate_test_runtime_client::new(true)); + let client = Arc::new(substrate_test_runtime_client::new()); let pool = Arc::new( BasicPool::new_test(Arc::new(FullChainApi::new( @@ -895,7 +895,7 @@ fn should_not_accept_old_signatures() { #[test] fn import_notification_to_pool_maintain_works() { - let mut client = Arc::new(substrate_test_runtime_client::new(true)); + let mut client = Arc::new(substrate_test_runtime_client::new()); let pool = Arc::new( BasicPool::new_test(Arc::new(FullChainApi::new( diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 
841b69dafed1a..a948853ff2a44 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -359,7 +359,7 @@ fn storage_instance_independence() { module2::DoubleMap::::insert(&0, &0, &0); module2::DoubleMap::::insert(&0, &0, &0); }); - // 12 storage values and threshold. + // 12 storage values. assert_eq!(storage.top.len(), 12); } diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 181a97aaada6b..7b6ec9856d9f4 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1441,15 +1441,14 @@ impl Pallet { /// Get the basic externalities for this pallet, useful for tests. #[cfg(any(feature = "std", test))] pub fn externalities() -> TestExternalities { - let storage = sp_core::storage::Storage { + TestExternalities::new(sp_core::storage::Storage { top: map![ >::hashed_key_for(T::BlockNumber::zero()) => [69u8; 32].encode(), >::hashed_key().to_vec() => T::BlockNumber::one().encode(), >::hashed_key().to_vec() => [69u8; 32].encode() ], children_default: map![], - }; - TestExternalities::new(storage) + }) } /// Get the current events deposited by the runtime. 
diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index ba079f80ccd25..4169d1fc65565 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -282,7 +282,7 @@ fn generate_runtime_api_base_structures() -> Result { fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { self.recorder .take() - .map(|recorder| recorder.to_storage_proof::<#crate_::HashFor>()) + .map(|recorder| recorder.to_storage_proof()) } fn into_storage_changes( diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index ba9f7e9e5cafa..6bb0cf22dd57f 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -674,17 +674,6 @@ pub struct OldRuntimeVersion { pub apis: ApisVec, } -#[derive(codec::Encode, codec::Decode)] // TODO same use as OldRuntimeVersion -pub struct OldRuntimeVersion2 { - pub spec_name: RuntimeString, - pub impl_name: RuntimeString, - pub authoring_version: u32, - pub spec_version: u32, - pub impl_version: u32, - pub apis: ApisVec, - pub transaction_version: u32, -} - impl From for RuntimeVersion { fn from(x: OldRuntimeVersion) -> Self { Self { diff --git a/primitives/api/test/benches/bench.rs b/primitives/api/test/benches/bench.rs index 3232204b9b128..b3d96a2db6a56 100644 --- a/primitives/api/test/benches/bench.rs +++ b/primitives/api/test/benches/bench.rs @@ -25,7 +25,7 @@ use substrate_test_runtime_client::{ fn sp_api_benchmark(c: &mut Criterion) { c.bench_function("add one with same runtime api", |b| { - let client = substrate_test_runtime_client::new(true); + let client = substrate_test_runtime_client::new(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); @@ -33,14 +33,14 @@ fn sp_api_benchmark(c: &mut Criterion) { }); c.bench_function("add one with recreating runtime api", |b| { - let client = substrate_test_runtime_client::new(true); + let client = 
substrate_test_runtime_client::new(); let block_id = BlockId::Number(client.chain_info().best_number); b.iter(|| client.runtime_api().benchmark_add_one(&block_id, &1)) }); c.bench_function("vector add one with same runtime api", |b| { - let client = substrate_test_runtime_client::new(true); + let client = substrate_test_runtime_client::new(); let runtime_api = client.runtime_api(); let block_id = BlockId::Number(client.chain_info().best_number); let data = vec![0; 1000]; @@ -49,7 +49,7 @@ fn sp_api_benchmark(c: &mut Criterion) { }); c.bench_function("vector add one with recreating runtime api", |b| { - let client = substrate_test_runtime_client::new(true); + let client = substrate_test_runtime_client::new(); let block_id = BlockId::Number(client.chain_info().best_number); let data = vec![0; 1000]; diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 4bf655b1656fc..4064dbdb5fd77 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -191,7 +191,7 @@ pub trait Storage { /// /// Returns a `Vec` that holds the SCALE encoded hash. fn root(&mut self) -> Vec { - self.storage_root(None) + self.storage_root(sp_core::StateVersion::V0) } #[version(2)] @@ -201,7 +201,7 @@ pub trait Storage { /// /// Returns a `Vec` that holds the SCALE encoded hash. fn root(&mut self) -> Vec { - self.storage_root(sp_core::DEFAULT_STATE_HASHING) + self.storage_root(sp_core::StateVersion::V1) } /// "Commit" all existing operations and get the resulting storage change root. @@ -389,7 +389,7 @@ pub trait DefaultChildStorage { /// Returns a `Vec` that holds the SCALE encoded hash. fn root(&mut self, storage_key: &[u8]) -> Vec { let child_info = ChildInfo::new_default(storage_key); - self.child_storage_root(&child_info, None) + self.child_storage_root(&child_info, sp_core::StateVersion::V0) } /// Default child root calculation. 
@@ -404,7 +404,7 @@ pub trait DefaultChildStorage { #[version(2)] fn root(&mut self, storage_key: &[u8]) -> Vec { let child_info = ChildInfo::new_default(storage_key); - self.child_storage_root(&child_info, sp_core::DEFAULT_STATE_HASHING) + self.child_storage_root(&child_info, sp_core::StateVersion::V1) } /// Child storage key iteration. diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 8d68d47b97ce9..e9e22b90c2d0c 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -137,13 +137,11 @@ pub trait Backend: sp_std::fmt::Debug { /// Calculate the storage root, with given delta over what is already stored in /// the backend, and produce a "transaction" that can be used to commit. - /// `alt_hashing` indicate if trie state should apply alternate hashing - /// scheme (inner value hashed). /// Does not include child storage updates. fn storage_root<'a>( &self, delta: impl Iterator)>, - threshold: StateVersion, + state_version: StateVersion, ) -> (H::Out, Self::Transaction) where H::Out: Ord; @@ -155,7 +153,7 @@ pub trait Backend: sp_std::fmt::Debug { &self, child_info: &ChildInfo, delta: impl Iterator)>, - threshold: StateVersion, + state_version: StateVersion, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord; @@ -190,7 +188,7 @@ pub trait Backend: sp_std::fmt::Debug { child_deltas: impl Iterator< Item = (&'a ChildInfo, impl Iterator)>), >, - threshold: StateVersion, + state_version: StateVersion, ) -> (H::Out, Self::Transaction) where H::Out: Ord + Encode, @@ -199,7 +197,7 @@ pub trait Backend: sp_std::fmt::Debug { let mut child_roots: Vec<_> = Default::default(); // child first for (child_info, child_delta) in child_deltas { - let (child_root, empty, child_txs) = self.child_storage_root(&child_info, child_delta, threshold); + let (child_root, empty, child_txs) = self.child_storage_root(&child_info, child_delta, state_version); let prefixed_storage_key = 
child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { @@ -212,7 +210,7 @@ pub trait Backend: sp_std::fmt::Debug { delta .map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))) .chain(child_roots.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..])))), - threshold, + state_version, ); txs.consolidate(parent_txs); (root, txs) diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 0043ccc90f2c3..0db720ef54ae9 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -273,7 +273,7 @@ impl Externalities for BasicExternalities { crate::ext::StorageAppend::new(current).append(value); } - fn storage_root(&mut self, threshold: StateVersion) -> Vec { + fn storage_root(&mut self, state_version: StateVersion) -> Vec { let mut top = self.inner.top.clone(); let prefixed_keys: Vec<_> = self .inner @@ -286,7 +286,7 @@ impl Externalities for BasicExternalities { // type of child trie support. let empty_hash = empty_child_trie_root::>(); for (prefixed_storage_key, child_info) in prefixed_keys { - let child_root = self.child_storage_root(&child_info, threshold); + let child_root = self.child_storage_root(&child_info, state_version); if &empty_hash[..] == &child_root[..] 
{ top.remove(prefixed_storage_key.as_slice()); } else { @@ -294,19 +294,19 @@ impl Externalities for BasicExternalities { } } - let layout = if let Some(threshold) = threshold { - Layout::::with_alt_hashing(threshold) + let layout = if let Some(threshold) = state_version.state_value_threshold() { + Layout::::with_max_inline_value(threshold) } else { Layout::::default() }; layout.trie_root(self.inner.top.clone()).as_ref().into() } - fn child_storage_root(&mut self, child_info: &ChildInfo, threshold: StateVersion) -> Vec { + fn child_storage_root(&mut self, child_info: &ChildInfo, state_version: StateVersion) -> Vec { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); let in_mem = crate::in_memory_backend::new_in_mem::(); - in_mem.child_storage_root(&child.child_info, delta, threshold).0 + in_mem.child_storage_root(&child.child_info, delta, state_version).0 } else { empty_child_trie_root::>() } @@ -397,7 +397,7 @@ mod tests { const ROOT: [u8; 32] = hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); - assert_eq!(&ext.storage_root(None)[..], &ROOT); + assert_eq!(&ext.storage_root(StateVersion::default())[..], &ROOT); } #[test] diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 732f2e6bfd5e9..d51678b345ae8 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -382,17 +382,20 @@ mod test { ) { let child_info_1 = ChildInfo::new_default(b"storage_key1"); let child_info_2 = ChildInfo::new_default(b"storage_key2"); - let backend: InMemoryBackend<_> = (vec![ - (vec![100], vec![255]), - (vec![101], vec![255]), - (vec![102], vec![255]), - (vec![103], vec![255]), - (vec![104], vec![255]), - (vec![105], vec![255]), - ] - .into_iter() - .collect::>(), None) - .into(); + let backend: InMemoryBackend<_> = ( + vec![ + 
(vec![100], vec![255]), + (vec![101], vec![255]), + (vec![102], vec![255]), + (vec![103], vec![255]), + (vec![104], vec![255]), + (vec![105], vec![255]), + ] + .into_iter() + .collect::>(), + sp_core::StateVersion::V0, + ) + .into(); let prefixed_child_trie_key1 = child_info_1.prefixed_storage_key(); let storage = InMemoryStorage::with_inputs( vec![ diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 1f12cf19ae553..40148095247dd 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -201,8 +201,6 @@ impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { self.0.get(key, prefix) } - - fn access_from(&self, _key: &H::Out) {} } /// Changes trie configuration. diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index b1fca1eb971a9..6a81edc90ff11 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -211,6 +211,4 @@ where fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { self.storage.get(key, prefix) } - - fn access_from(&self, _key: &H::Out) {} } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 193cdd6fd558b..5b19b6f29ee75 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -24,7 +24,7 @@ use crate::{ use codec::{Decode, Encode, EncodeAppend}; use hash_db::Hasher; use sp_core::{ - hexdisplay::HexDisplay, StateVersion, DEFAULT_STATE_HASHING, + hexdisplay::HexDisplay, StateVersion, storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, }; use sp_externalities::{Extension, ExtensionStore, Extensions, Externalities}; @@ -718,7 +718,7 @@ where None, Default::default(), self.storage_transaction_cache, - None, // 
using any state + Default::default(), // using any state ) .expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL); @@ -730,7 +730,7 @@ where fn commit(&mut self) { // Bench always use latest state. - let state_threshold = DEFAULT_STATE_HASHING; + let state_threshold = StateVersion::default(); for _ in 0..self.overlay.transaction_depth() { self.overlay.commit_transaction().expect(BENCHMARKING_FN); } @@ -1038,7 +1038,7 @@ mod tests { vec![40] => vec![40] ], children_default: map![], - }, DEFAULT_STATE_HASHING) + }, StateVersion::default()) .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1082,7 +1082,7 @@ mod tests { vec![30] => vec![30] ], children_default: map![], - }, DEFAULT_STATE_HASHING) + }, StateVersion::default()) .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1113,7 +1113,7 @@ mod tests { child_info: child_info.to_owned(), } ], - }, DEFAULT_STATE_HASHING) + }, StateVersion::default()) .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1158,7 +1158,7 @@ mod tests { child_info: child_info.to_owned(), } ], - }, DEFAULT_STATE_HASHING) + }, StateVersion::default()) .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1195,7 +1195,7 @@ mod tests { child_info: child_info.to_owned(), } ], - }, DEFAULT_STATE_HASHING) + }, StateVersion::default()) .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 1701e5471a819..6b68337fb2451 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -44,10 +44,10 @@ where pub fn update, StorageCollection)>>( &self, changes: T, - state_threshold: StateVersion, + state_version: StateVersion, ) -> Self { let mut clone = self.clone(); - clone.insert(changes, 
state_threshold); + clone.insert(changes, state_version); clone } @@ -55,7 +55,7 @@ where pub fn insert, StorageCollection)>>( &mut self, changes: T, - state_threshold: StateVersion, + state_version: StateVersion, ) { let (top, child) = changes.into_iter().partition::, _>(|v| v.0.is_none()); let (root, transaction) = self.full_storage_root( @@ -63,7 +63,7 @@ where child.iter().filter_map(|v| { v.0.as_ref().map(|c| (c, v.1.iter().map(|(k, v)| (&k[..], v.as_deref())))) }), - state_threshold, + state_version, ); self.apply_transaction(root, transaction); @@ -187,7 +187,7 @@ mod tests { /// Assert in memory backend with only child trie keys works as trie backend. #[test] fn in_memory_with_child_trie_only() { - let state_hash = sp_core::DEFAULT_STATE_HASHING; + let state_hash = sp_core::StateVersion::default(); let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; @@ -201,7 +201,7 @@ mod tests { #[test] fn insert_multiple_times_child_data_works() { - let state_hash = sp_core::DEFAULT_STATE_HASHING; + let state_hash = sp_core::StateVersion::default(); let mut storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index d79d262199d2a..f245c1fc38693 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1000,7 +1000,7 @@ mod tests { use codec::{Decode, Encode}; use sp_core::{ map, - DEFAULT_STATE_HASHING, StateVersion, + StateVersion, storage::ChildInfo, testing::TaskExecutor, traits::{CodeExecutor, Externalities, RuntimeCode}, @@ -1072,8 +1072,8 @@ mod tests { #[test] fn execute_works() { - execute_works_inner(None); - execute_works_inner(DEFAULT_STATE_HASHING); + execute_works_inner(StateVersion::V0); + execute_works_inner(StateVersion::V1); } fn execute_works_inner(hashed: StateVersion) { let backend = trie_backend::tests::test_trie(hashed); @@ -1102,8 +1102,8 @@ mod tests { 
#[test] fn execute_works_with_native_else_wasm() { - execute_works_with_native_else_wasm_inner(None); - execute_works_with_native_else_wasm_inner(DEFAULT_STATE_HASHING); + execute_works_with_native_else_wasm_inner(StateVersion::V0); + execute_works_with_native_else_wasm_inner(StateVersion::V1); } fn execute_works_with_native_else_wasm_inner(state_hash: StateVersion) { let backend = trie_backend::tests::test_trie(state_hash); @@ -1132,8 +1132,8 @@ mod tests { #[test] fn dual_execution_strategy_detects_consensus_failure() { - dual_execution_strategy_detects_consensus_failure_inner(None); - dual_execution_strategy_detects_consensus_failure_inner(DEFAULT_STATE_HASHING); + dual_execution_strategy_detects_consensus_failure_inner(StateVersion::V0); + dual_execution_strategy_detects_consensus_failure_inner(StateVersion::V1); } fn dual_execution_strategy_detects_consensus_failure_inner(state_hash: StateVersion) { let mut consensus_failed = false; @@ -1172,8 +1172,8 @@ mod tests { #[test] fn prove_execution_and_proof_check_works() { - prove_execution_and_proof_check_works_inner(DEFAULT_STATE_HASHING); - prove_execution_and_proof_check_works_inner(None); + prove_execution_and_proof_check_works_inner(StateVersion::V0); + prove_execution_and_proof_check_works_inner(StateVersion::V1); } fn prove_execution_and_proof_check_works_inner(state_hash: StateVersion) { let executor = DummyCodeExecutor { @@ -1223,7 +1223,7 @@ mod tests { b"abc".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"3".to_vec() ]; - let state = InMemoryBackend::::from((initial, DEFAULT_STATE_HASHING)); + let state = InMemoryBackend::::from((initial, StateVersion::default())); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -1304,7 +1304,7 @@ mod tests { b"d".to_vec() => b"3".to_vec() ], ]; - let backend = InMemoryBackend::::from((initial, DEFAULT_STATE_HASHING)); + let backend = InMemoryBackend::::from((initial, StateVersion::default())); let mut overlay = 
OverlayedChanges::default(); overlay.set_child_storage(&child_info, b"1".to_vec(), Some(b"1312".to_vec())); @@ -1352,7 +1352,7 @@ mod tests { b"d".to_vec() => b"3".to_vec() ], ]; - let backend = InMemoryBackend::::from((initial, DEFAULT_STATE_HASHING)); + let backend = InMemoryBackend::::from((initial, StateVersion::default())); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( @@ -1540,8 +1540,8 @@ mod tests { #[test] fn prove_read_and_proof_check_works() { - prove_read_and_proof_check_works_inner(None); - prove_read_and_proof_check_works_inner(DEFAULT_STATE_HASHING); + prove_read_and_proof_check_works_inner(StateVersion::V0); + prove_read_and_proof_check_works_inner(StateVersion::V1); } fn prove_read_and_proof_check_works_inner(state_hash: StateVersion) { let child_info = ChildInfo::new_default(b"sub1"); @@ -1592,7 +1592,7 @@ mod tests { #[test] fn prove_read_with_size_limit_works() { - let state_hash = None; + let state_hash = StateVersion::V0; let remote_backend = trie_backend::tests::test_trie(state_hash); let remote_root = remote_backend.storage_root(::std::iter::empty(), state_hash).0; let (proof, count) = @@ -1645,7 +1645,7 @@ mod tests { #[test] fn inner_state_hashing_switch_proofs() { let mut layout = Layout::default(); - let mut state_hash = None; + let mut state_hash = StateVersion::V0; let (mut mdb, mut root) = trie_backend::tests::test_db(state_hash); { let mut trie = @@ -1681,8 +1681,8 @@ mod tests { let root1 = root.clone(); // do switch - layout = Layout::with_alt_hashing(sp_core::storage::DEFAULT_ALT_HASH_THRESHOLD); - state_hash = DEFAULT_STATE_HASHING; + layout = Layout::with_max_inline_value(sp_core::storage::DEFAULT_MAX_INLINE_VALUE); + state_hash = StateVersion::V1; // update with same value do not change { let mut trie = @@ -1715,8 +1715,8 @@ mod tests { #[test] fn compact_multiple_child_trie() { - let size_inner_hash = 
compact_multiple_child_trie_inner(DEFAULT_STATE_HASHING); - let size_no_inner_hash = compact_multiple_child_trie_inner(None); + let size_inner_hash = compact_multiple_child_trie_inner(StateVersion::V1); + let size_no_inner_hash = compact_multiple_child_trie_inner(StateVersion::V0); assert!(size_inner_hash < size_no_inner_hash); } fn compact_multiple_child_trie_inner(state_hash: StateVersion) -> usize { @@ -1778,7 +1778,7 @@ #[test] fn child_storage_uuid() { - let state_hash = None; + let state_hash = StateVersion::V0; let child_info_1 = ChildInfo::new_default(b"sub_test1"); let child_info_2 = ChildInfo::new_default(b"sub_test2"); @@ -1817,7 +1817,7 @@ b"aaa".to_vec() => b"0".to_vec(), b"bbb".to_vec() => b"".to_vec() ]; - let state = InMemoryBackend::::from((initial, DEFAULT_STATE_HASHING)); + let state = InMemoryBackend::::from((initial, StateVersion::default())); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); @@ -1849,7 +1849,7 @@ #[test] fn runtime_registered_extensions_are_removed_after_execution() { - let state_hash = DEFAULT_STATE_HASHING; + let state_hash = StateVersion::default(); use sp_externalities::ExternalitiesExt; sp_externalities::decl_extension! 
{ struct DummyExt(u32); diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index e97948709ccaa..5db811ba0f854 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -926,7 +926,7 @@ mod tests { #[test] fn overlayed_storage_root_works() { - let state_hash = None; + let state_hash = StateVersion::default(); let initial: BTreeMap<_, _> = vec![ (b"doe".to_vec(), b"reindeer".to_vec()), (b"dog".to_vec(), b"puppyXXX".to_vec()), diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 6af9c6a2aafe9..d7d626003c9c2 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -31,9 +31,12 @@ use sp_core::StateVersion; pub use sp_trie::trie_types::TrieError; use sp_trie::{ empty_child_trie_root, read_child_trie_value_with, read_trie_value_with, record_all_keys, - Layout, MemoryDB, Meta, Recorder, StorageProof, + Layout, MemoryDB, Recorder, StorageProof, +}; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, }; -use std::{collections::HashMap, sync::Arc}; /// Patricia trie-based backend specialized in get value proofs. pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { @@ -109,7 +112,7 @@ where #[derive(Default)] struct ProofRecorderInner { /// All the records that we have stored so far. - records: HashMap>, + records: HashMap>, /// The encoded size of all recorded values. encoded_size: usize, } @@ -120,47 +123,25 @@ pub struct ProofRecorder { inner: Arc>>, } -impl ProofRecorder { +impl ProofRecorder { /// Record the given `key` => `val` combination. 
- pub fn record(&self, key: Hash, val: Option) { + pub fn record(&self, key: Hash, val: Option) { let mut inner = self.inner.write(); + let encoded_size = if let Entry::Vacant(entry) = inner.records.entry(key) { + let encoded_size = val.as_ref().map(Encode::encoded_size).unwrap_or(0); - let ProofRecorderInner { encoded_size, records } = &mut *inner; - records.entry(key).or_insert_with(|| { - val.map(|val| { - let mut val = (val, Meta::default(), false); - sp_trie::resolve_encoded_meta::(&mut val); - *encoded_size += sp_trie::estimate_entry_size(&val, H::LENGTH); - val - }) - }); - } + entry.insert(val); + encoded_size + } else { + 0 + }; - /// Record actual trie level value access. - pub fn access_from(&self, key: &Hash, hash_len: usize) { - let mut inner = self.inner.write(); - let ProofRecorderInner { encoded_size, records, .. } = &mut *inner; - records.entry(key.clone()).and_modify(|entry| { - if let Some(entry) = entry.as_mut() { - if !entry.2 { - let old_size = sp_trie::estimate_entry_size(entry, hash_len); - entry.2 = true; - let new_size = sp_trie::estimate_entry_size(entry, hash_len); - *encoded_size += new_size; - *encoded_size -= old_size; - } - } - }); + inner.encoded_size += encoded_size; } /// Returns the value at the given `key`. pub fn get(&self, key: &Hash) -> Option> { - self.inner - .read() - .records - .get(key) - .as_ref() - .map(|v| v.as_ref().map(|v| v.0.clone())) + self.inner.read().records.get(key).cloned() } /// Returns the estimated encoded size of the proof. @@ -173,24 +154,13 @@ impl ProofRecorder { } /// Convert into a [`StorageProof`]. 
- pub fn to_storage_proof(&self) -> StorageProof { + pub fn to_storage_proof(&self) -> StorageProof { let trie_nodes = self .inner .read() .records .iter() - .filter_map(|(_k, v)| { - v.as_ref().map(|v| { - let mut meta = v.1.clone(); - if let Some(hashed) = - sp_trie::to_hashed_variant::(v.0.as_slice(), &mut meta, v.2) - { - hashed - } else { - v.0.clone() - } - }) - }) + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) .collect(); StorageProof::new(trie_nodes) @@ -239,7 +209,7 @@ where /// Extracting the gathered unordered proof. pub fn extract_proof(&self) -> StorageProof { - self.0.essence().backend_storage().proof_recorder.to_storage_proof::() + self.0.essence().backend_storage().proof_recorder.to_storage_proof() } /// Returns the estimated encoded size of the proof. @@ -262,13 +232,9 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage } let backend_value = self.backend.get(key, prefix)?; - self.proof_recorder.record::(key.clone(), backend_value.clone()); + self.proof_recorder.record(key.clone(), backend_value.clone()); Ok(backend_value) } - - fn access_from(&self, key: &H::Out) { - self.proof_recorder.access_from(key, H::LENGTH); - } } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> std::fmt::Debug @@ -419,7 +385,6 @@ mod tests { }; use sp_runtime::traits::BlakeTwo256; use sp_trie::PrefixedMemoryDB; - use sp_core::DEFAULT_STATE_HASHING; fn test_proving<'a>( trie_backend: &'a TrieBackend, BlakeTwo256>, @@ -429,8 +394,8 @@ mod tests { #[test] fn proof_is_empty_until_value_is_read() { - proof_is_empty_until_value_is_read_inner(None); - proof_is_empty_until_value_is_read_inner(DEFAULT_STATE_HASHING); + proof_is_empty_until_value_is_read_inner(StateVersion::V0); + proof_is_empty_until_value_is_read_inner(StateVersion::V1); } fn proof_is_empty_until_value_is_read_inner(test_hash: StateVersion) { let trie_backend = test_trie(test_hash); @@ -439,8 +404,8 @@ mod tests { #[test] fn proof_is_non_empty_after_value_is_read() { - 
proof_is_non_empty_after_value_is_read_inner(None); - proof_is_non_empty_after_value_is_read_inner(DEFAULT_STATE_HASHING); + proof_is_non_empty_after_value_is_read_inner(StateVersion::V0); + proof_is_non_empty_after_value_is_read_inner(StateVersion::V1); } fn proof_is_non_empty_after_value_is_read_inner(test_hash: StateVersion) { let trie_backend = test_trie(test_hash); @@ -461,8 +426,8 @@ mod tests { #[test] fn passes_through_backend_calls() { - passes_through_backend_calls_inner(None); - passes_through_backend_calls_inner(DEFAULT_STATE_HASHING); + passes_through_backend_calls_inner(StateVersion::V0); + passes_through_backend_calls_inner(StateVersion::V1); } fn passes_through_backend_calls_inner(state_hash: StateVersion) { let trie_backend = test_trie(state_hash); @@ -478,8 +443,8 @@ mod tests { #[test] fn proof_recorded_and_checked_top() { - proof_recorded_and_checked_inner(DEFAULT_STATE_HASHING); - proof_recorded_and_checked_inner(None); + proof_recorded_and_checked_inner(StateVersion::V0); + proof_recorded_and_checked_inner(StateVersion::V1); } fn proof_recorded_and_checked_inner(state_hash: StateVersion) { let size_content = 34; // above hashable value treshold. 
@@ -514,8 +479,8 @@ mod tests { #[test] fn proof_recorded_and_checked_with_child() { - proof_recorded_and_checked_with_child_inner(None); - proof_recorded_and_checked_with_child_inner(DEFAULT_STATE_HASHING); + proof_recorded_and_checked_with_child_inner(StateVersion::V0); + proof_recorded_and_checked_with_child_inner(StateVersion::V1); } fn proof_recorded_and_checked_with_child_inner(state_hash: StateVersion) { let child_info_1 = ChildInfo::new_default(b"sub1"); @@ -574,8 +539,8 @@ mod tests { #[test] fn storage_proof_encoded_size_estimation_works() { - storage_proof_encoded_size_estimation_works_inner(None); - storage_proof_encoded_size_estimation_works_inner(DEFAULT_STATE_HASHING); + storage_proof_encoded_size_estimation_works_inner(StateVersion::V0); + storage_proof_encoded_size_estimation_works_inner(StateVersion::V1); } fn storage_proof_encoded_size_estimation_works_inner(state_hash: StateVersion) { let trie_backend = test_trie(state_hash); diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 313e8c99514a2..f403aa4caf9c7 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -62,8 +62,8 @@ where changes_trie_storage: ChangesTrieInMemoryStorage, /// Extensions. pub extensions: Extensions, - /// State hashing to apply during tests. - pub state_hashing: StateVersion, + /// State version to use during tests. + pub state_version: StateVersion, } impl TestExternalities @@ -90,17 +90,22 @@ where /// Create a new instance of `TestExternalities` with storage. pub fn new(storage: Storage) -> Self { - Self::new_with_code(&[], storage) + Self::new_with_code_and_state(&[], storage, Default::default()) + } + + /// Create a new instance of `TestExternalities` with storage for a given state version. + pub fn new_with_state_version(storage: Storage, state_version: StateVersion) -> Self { + Self::new_with_code_and_state(&[], storage, state_version) } /// New empty test externalities. 
pub fn new_empty() -> Self { - Self::new_with_code(&[], Storage::default()) + Self::new_with_code_and_state(&[], Storage::default(), Default::default()) } /// Create a new instance of `TestExternalities` with code and storage. pub fn new_with_code(code: &[u8], storage: Storage) -> Self { - Self::new_with_code_and_state(code, storage, sp_core::DEFAULT_STATE_HASHING) + Self::new_with_code_and_state(code, storage, Default::default()) } /// Create a new instance of `TestExternalities` with code and storage for a given state @@ -108,7 +113,7 @@ where pub fn new_with_code_and_state( code: &[u8], mut storage: Storage, - state_hashing: StateVersion, + state_version: StateVersion, ) -> Self { let mut overlay = OverlayedChanges::default(); let changes_trie_config = storage @@ -127,15 +132,17 @@ where let offchain_db = TestPersistentOffchainDB::new(); + let backend = (storage, state_version).into(); + TestExternalities { overlay, offchain_db, changes_trie_config, extensions, changes_trie_storage: ChangesTrieInMemoryStorage::new(), - backend: (storage, state_hashing).into(), + backend, storage_transaction_cache: Default::default(), - state_hashing, + state_version, } } @@ -156,7 +163,7 @@ where /// Insert key/value into backend pub fn insert(&mut self, k: StorageKey, v: StorageValue) { - self.backend.insert(vec![(None, vec![(k, Some(v))])], self.state_hashing); + self.backend.insert(vec![(None, vec![(k, Some(v))])], self.state_version); } /// Registers the given extension for this instance. @@ -185,7 +192,7 @@ where )) } - self.backend.update(transaction, self.state_hashing) + self.backend.update(transaction, self.state_version) } /// Commit all pending changes to the underlying backend. @@ -199,7 +206,7 @@ where None, Default::default(), &mut Default::default(), - self.state_hashing, + self.state_version, )?; self.backend @@ -256,7 +263,8 @@ where H::Out: Ord + 'static + codec::Codec, { fn default() -> Self { - Self::new(Default::default()) + // default to default version. 
+ Self::new_with_state_version(Storage::default(), Default::default()) } } @@ -265,7 +273,16 @@ where H::Out: Ord + 'static + codec::Codec, { fn from(storage: Storage) -> Self { - Self::new(storage) + Self::new_with_state_version(storage, Default::default()) + } +} + +impl From<(Storage, StateVersion)> for TestExternalities +where + H::Out: Ord + 'static + codec::Codec, +{ + fn from((storage, state_version): (Storage, StateVersion)) -> Self { + Self::new_with_state_version(storage, state_version) } } @@ -322,20 +339,20 @@ where mod tests { use super::*; use hex_literal::hex; - use sp_core::{storage::ChildInfo, traits::Externalities, H256, DEFAULT_STATE_HASHING}; + use sp_core::{storage::ChildInfo, traits::Externalities, H256}; use sp_runtime::traits::BlakeTwo256; #[test] fn commit_should_work() { let storage = Storage::default(); // avoid adding the trie threshold. - let mut ext = TestExternalities::::from(storage); + let mut ext = TestExternalities::::from((storage, Default::default())); let mut ext = ext.ext(); ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); let root = H256::from(hex!("ed4d8c799d996add422395a6abd7545491d40bd838d738afafa1b8a4de625489")); - assert_eq!(H256::from_slice(ext.storage_root(DEFAULT_STATE_HASHING).as_slice()), root); + assert_eq!(H256::from_slice(ext.storage_root(Default::default()).as_slice()), root); } #[test] diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 62787a378f2d8..f2b31c293667a 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -187,7 +187,7 @@ where fn storage_root<'a>( &self, delta: impl Iterator)>, - threshold: StateVersion, + state_version: StateVersion, ) -> (H::Out, Self::Transaction) where H::Out: Ord, @@ -198,8 +198,8 @@ where { let mut eph = 
Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); let res = || { - let layout = if let Some(threshold) = threshold { - sp_trie::Layout::with_alt_hashing(threshold) + let layout = if let Some(threshold) = state_version.state_value_threshold() { + sp_trie::Layout::with_max_inline_value(threshold) } else { sp_trie::Layout::default() }; @@ -219,7 +219,7 @@ where &self, child_info: &ChildInfo, delta: impl Iterator)>, - threshold: StateVersion, + state_version: StateVersion, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord, @@ -227,8 +227,8 @@ where let default_root = match child_info.child_type() { ChildType::ParentKeyId => empty_child_trie_root::>(), }; - let layout = if let Some(threshold) = threshold { - sp_trie::Layout::with_alt_hashing(threshold) + let layout = if let Some(threshold) = state_version.state_value_threshold() { + sp_trie::Layout::with_max_inline_value(threshold) } else { sp_trie::Layout::default() }; @@ -284,7 +284,7 @@ where pub mod tests { use super::*; use codec::Encode; - use sp_core::{H256, DEFAULT_STATE_HASHING}; + use sp_core::H256; use sp_runtime::traits::BlakeTwo256; use sp_trie::{trie_types::TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut}; use std::{collections::HashSet, iter}; @@ -305,8 +305,8 @@ pub mod tests { { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); - let mut trie = if let Some(hash) = hashed_value { - let layout = Layout::with_alt_hashing(hash); + let mut trie = if let Some(hash) = hashed_value.state_value_threshold() { + let layout = Layout::with_max_inline_value(hash); TrieDBMut::new_with_layout(&mut mdb, &mut root, layout) } else { TrieDBMut::new(&mut mdb, &mut root) @@ -334,8 +334,8 @@ pub mod tests { #[test] fn read_from_storage_returns_some() { - read_from_storage_returns_some_inner(None); - read_from_storage_returns_some_inner(DEFAULT_STATE_HASHING); + read_from_storage_returns_some_inner(StateVersion::V0); + read_from_storage_returns_some_inner(StateVersion::V1); } fn 
read_from_storage_returns_some_inner(state_hash: StateVersion) { assert_eq!(test_trie(state_hash).storage(b"key").unwrap(), Some(b"value".to_vec())); @@ -343,8 +343,8 @@ pub mod tests { #[test] fn read_from_child_storage_returns_some() { - read_from_child_storage_returns_some_inner(None); - read_from_child_storage_returns_some_inner(DEFAULT_STATE_HASHING); + read_from_child_storage_returns_some_inner(StateVersion::V0); + read_from_child_storage_returns_some_inner(StateVersion::V1); } fn read_from_child_storage_returns_some_inner(state_hash: StateVersion) { let test_trie = test_trie(state_hash); @@ -374,8 +374,8 @@ pub mod tests { #[test] fn read_from_storage_returns_none() { - read_from_storage_returns_none_inner(None); - read_from_storage_returns_none_inner(DEFAULT_STATE_HASHING); + read_from_storage_returns_none_inner(StateVersion::V0); + read_from_storage_returns_none_inner(StateVersion::V1); } fn read_from_storage_returns_none_inner(state_hash: StateVersion) { assert_eq!(test_trie(state_hash).storage(b"non-existing-key").unwrap(), None); @@ -383,8 +383,8 @@ pub mod tests { #[test] fn pairs_are_not_empty_on_non_empty_storage() { - pairs_are_not_empty_on_non_empty_storage_inner(None); - pairs_are_not_empty_on_non_empty_storage_inner(DEFAULT_STATE_HASHING); + pairs_are_not_empty_on_non_empty_storage_inner(StateVersion::V0); + pairs_are_not_empty_on_non_empty_storage_inner(StateVersion::V1); } fn pairs_are_not_empty_on_non_empty_storage_inner(state_hash: StateVersion) { assert!(!test_trie(state_hash).pairs().is_empty()); @@ -402,8 +402,8 @@ pub mod tests { #[test] fn storage_root_is_non_default() { - storage_root_is_non_default_inner(None); - storage_root_is_non_default_inner(DEFAULT_STATE_HASHING); + storage_root_is_non_default_inner(StateVersion::V0); + storage_root_is_non_default_inner(StateVersion::V1); } fn storage_root_is_non_default_inner(state_hash: StateVersion) { assert!(test_trie(state_hash).storage_root(iter::empty(), state_hash).0 != 
H256::repeat_byte(0)); @@ -411,8 +411,8 @@ pub mod tests { #[test] fn storage_root_transaction_is_non_empty() { - storage_root_transaction_is_non_empty_inner(None); - storage_root_transaction_is_non_empty_inner(DEFAULT_STATE_HASHING); + storage_root_transaction_is_non_empty_inner(StateVersion::V0); + storage_root_transaction_is_non_empty_inner(StateVersion::V1); } fn storage_root_transaction_is_non_empty_inner(state_hash: StateVersion) { let (new_root, mut tx) = @@ -423,8 +423,8 @@ pub mod tests { #[test] fn prefix_walking_works() { - prefix_walking_works_inner(None); - prefix_walking_works_inner(DEFAULT_STATE_HASHING); + prefix_walking_works_inner(StateVersion::V0); + prefix_walking_works_inner(StateVersion::V1); } fn prefix_walking_works_inner(state_hash: StateVersion) { let trie = test_trie(state_hash); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 2029b3e1bddee..1dd8b80792d0f 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -48,9 +48,6 @@ type Result = sp_std::result::Result; pub trait Storage: Send + Sync { /// Get a trie node. fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; - - /// Call back when value get accessed in trie. - fn access_from(&self, key: &H::Out); } /// Local cache for child root. @@ -476,12 +473,6 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB } } - fn access_from(&self, key: &H::Out, _at: Option<&H::Out>) -> Option { - // call back to storage even if the overlay was hit. 
- self.storage.access_from(key); - None - } - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { HashDB::get(self, key, prefix).is_some() } @@ -494,10 +485,6 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB HashDB::emplace(self.overlay, key, prefix, value) } - fn emplace_ref(&mut self, key: &H::Out, prefix: Prefix, value: &[u8]) { - HashDB::emplace_ref(self.overlay, key, prefix, value) - } - fn remove(&mut self, key: &H::Out, prefix: Prefix) { HashDB::remove(self.overlay, key, prefix) } @@ -508,10 +495,6 @@ impl<'a, S: 'a + TrieBackendStorage, H: Hasher> HashDBRef for Eph HashDB::get(self, key, prefix) } - fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> Option { - HashDB::access_from(self, key, at) - } - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { HashDB::contains(self, key, prefix) } @@ -524,9 +507,6 @@ pub trait TrieBackendStorage: Send + Sync { /// Get the value stored at key. fn get(&self, key: &H::Out, prefix: Prefix) -> Result>; - - /// Call back when value get accessed in trie. - fn access_from(&self, key: &H::Out); } // This implementation is used by normal storage trie clients. @@ -537,10 +517,6 @@ impl TrieBackendStorage for Arc> { fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { Storage::::get(self.deref(), key, prefix) } - - fn access_from(&self, key: &H::Out) { - Storage::::access_from(self.deref(), key) - } } impl TrieBackendStorage for sp_trie::GenericMemoryDB @@ -553,10 +529,6 @@ where fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { Ok(hash_db::HashDB::get(self, key, prefix)) } - - fn access_from(&self, key: &H::Out) { - HashDB::access_from(self, key, None); - } } impl, H: Hasher> AsHashDB for TrieBackendEssence { @@ -582,12 +554,6 @@ impl, H: Hasher> HashDB for TrieBackendEsse } } - fn access_from(&self, key: &H::Out, _at: Option<&H::Out>) -> Option { - // access storage since this is only to register access for proof. 
- self.storage.access_from(key); - None - } - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { HashDB::get(self, key, prefix).is_some() } @@ -600,10 +566,6 @@ impl, H: Hasher> HashDB for TrieBackendEsse unimplemented!(); } - fn emplace_ref(&mut self, _key: &H::Out, _prefix: Prefix, _value: &[u8]) { - unimplemented!(); - } - fn remove(&mut self, _key: &H::Out, _prefix: Prefix) { unimplemented!(); } @@ -614,10 +576,6 @@ impl, H: Hasher> HashDBRef for TrieBackendE HashDB::get(self, key, prefix) } - fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> Option { - HashDB::access_from(self, key, at) - } - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { HashDB::contains(self, key, prefix) } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index df216f7c4a723..9a234f08bba0f 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -244,7 +244,7 @@ pub fn trie_threshold_decode(mut encoded: &[u8]) -> Option { } /// Default value to use as a threshold for inner hashing. -pub const DEFAULT_ALT_HASH_THRESHOLD: u32 = 33; +pub const DEFAULT_MAX_INLINE_VALUE: u32 = 33; /// Information related to a child state. #[derive(Debug, Clone)] @@ -415,10 +415,31 @@ impl ChildTrieParentKeyId { /// Different state that can be applied. TODO rename to StateValueHashing. /// /// When a value is define, apply inner hashing over the given threshold. -pub type StateVersion = Option; +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum StateVersion { + /// Old state version, no value nodes. + V0, + /// New state version can use value nodes. + V1, +} + +impl Default for StateVersion { + fn default() -> Self { + StateVersion::V1 + } +} +impl StateVersion { + /// Threshold to apply for inline value of trie state. 
+ pub fn state_value_threshold(&self) -> Option { + match self { + StateVersion::V0 => None, + StateVersion::V1 => DEFAULT_STATE_HASHING, + } + } +} /// Default threshold value for activated inner hashing of trie state. -pub const DEFAULT_STATE_HASHING: StateVersion = Some(DEFAULT_ALT_HASH_THRESHOLD); +pub const DEFAULT_STATE_HASHING: Option = Some(DEFAULT_MAX_INLINE_VALUE); #[cfg(test)] mod tests { diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 594fb4c11de19..a474ec8c15700 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -36,7 +36,7 @@ pub use memory_db::prefixed_key; pub use memory_db::KeyFunction; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; -use sp_std::{borrow::Borrow, boxed::Box, fmt, marker::PhantomData, vec, vec::Vec}; +use sp_std::{borrow::Borrow, boxed::Box, fmt, marker::PhantomData, vec::Vec}; pub use storage_proof::{CompactProof, StorageProof}; /// Trie codec reexport, mainly child trie support /// for trie compact proof. @@ -47,8 +47,8 @@ use trie_db::proof::{generate_proof, verify_proof}; pub use trie_db::{ nibble_ops, node::{NodePlan, ValuePlan}, - CError, DBValue, Meta, Query, Recorder, Trie, TrieConfiguration, TrieDBIterator, - TrieDBKeyIterator, TrieLayout, TrieMut, + CError, DBValue, Query, Recorder, Trie, TrieConfiguration, TrieDBIterator, TrieDBKeyIterator, + TrieLayout, TrieMut, }; /// The Substrate format implementation of `TrieStream`. pub use trie_stream::TrieStream; @@ -75,9 +75,8 @@ impl Default for Layout { } impl Layout { - /// Layout with inner hashing active. - /// Will flag trie for hashing. - pub fn with_alt_hashing(threshold: u32) -> Self { + /// Layout with inner hash value size limit active. 
+ pub fn with_max_inline_value(threshold: u32) -> Self { Layout(Some(threshold), sp_std::marker::PhantomData) } } @@ -88,12 +87,11 @@ where { const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; - const USE_META: bool = true; type Hash = H; type Codec = NodeCodec; - fn alt_threshold(&self) -> Option { + fn max_inline_value(&self) -> Option { self.0 } } @@ -108,7 +106,7 @@ where A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::trie_root_no_extension::(input, self.alt_threshold()) + trie_root::trie_root_no_extension::(input, self.max_inline_value()) } fn trie_root_unhashed(&self, input: I) -> Vec @@ -117,7 +115,10 @@ where A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::unhashed_trie_no_extension::(input, self.alt_threshold()) + trie_root::unhashed_trie_no_extension::( + input, + self.max_inline_value(), + ) } fn encode_index(input: u32) -> Vec { @@ -214,9 +215,7 @@ where K: 'a + AsRef<[u8]>, V: 'a + AsRef<[u8]>, { - // No specific info to read from layout. - let layout = Default::default(); - verify_proof::, _, _, _>(root, proof, items, layout) + verify_proof::, _, _, _>(root, proof, items) } /// Determine a trie root given a hash DB and delta values. 
@@ -444,10 +443,6 @@ where self.0.get(key, (&derived_prefix.0, derived_prefix.1)) } - fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> Option { - self.0.access_from(key, at) - } - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) @@ -465,10 +460,6 @@ where self.0.get(key, (&derived_prefix.0, derived_prefix.1)) } - fn access_from(&self, key: &H::Out, at: Option<&H::Out>) -> Option { - self.0.access_from(key, at) - } - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) @@ -484,11 +475,6 @@ where self.0.emplace(key, (&derived_prefix.0, derived_prefix.1), value) } - fn emplace_ref(&mut self, key: &H::Out, prefix: Prefix, value: &[u8]) { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.emplace_ref(key, (&derived_prefix.0, derived_prefix.1), value) - } - fn remove(&mut self, key: &H::Out, prefix: Prefix) { let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); self.0.remove(key, (&derived_prefix.0, derived_prefix.1)) @@ -510,90 +496,9 @@ where } } -/// Representation of node with with inner hash instead of value. 
-fn inner_hashed_value(x: &[u8], range: Option<(usize, usize)>) -> Vec { - if let Some((start, end)) = range { - let len = x.len(); - if start < len && end == len { - // terminal inner hash - let hash_end = H::hash(&x[start..]); - let mut buff = vec![0; x.len() + hash_end.as_ref().len() - (end - start)]; - buff[..start].copy_from_slice(&x[..start]); - buff[start..].copy_from_slice(hash_end.as_ref()); - return buff - } - if start == 0 && end < len { - // start inner hash - let hash_start = H::hash(&x[..start]); - let hash_len = hash_start.as_ref().len(); - let mut buff = vec![0; x.len() + hash_len - (end - start)]; - buff[..hash_len].copy_from_slice(hash_start.as_ref()); - buff[hash_len..].copy_from_slice(&x[end..]); - return buff - } - if start < len && end < len { - // middle inner hash - let hash_middle = H::hash(&x[start..end]); - let hash_len = hash_middle.as_ref().len(); - let mut buff = vec![0; x.len() + hash_len - (end - start)]; - buff[..start].copy_from_slice(&x[..start]); - buff[start..start + hash_len].copy_from_slice(hash_middle.as_ref()); - buff[start + hash_len..].copy_from_slice(&x[end..]); - return buff - } - } - // if anything wrong default to hash - x.to_vec() -} - -/// Estimate encoded size of node. -pub fn estimate_entry_size(entry: &(DBValue, Meta, bool), hash_len: usize) -> usize { - use codec::Encode; - let mut full_encoded = entry.0.encoded_size(); - if !entry.2 && entry.1.apply_inner_hashing { - if let Some(range) = entry.1.range.as_ref() { - let value_size = range.end - range.start; - full_encoded -= value_size; - full_encoded += hash_len; - full_encoded += 1; - } - } - - full_encoded -} - -/// Switch to hashed value variant. 
-pub fn to_hashed_variant( - value: &[u8], - meta: &mut Meta, - used_value: bool, -) -> Option { - if !meta.contain_hash && meta.apply_inner_hashing && !used_value && meta.range.is_some() { - let mut stored = Vec::with_capacity(value.len() + 1); - // Warning this assumes that encoded value cannot start by this, - // so it is tightly coupled with the header type of the codec. - stored.push(trie_constants::DEAD_HEADER_META_HASHED_VALUE); - let range = meta.range.as_ref().expect("Tested in condition"); - // store hash instead of value. - let value = inner_hashed_value::(value, Some((range.start, range.end))); - stored.extend_from_slice(value.as_slice()); - meta.contain_hash = true; - return Some(stored) - } - None -} - -/// Decode plan in order to update meta early (needed to register proofs). -pub fn resolve_encoded_meta(entry: &mut (DBValue, Meta, bool)) { - use trie_db::NodeCodec; - let _ = as TrieLayout>::Codec::decode_plan(entry.0.as_slice(), &mut entry.1); -} - /// Constants used into trie simplification codec. mod trie_constants { const FIRST_PREFIX: u8 = 0b_00 << 6; - /// In proof this header is used when only hashed value is stored. 
- pub const DEAD_HEADER_META_HASHED_VALUE: u8 = EMPTY_TRIE | 0b_00_01; pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize; pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6; pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6; @@ -601,6 +506,7 @@ mod trie_constants { pub const EMPTY_TRIE: u8 = FIRST_PREFIX | (0b_00 << 4); pub const ALT_HASHING_LEAF_PREFIX_MASK: u8 = FIRST_PREFIX | (0b_1 << 5); pub const ALT_HASHING_BRANCH_WITH_MASK: u8 = FIRST_PREFIX | (0b_01 << 4); + pub const ESCAPE_COMPACT_HEADER: u8 = EMPTY_TRIE | 0b_00_01; } #[cfg(test)] @@ -609,7 +515,7 @@ mod tests { use codec::{Compact, Decode, Encode}; use hash_db::{HashDB, Hasher}; use hex_literal::hex; - use sp_core::{storage::DEFAULT_ALT_HASH_THRESHOLD as TRESHOLD, Blake2Hasher}; + use sp_core::{storage::DEFAULT_MAX_INLINE_VALUE as TRESHOLD, Blake2Hasher}; use trie_db::{DBValue, NodeCodec as NodeCodecT, Trie, TrieMut}; use trie_standardmap::{Alphabet, StandardMap, ValueMode}; @@ -665,7 +571,7 @@ mod tests { let layout = Layout::default(); check_equivalent::(input, layout.clone()); check_iteration::(input, layout); - let layout = Layout::with_alt_hashing(TRESHOLD); + let layout = Layout::with_max_inline_value(TRESHOLD); check_equivalent::(input, layout.clone()); check_iteration::(input, layout); } @@ -811,7 +717,7 @@ mod tests { random_should_work_inner(true); random_should_work_inner(false); } - fn random_should_work_inner(flag: bool) { + fn random_should_work_inner(limit_inline_value: bool) { let mut seed = ::Out::zero(); for test_i in 0..10_000 { if test_i % 50 == 0 { @@ -826,7 +732,11 @@ mod tests { } .make_with(seed.as_fixed_bytes_mut()); - let layout = if flag { Layout::with_alt_hashing(TRESHOLD) } else { Layout::default() }; + let layout = if limit_inline_value { + Layout::with_max_inline_value(TRESHOLD) + } else { + Layout::default() + }; let real = layout.trie_root(x.clone()); let mut memdb = MemoryDB::default(); let mut root = Default::default(); @@ -919,8 +829,12 @@ mod tests { 
iterator_works_inner(true); iterator_works_inner(false); } - fn iterator_works_inner(flag: bool) { - let layout = if flag { Layout::with_alt_hashing(TRESHOLD) } else { Layout::default() }; + fn iterator_works_inner(limit_inline_value: bool) { + let layout = if limit_inline_value { + Layout::with_max_inline_value(TRESHOLD) + } else { + Layout::default() + }; let pairs = vec![ (hex!("0103000000000000000464").to_vec(), hex!("0400000000").to_vec()), diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 1c6a2bec72924..e630f3222de1e 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -25,7 +25,7 @@ use sp_std::{borrow::Borrow, marker::PhantomData, ops::Range, vec::Vec}; use trie_db::{ self, nibble_ops, node::{NibbleSlicePlan, NodeHandlePlan, NodePlan, Value, ValuePlan}, - ChildReference, Meta, NodeCodec as NodeCodecT, Partial, + ChildReference, NodeCodec as NodeCodecT, Partial, }; /// Helper struct for trie node decoder. 
This implements `codec::Input` on a byte slice, while @@ -81,24 +81,22 @@ impl<'a> Input for ByteSliceInput<'a> { pub struct NodeCodec(PhantomData); impl NodeCodec { - fn decode_plan_inner_hashed(data: &[u8], meta: &mut Meta) -> Result { + fn decode_plan_inner_hashed(data: &[u8]) -> Result { let mut input = ByteSliceInput::new(data); let header = NodeHeader::decode(&mut input)?; let contains_hash = header.contains_hash_of_value(); - let alt_hashing = header.alt_hashing(); - meta.apply_inner_hashing = alt_hashing; let branch_has_value = if let NodeHeader::Branch(has_value, _) = &header { *has_value } else { - // alt_hash_branch + // hashed_value_branch true }; match header { NodeHeader::Null => Ok(NodePlan::Empty), - NodeHeader::AltHashBranch(nibble_count, _) | NodeHeader::Branch(_, nibble_count) => { + NodeHeader::HashedValueBranch(nibble_count) | NodeHeader::Branch(_, nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { @@ -112,12 +110,11 @@ impl NodeCodec { let bitmap_range = input.take(BITMAP_LENGTH)?; let bitmap = Bitmap::decode(&data[bitmap_range])?; let value = if branch_has_value { - if alt_hashing && contains_hash { + if contains_hash { ValuePlan::HashedValue(input.take(H::LENGTH)?) } else { - let with_len = input.offset; let count = >::decode(&mut input)?.0 as usize; - ValuePlan::Value(input.take(count)?, with_len) + ValuePlan::Value(input.take(count)?) 
} } else { ValuePlan::NoValue @@ -143,7 +140,7 @@ impl NodeCodec { children, }) }, - NodeHeader::AltHashLeaf(nibble_count, _) | NodeHeader::Leaf(nibble_count) => { + NodeHeader::HashedValueLeaf(nibble_count) | NodeHeader::Leaf(nibble_count) => { let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; // check that the padding is valid (if any) if padding && nibble_ops::pad_left(data[input.offset]) != 0 { @@ -154,12 +151,11 @@ impl NodeCodec { nibble_ops::NIBBLE_PER_BYTE, )?; let partial_padding = nibble_ops::number_padding(nibble_count); - let value = if alt_hashing && contains_hash { + let value = if contains_hash { ValuePlan::HashedValue(input.take(H::LENGTH)?) } else { - let with_len = input.offset; let count = >::decode(&mut input)?.0 as usize; - ValuePlan::Value(input.take(count)?, with_len) + ValuePlan::Value(input.take(count)?) }; Ok(NodePlan::Leaf { @@ -175,7 +171,7 @@ impl NodeCodecT for NodeCodec where H: Hasher, { - const OFFSET_CONTAINS_HASH: usize = 1; + const ESCAPE_HEADER: Option<&'static [u8]> = Some(&[trie_constants::ESCAPE_COMPACT_HEADER]); type Error = Error; type HashOut = H::Out; @@ -183,15 +179,8 @@ where H::hash(::empty_node()) } - fn decode_plan(data: &[u8], meta: &mut Meta) -> Result { - Self::decode_plan_inner_hashed(data, meta).map(|plan| { - meta.decoded_callback(&plan); - plan - }) - } - - fn decode_plan_inner(_data: &[u8]) -> Result { - unreachable!("decode_plan is implemented") + fn decode_plan(data: &[u8]) -> Result { + Self::decode_plan_inner_hashed(data) } fn is_empty_node(data: &[u8]) -> bool { @@ -202,44 +191,23 @@ where &[trie_constants::EMPTY_TRIE] } - fn leaf_node(partial: Partial, value: Value, meta: &mut Meta) -> Vec { + fn leaf_node(partial: Partial, value: Value) -> Vec { let contains_hash = matches!(&value, Value::HashedValue(..)); - // Note that we use AltHash type only if inner hashing will occur, - // this way we allow changing hash threshold. 
- // With fix inner hashing alt hash can be use with all node, but - // that is not better (encoding can use an additional nibble byte - // sometime). - let mut output = if meta - .try_inner_hashing - .as_ref() - .map(|threshold| value_do_hash(&value, threshold)) - .unwrap_or(meta.apply_inner_hashing) - { - if contains_hash { - partial_encode(partial, NodeKind::AltHashLeafHash) - } else { - partial_encode(partial, NodeKind::AltHashLeaf) - } + let mut output = if contains_hash { + partial_encode(partial, NodeKind::HashedValueLeaf) } else { partial_encode(partial, NodeKind::Leaf) }; match value { Value::Value(value) => { - let with_len = output.len(); Compact(value.len() as u32).encode_to(&mut output); - let start = output.len(); output.extend_from_slice(value); - let end = output.len(); - meta.encoded_value_callback(ValuePlan::Value(start..end, with_len)); }, - Value::HashedValue(hash) => { + Value::HashedValue(hash, _) => { debug_assert!(hash.len() == H::LENGTH); - let start = output.len(); output.extend_from_slice(hash); - let end = output.len(); - meta.encoded_value_callback(ValuePlan::HashedValue(start..end)); }, - Value::NoValue => unimplemented!("No support for incomplete nodes"), + Value::NoValue => unreachable!("Leaf node always with value."), } output } @@ -248,17 +216,15 @@ where _partial: impl Iterator, _nbnibble: usize, _child: ChildReference<::Out>, - _meta: &mut Meta, ) -> Vec { - unreachable!() + unreachable!("No extension codec.") } fn branch_node( _children: impl Iterator::Out>>>>, _maybe_value: Value, - _meta: &mut Meta, ) -> Vec { - unreachable!() + unreachable!("No extension codec.") } fn branch_node_nibbled( @@ -266,34 +232,15 @@ where number_nibble: usize, children: impl Iterator::Out>>>>, value: Value, - meta: &mut Meta, ) -> Vec { let contains_hash = matches!(&value, Value::HashedValue(..)); - let mut output = match ( - &value, - meta.try_inner_hashing - .as_ref() - .map(|threshold| value_do_hash(&value, threshold)) - 
.unwrap_or(meta.apply_inner_hashing), - ) { + let mut output = match (&value, contains_hash) { (&Value::NoValue, _) => partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue), (_, false) => partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue), (_, true) => - if contains_hash { - partial_from_iterator_encode( - partial, - number_nibble, - NodeKind::AltHashBranchWithValueHash, - ) - } else { - partial_from_iterator_encode( - partial, - number_nibble, - NodeKind::AltHashBranchWithValue, - ) - }, + partial_from_iterator_encode(partial, number_nibble, NodeKind::HashedValueBranch), }; let bitmap_index = output.len(); @@ -301,19 +248,12 @@ where (0..BITMAP_LENGTH).for_each(|_| output.push(0)); match value { Value::Value(value) => { - let with_len = output.len(); Compact(value.len() as u32).encode_to(&mut output); - let start = output.len(); output.extend_from_slice(value); - let end = output.len(); - meta.encoded_value_callback(ValuePlan::Value(start..end, with_len)); }, - Value::HashedValue(hash) => { + Value::HashedValue(hash, _) => { debug_assert!(hash.len() == H::LENGTH); - let start = output.len(); output.extend_from_slice(hash); - let end = output.len(); - meta.encoded_value_callback(ValuePlan::HashedValue(start..end)); }, Value::NoValue => (), } @@ -339,14 +279,6 @@ where // utils -fn value_do_hash(val: &Value, threshold: &u32) -> bool { - match val { - Value::Value(val) => val.encoded_size() >= *threshold as usize, - Value::HashedValue(..) => true, // can only keep hashed - Value::NoValue => false, - } -} - /// Encode and allocate node type header (type and size), and partial value. /// It uses an iterator over encoded partial bytes as input. 
fn partial_from_iterator_encode>( @@ -356,19 +288,15 @@ fn partial_from_iterator_encode>( ) -> Vec { let nibble_count = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count); - let mut output = Vec::with_capacity(3 + (nibble_count / nibble_ops::NIBBLE_PER_BYTE)); + let mut output = Vec::with_capacity(4 + (nibble_count / nibble_ops::NIBBLE_PER_BYTE)); match node_kind { NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), - NodeKind::AltHashLeaf => - NodeHeader::AltHashLeaf(nibble_count, false).encode_to(&mut output), - NodeKind::AltHashBranchWithValue => - NodeHeader::AltHashBranch(nibble_count, false).encode_to(&mut output), - NodeKind::AltHashLeafHash => - NodeHeader::AltHashLeaf(nibble_count, true).encode_to(&mut output), - NodeKind::AltHashBranchWithValueHash => - NodeHeader::AltHashBranch(nibble_count, true).encode_to(&mut output), + NodeKind::HashedValueLeaf => + NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output), + NodeKind::HashedValueBranch => + NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output), }; output.extend(partial); output @@ -382,19 +310,15 @@ fn partial_encode(partial: Partial, node_kind: NodeKind) -> Vec { let nibble_count = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count); - let mut output = Vec::with_capacity(3 + partial.1.len()); + let mut output = Vec::with_capacity(4 + partial.1.len()); match node_kind { NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), - NodeKind::AltHashLeaf => - NodeHeader::AltHashLeaf(nibble_count, false).encode_to(&mut output), - NodeKind::AltHashBranchWithValue 
=> - NodeHeader::AltHashBranch(nibble_count, false).encode_to(&mut output), - NodeKind::AltHashLeafHash => - NodeHeader::AltHashLeaf(nibble_count, true).encode_to(&mut output), - NodeKind::AltHashBranchWithValueHash => - NodeHeader::AltHashBranch(nibble_count, true).encode_to(&mut output), + NodeKind::HashedValueLeaf => + NodeHeader::HashedValueLeaf(nibble_count).encode_to(&mut output), + NodeKind::HashedValueBranch => + NodeHeader::HashedValueBranch(nibble_count).encode_to(&mut output), }; if number_nibble_encoded > 0 { output.push(nibble_ops::pad_right((partial.0).1)); diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index 2443ad03dc53e..839fffb87058f 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -29,16 +29,16 @@ pub(crate) enum NodeHeader { Branch(bool, usize), // contains nibble count Leaf(usize), - // contains nibble count and wether the value is a hash. - AltHashBranch(usize, bool), - // contains nibble count and wether the value is a hash. - AltHashLeaf(usize, bool), + // contains nibble count. + HashedValueBranch(usize), + // contains nibble count. 
+ HashedValueLeaf(usize), } impl NodeHeader { pub(crate) fn contains_hash_of_value(&self) -> bool { match self { - NodeHeader::AltHashBranch(_, true) | NodeHeader::AltHashLeaf(_, true) => true, + NodeHeader::HashedValueBranch(_) | NodeHeader::HashedValueLeaf(_) => true, _ => false, } } @@ -49,17 +49,12 @@ pub(crate) enum NodeKind { Leaf, BranchNoValue, BranchWithValue, - AltHashLeaf, - AltHashBranchWithValue, - AltHashLeafHash, - AltHashBranchWithValueHash, + HashedValueLeaf, + HashedValueBranch, } impl Encode for NodeHeader { fn encode_to(&self, output: &mut T) { - if self.contains_hash_of_value() { - output.write(&[trie_constants::DEAD_HEADER_META_HASHED_VALUE]); - } match self { NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE), NodeHeader::Branch(true, nibble_count) => @@ -72,13 +67,13 @@ impl Encode for NodeHeader { ), NodeHeader::Leaf(nibble_count) => encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, 2, output), - NodeHeader::AltHashBranch(nibble_count, _) => encode_size_and_prefix( + NodeHeader::HashedValueBranch(nibble_count) => encode_size_and_prefix( *nibble_count, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4, output, ), - NodeHeader::AltHashLeaf(nibble_count, _) => encode_size_and_prefix( + NodeHeader::HashedValueLeaf(nibble_count) => encode_size_and_prefix( *nibble_count, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3, @@ -88,30 +83,14 @@ impl Encode for NodeHeader { } } -impl NodeHeader { - /// Is this header using alternate hashing scheme. - pub(crate) fn alt_hashing(&self) -> bool { - match self { - NodeHeader::Null | NodeHeader::Leaf(..) | NodeHeader::Branch(..) => false, - NodeHeader::AltHashBranch(..) | NodeHeader::AltHashLeaf(..) 
=> true, - } - } -} - impl codec::EncodeLike for NodeHeader {} impl Decode for NodeHeader { fn decode(input: &mut I) -> Result { - let mut i = input.read_byte()?; + let i = input.read_byte()?; if i == trie_constants::EMPTY_TRIE { return Ok(NodeHeader::Null) } - let contain_hash = if trie_constants::DEAD_HEADER_META_HASHED_VALUE == i { - i = input.read_byte()?; - true - } else { - false - }; match i & (0b11 << 6) { trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input, 2)?)), trie_constants::BRANCH_WITH_MASK => @@ -120,9 +99,9 @@ impl Decode for NodeHeader { Ok(NodeHeader::Branch(false, decode_size(i, input, 2)?)), trie_constants::EMPTY_TRIE => { if i & (0b111 << 5) == trie_constants::ALT_HASHING_LEAF_PREFIX_MASK { - Ok(NodeHeader::AltHashLeaf(decode_size(i, input, 3)?, contain_hash)) + Ok(NodeHeader::HashedValueLeaf(decode_size(i, input, 3)?)) } else if i & (0b1111 << 4) == trie_constants::ALT_HASHING_BRANCH_WITH_MASK { - Ok(NodeHeader::AltHashBranch(decode_size(i, input, 4)?, contain_hash)) + Ok(NodeHeader::HashedValueBranch(decode_size(i, input, 4)?)) } else { // do not allow any special encoding Err("Unallowed encoding".into()) diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 94dc9c1ea8a13..d498888696d3a 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -15,11 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{Layout, TrieLayout}; +use crate::Layout; use codec::{Decode, Encode}; use hash_db::{HashDB, Hasher}; use sp_std::vec::Vec; -use trie_db::NodeCodec; /// A proof that some set of key-value pairs are included in the storage trie. The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that @@ -73,16 +72,6 @@ impl StorageProof { self.into() } - /// Creates a `MemoryDB` from `Self`. 
In case we do not need - /// to check meta (using alt hashing will always be disabled). - pub fn into_memory_db_no_meta(self) -> crate::MemoryDB { - let mut db = crate::MemoryDB::default(); - for item in self.iter_nodes() { - db.insert(crate::EMPTY_PREFIX, &item); - } - db - } - /// Merges multiple storage proofs covering potentially different sets of keys into one proof /// covering all keys. The merged proof output may be smaller than the aggregate size of the /// input proofs due to deduplication of trie nodes. @@ -175,15 +164,8 @@ impl Iterator for StorageProofNodeIterator { impl From for crate::MemoryDB { fn from(proof: StorageProof) -> Self { let mut db = crate::MemoryDB::default(); - for item in proof.trie_nodes.iter() { - let mut meta = Default::default(); - // Read meta from state (required for value layout). - let _ = as TrieLayout>::Codec::decode_plan(item.as_slice(), &mut meta); - db.alt_insert( - crate::EMPTY_PREFIX, - item, - meta.resolve_alt_hashing::< as TrieLayout>::Codec>(), - ); + for item in proof.iter_nodes() { + db.insert(crate::EMPTY_PREFIX, &item); } db } diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index 4fe7750a552d2..41aa09da31629 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -114,8 +114,7 @@ where let mut nodes_iter = encoded.into_iter(); // Layout does not change trie reading. let layout = L::default(); - let (top_root, _nb_used) = - trie_db::decode_compact_from_iter::(db, &mut nodes_iter, &layout)?; + let (top_root, _nb_used) = trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; // Only check root if expected root is passed as argument. 
if let Some(expected_root) = expected_root { @@ -165,8 +164,7 @@ where let mut previous_extracted_child_trie = None; for child_root in child_tries.into_iter() { if previous_extracted_child_trie.is_none() { - let (top_root, _) = - trie_db::decode_compact_from_iter::(db, &mut nodes_iter, &layout)?; + let (top_root, _) = trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; previous_extracted_child_trie = Some(top_root); } diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index aba5ea3d4aa14..20cc35c6b8708 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -18,29 +18,19 @@ //! `TrieStream` implementation for Substrate's trie format. use crate::{ - node_codec::Bitmap, node_header::{size_and_prefix_iterator, NodeKind}, trie_constants, }; use codec::{Compact, Encode}; use hash_db::Hasher; -use sp_std::{ops::Range, vec::Vec}; +use sp_std::vec::Vec; use trie_root; -const BRANCH_NODE_NO_VALUE: u8 = 254; -const BRANCH_NODE_WITH_VALUE: u8 = 255; - #[derive(Default, Clone)] /// Codec-flavored TrieStream. pub struct TrieStream { /// Current node buffer. buffer: Vec, - /// Global trie alt hashing activation. - inner_value_hashing: Option, - /// For current node, do we use alt hashing. - apply_inner_hashing: bool, - /// Keep trace of position of encoded value. 
- current_value_range: Option>, } impl TrieStream { @@ -72,79 +62,73 @@ fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK, 2), - NodeKind::AltHashLeaf => + NodeKind::HashedValueLeaf => size_and_prefix_iterator(size, trie_constants::ALT_HASHING_LEAF_PREFIX_MASK, 3), - NodeKind::AltHashBranchWithValue => + NodeKind::HashedValueBranch => size_and_prefix_iterator(size, trie_constants::ALT_HASHING_BRANCH_WITH_MASK, 4), - NodeKind::AltHashBranchWithValueHash | NodeKind::AltHashLeafHash => - unreachable!("only added value that do not contain hash"), }; iter_start .chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None }) .chain(nibbles[nibbles.len() % 2..].chunks(2).map(|ch| ch[0] << 4 | ch[1])) } +use trie_root::Value as TrieStreamValue; impl trie_root::TrieStream for TrieStream { - fn new(meta: Option) -> Self { - Self { - buffer: Vec::new(), - inner_value_hashing: meta, - apply_inner_hashing: false, - current_value_range: None, - } + fn new() -> Self { + Self { buffer: Vec::new() } } fn append_empty_data(&mut self) { self.buffer.push(trie_constants::EMPTY_TRIE); } - fn append_leaf(&mut self, key: &[u8], value: &[u8]) { - self.apply_inner_hashing = self - .inner_value_hashing - .as_ref() - .map(|threshold| value_do_hash(value, threshold)) - .unwrap_or(false); - let kind = if self.apply_inner_hashing { NodeKind::AltHashLeaf } else { NodeKind::Leaf }; + fn append_leaf(&mut self, key: &[u8], value: TrieStreamValue) { + let kind = match &value { + TrieStreamValue::NoValue => unreachable!(), + TrieStreamValue::Value(..) => NodeKind::Leaf, + TrieStreamValue::HashedValue(..) 
=> NodeKind::HashedValueLeaf, + }; self.buffer.extend(fuse_nibbles_node(key, kind)); - let start = self.buffer.len(); - Compact(value.len() as u32).encode_to(&mut self.buffer); - self.buffer.extend_from_slice(value); - self.current_value_range = Some(start..self.buffer.len()); + match &value { + TrieStreamValue::NoValue => unreachable!(), + TrieStreamValue::Value(value) => { + Compact(value.len() as u32).encode_to(&mut self.buffer); + self.buffer.extend_from_slice(value); + }, + TrieStreamValue::HashedValue(hash) => { + self.buffer.extend_from_slice(hash.as_slice()); + }, + }; } fn begin_branch( &mut self, maybe_partial: Option<&[u8]>, - maybe_value: Option<&[u8]>, + maybe_value: TrieStreamValue, has_children: impl Iterator, ) { if let Some(partial) = maybe_partial { - if let Some(value) = maybe_value { - self.apply_inner_hashing = self - .inner_value_hashing - .as_ref() - .map(|threshold| value_do_hash(value, threshold)) - .unwrap_or(false); - let kind = if self.apply_inner_hashing { - NodeKind::AltHashBranchWithValue - } else { - NodeKind::BranchWithValue - }; - self.buffer.extend(fuse_nibbles_node(partial, kind)); - } else { - self.buffer.extend(fuse_nibbles_node(partial, NodeKind::BranchNoValue)); - } + let kind = match &maybe_value { + TrieStreamValue::NoValue => NodeKind::BranchNoValue, + TrieStreamValue::Value(..) => NodeKind::BranchWithValue, + TrieStreamValue::HashedValue(..) 
=> NodeKind::HashedValueBranch, + }; + + self.buffer.extend(fuse_nibbles_node(partial, kind)); let bm = branch_node_bit_mask(has_children); self.buffer.extend([bm.0, bm.1].iter()); } else { - debug_assert!(false, "trie stream codec only for no extension trie"); - self.buffer.extend(&branch_node(maybe_value.is_some(), has_children)); + unreachable!("trie stream codec only for no extension trie"); } - if let Some(value) = maybe_value { - let start = self.buffer.len(); - Compact(value.len() as u32).encode_to(&mut self.buffer); - self.buffer.extend_from_slice(value); - self.current_value_range = Some(start..self.buffer.len()); + match maybe_value { + TrieStreamValue::NoValue => (), + TrieStreamValue::Value(value) => { + Compact(value.len() as u32).encode_to(&mut self.buffer); + self.buffer.extend_from_slice(value); + }, + TrieStreamValue::HashedValue(hash) => { + self.buffer.extend_from_slice(hash.as_slice()); + }, } } @@ -153,35 +137,10 @@ impl trie_root::TrieStream for TrieStream { } fn append_substream(&mut self, other: Self) { - let apply_inner_hashing = other.apply_inner_hashing; - let range = other.current_value_range.clone(); let data = other.out(); match data.len() { 0..=31 => data.encode_to(&mut self.buffer), - _ => - if apply_inner_hashing { - hash_db::AltHashing { - encoded_offset: 0, - value_range: range.map(|r| (r.start, r.end)), - } - .alt_hash::(&data) - .as_ref() - .encode_to(&mut self.buffer); - } else { - H::hash(&data).as_ref().encode_to(&mut self.buffer); - }, - } - } - - fn hash_root(self) -> H::Out { - let apply_inner_hashing = self.apply_inner_hashing; - let range = self.current_value_range; - let data = self.buffer; - if apply_inner_hashing { - hash_db::AltHashing { encoded_offset: 0, value_range: range.map(|r| (r.start, r.end)) } - .alt_hash::(&data) - } else { - H::hash(&data) + _ => H::hash(&data).as_ref().encode_to(&mut self.buffer), } } @@ -189,22 +148,3 @@ impl trie_root::TrieStream for TrieStream { self.buffer } } - -fn 
branch_node(has_value: bool, has_children: impl Iterator) -> [u8; 3] { - let mut result = [0, 0, 0]; - branch_node_buffered(has_value, has_children, &mut result[..]); - result -} - -fn branch_node_buffered(has_value: bool, has_children: I, output: &mut [u8]) -where - I: Iterator, -{ - let first = if has_value { BRANCH_NODE_WITH_VALUE } else { BRANCH_NODE_NO_VALUE }; - output[0] = first; - Bitmap::encode(has_children, &mut output[1..]); -} - -fn value_do_hash(val: &[u8], threshold: &u32) -> bool { - val.encoded_size() >= *threshold as usize -} diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 0bfa13c95b850..bd784a5374889 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -218,9 +218,9 @@ impl RuntimeVersion { pub fn state_version(&self) -> StateVersion { let core_api_id = sp_runtime::hashing::blake2_64(b"Core"); if self.has_api_with(&core_api_id, |v| v >= 4) { - DEFAULT_STATE_HASHING + StateVersion::V1 } else { - None + StateVersion::V0 } } } diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index dfb95ffc015e8..f6ce2588a664e 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -85,7 +85,6 @@ pub struct TestClientBuilder, bad_blocks: BadBlocks, enable_offchain_indexing_api: bool, - state_hashed_value: bool, no_genesis: bool, } @@ -138,7 +137,6 @@ impl fork_blocks: None, bad_blocks: None, enable_offchain_indexing_api: false, - state_hashed_value: false, no_genesis: false, } } @@ -203,12 +201,6 @@ impl self } - /// Enable the internal value hash of state. - pub fn state_hashed_value(mut self) -> Self { - self.state_hashed_value = true; - self - } - /// Disable writing genesis. pub fn set_no_genesis(mut self) -> Self { self.no_genesis = true; @@ -244,7 +236,7 @@ impl storage }; - let genesis_state_version = Some(33); // TODO get from genesis wasm + let genesis_state_version = sp_runtime::StateVersion::default(); // TODO resolve from genesis_storage wasm. 
let client = client::Client::new( self.backend.clone(), executor, diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 014616a3afa35..dc5ccadc4574f 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -395,18 +395,12 @@ impl Fetcher for LightFetcher { } /// Creates new client instance used for tests. -pub fn new(hashed_state: bool) -> Client { - let mut builder = TestClientBuilder::new(); - if hashed_state { - builder = builder.state_hashed_value(); - } - builder.build() +pub fn new() -> Client { + TestClientBuilder::new().build() } /// Creates new light client instance used for tests. -pub fn new_light( - hashed_state: bool, -) -> ( +pub fn new_light() -> ( client::Client< LightBackend, LightExecutor, @@ -428,11 +422,12 @@ pub fn new_light( .expect("Creates LocalCallExecutor"); let call_executor = LightExecutor::new(backend.clone(), local_call_executor); - let mut builder = TestClientBuilder::with_backend(backend.clone()); - if hashed_state { - builder = builder.state_hashed_value(); - } - (builder.build_with_executor(call_executor).0, backend) + ( + TestClientBuilder::with_backend(backend.clone()) + .build_with_executor(call_executor) + .0, + backend, + ) } /// Creates new light client fetcher used for tests. 
diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index bcf3bdd45c5fd..91bce13348d6b 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -1239,9 +1239,9 @@ fn test_witness(proof: StorageProof, root: crate::Hash) { None, ); assert!(ext.storage(b"value3").is_some()); - assert!(ext.storage_root(sp_core::DEFAULT_STATE_HASHING).as_slice() == &root[..]); + assert!(ext.storage_root(Default::default()).as_slice() == &root[..]); ext.place_storage(vec![0], Some(vec![1])); - assert!(ext.storage_root(sp_core::DEFAULT_STATE_HASHING).as_slice() != &root[..]); + assert!(ext.storage_root(Default::default()).as_slice() != &root[..]); } #[cfg(test)] diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index b668a720aa75f..f0f37f0b20675 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -291,7 +291,7 @@ mod tests { sp_tracing::try_init_simple(); // given - let client = Arc::new(substrate_test_runtime_client::new(true)); + let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); @@ -326,7 +326,7 @@ mod tests { sp_tracing::try_init_simple(); // given - let client = Arc::new(substrate_test_runtime_client::new(true)); + let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); @@ -345,7 +345,7 @@ mod tests { sp_tracing::try_init_simple(); // given - let client = Arc::new(substrate_test_runtime_client::new(true)); + let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); @@ -374,7 +374,7 @@ mod tests { 
sp_tracing::try_init_simple(); // given - let client = Arc::new(substrate_test_runtime_client::new(true)); + let client = Arc::new(substrate_test_runtime_client::new()); let spawner = sp_core::testing::TaskExecutor::new(); let pool = BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); From 0a6dc15c108bc000c3a9a50071eaf35d3e574817 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 8 Sep 2021 17:21:04 +0200 Subject: [PATCH 076/188] unused import --- Cargo.lock | 1 - client/executor/runtime-test/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a41ac4d4030f8..1ef365adf2cef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8104,7 +8104,6 @@ dependencies = [ "sp-runtime", "sp-sandbox", "sp-std", - "sp-storage", "sp-tasks", "substrate-wasm-builder", ] diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index e3da31461fca4..a4fbc88cf5662 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -19,7 +19,6 @@ sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../.. sp-sandbox = { version = "0.10.0-dev", default-features = false, path = "../../../primitives/sandbox" } sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } sp-tasks = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/tasks" } -sp-storage = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/storage" } # TODO used? 
[build-dependencies] substrate-wasm-builder = { version = "5.0.0-dev", path = "../../../utils/wasm-builder" } From 4e10a6828f011633414981100bda26a2a5a40313 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 8 Sep 2021 17:23:04 +0200 Subject: [PATCH 077/188] clean some TODOs --- primitives/io/src/lib.rs | 3 --- primitives/storage/src/lib.rs | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 4064dbdb5fd77..84456d0ad55b1 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -398,9 +398,6 @@ pub trait DefaultChildStorage { /// The hashing algorithm is defined by the `Block`. /// /// Returns a `Vec` that holds the SCALE encoded hash. - /// TODO this will be use by default for all new runtime that is an issue: we want it - /// to be call only when we choose to migrate, otherwhise lazy migration will apply too - /// soon. -> Maybe just name it differently. #[version(2)] fn root(&mut self, storage_key: &[u8]) -> Vec { let child_info = ChildInfo::new_default(storage_key); diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 9a234f08bba0f..60012f7671882 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -412,9 +412,9 @@ impl ChildTrieParentKeyId { } } -/// Different state that can be applied. TODO rename to StateValueHashing. +/// Different possible state version. /// -/// When a value is define, apply inner hashing over the given threshold. +/// Currently only enable trie value nodes. #[derive(Debug, Clone, Copy, Eq, PartialEq)] pub enum StateVersion { /// Old state version, no value nodes. 
From bf31362c3dd808273e74bcfcc78e2c8f35dc9cce Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 8 Sep 2021 18:02:21 +0200 Subject: [PATCH 078/188] Require RuntimeVersionOf for executor --- client/service/src/builder.rs | 2 -- client/service/src/client/client.rs | 21 +++++++++++++++------ client/service/src/client/light.rs | 2 -- client/service/src/client/wasm_override.rs | 6 +++--- 4 files changed, 18 insertions(+), 13 deletions(-) diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 93da822a43c7e..f0c037aee232f 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -450,12 +450,10 @@ where spawn_handle, config.clone(), )?; - let genesis_state_version = sp_runtime::StateVersion::default(); // TODO resolve from genesis_storage wasm. Ok(crate::client::Client::new( backend, executor, genesis_storage, - genesis_state_version, fork_blocks, bad_blocks, execution_extensions, diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index de15b031360ce..7f4537d5a64f6 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -48,7 +48,7 @@ use sc_client_api::{ use sc_consensus::{ BlockCheckParams, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction, }; -use sc_executor::RuntimeVersion; +use sc_executor::{RuntimeVersion, RuntimeVersionOf}; use sc_light::fetcher::ChangesProof; use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sp_api::{ @@ -97,7 +97,6 @@ use std::{ use { super::call_executor::LocalCallExecutor, sc_client_api::in_mem, - sc_executor::RuntimeVersionOf, sp_core::traits::{CodeExecutor, SpawnNamed}, }; @@ -240,12 +239,10 @@ where keystore, sc_offchain::OffchainDb::factory_from_backend(&*backend), ); - let genesis_state_version = sp_runtime::StateVersion::default(); // TODO resolve from genesis_storage wasm. 
Client::new( backend, call_executor, build_genesis_storage, - genesis_state_version, Default::default(), Default::default(), extensions, @@ -329,7 +326,6 @@ where backend: Arc, executor: E, build_genesis_storage: &dyn BuildStorage, - genesis_state_hash: StateVersion, fork_blocks: ForkBlocks, bad_blocks: BadBlocks, execution_extensions: ExecutionExtensions, @@ -341,8 +337,21 @@ where if info.finalized_state.is_none() { let genesis_storage = build_genesis_storage.build_storage().map_err(sp_blockchain::Error::Storage)?; + let genesis_state_version = if let Some(wasm) = genesis_storage.top.get(well_known_keys::CODE) { + + let ext = sp_state_machine::BasicExternalities::new_empty(); // just to read runtime version. + let code_fetcher = crate::client::wasm_override::WasmBlob::new(wasm.clone()); + let hash = code_fetcher.hash.clone(); + let runtime_code = sp_core::traits::RuntimeCode { code_fetcher: &code_fetcher, heap_pages: None, hash }; + let runtime_version = executor + .runtime_version(&mut ext, &runtime_code) + .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into())?; + runtime_version.state_version() + } else { + Default::default() + }; let mut op = backend.begin_operation()?; - let state_root = op.set_genesis_state(genesis_storage, !config.no_genesis, genesis_state_hash)?; + let state_root = op.set_genesis_state(genesis_storage, !config.no_genesis, genesis_state_version)?; let genesis_block = genesis::construct_genesis_block::(state_root.into()); info!( "🔨 Initializing Genesis block/state (state: {}, header-hash: {})", diff --git a/client/service/src/client/light.rs b/client/service/src/client/light.rs index 672bb94e90509..7c13b98843e05 100644 --- a/client/service/src/client/light.rs +++ b/client/service/src/client/light.rs @@ -68,12 +68,10 @@ where ClientConfig::default(), )?; let executor = GenesisCallExecutor::new(backend.clone(), local_executor); - let genesis_state_version = sp_runtime::StateVersion::default(); // TODO resolve from 
genesis_storage wasm. Client::new( backend, executor, genesis_storage, - genesis_state_version, Default::default(), Default::default(), Default::default(), diff --git a/client/service/src/client/wasm_override.rs b/client/service/src/client/wasm_override.rs index 6d5a071269d4d..bb2098d479ed6 100644 --- a/client/service/src/client/wasm_override.rs +++ b/client/service/src/client/wasm_override.rs @@ -49,13 +49,13 @@ use std::{ #[derive(Clone, Debug, PartialEq)] /// Auxiliary structure that holds a wasm blob and its hash. -struct WasmBlob { +pub(crate) struct WasmBlob { code: Vec, - hash: Vec, + pub(crate) hash: Vec, } impl WasmBlob { - fn new(code: Vec) -> Self { + pub(crate) fn new(code: Vec) -> Self { let hash = make_hash(&code); Self { code, hash } } From 8aba5dd50379bbd0c89e705eabf7c478fccb0c1e Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 8 Sep 2021 18:32:10 +0200 Subject: [PATCH 079/188] use RuntimeVersionOf to resolve genesis state version. --- client/light/src/call_executor.rs | 15 ++++++- client/service/src/client/call_executor.rs | 16 +++++++- client/service/src/client/client.rs | 46 +++++++++++----------- test-utils/client/src/client_ext.rs | 2 +- test-utils/client/src/lib.rs | 4 +- 5 files changed, 54 insertions(+), 29 deletions(-) diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index a0776131e406d..0e96fa7e0c43a 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -44,7 +44,7 @@ use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{ backend::RemoteBackend, call_executor::CallExecutor, light::RemoteCallRequest, }; -use sc_executor::RuntimeVersion; +use sc_executor::{RuntimeVersion, RuntimeVersionOf}; /// Call executor that is able to execute calls only on genesis state. 
/// @@ -164,6 +164,19 @@ where } } +impl RuntimeVersionOf for GenesisCallExecutor +where + Local: RuntimeVersionOf, +{ + fn runtime_version( + &self, + ext: &mut dyn sp_externalities::Externalities, + runtime_code: &sp_core::traits::RuntimeCode, + ) -> Result { + self.local.runtime_version(ext, runtime_code) + } +} + /// Check remote contextual execution proof using given backend. /// /// Proof should include the method execution proof. diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 9b8774ce6d497..bd642ec798d33 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -93,7 +93,7 @@ where Block: BlockT, B: backend::Backend, { - let spec = self.runtime_version(id)?.spec_version; + let spec = CallExecutor::runtime_version(self, id)?.spec_version; let code = if let Some(d) = self .wasm_override .as_ref() @@ -330,6 +330,20 @@ where } } +impl RuntimeVersionOf for LocalCallExecutor +where + E: RuntimeVersionOf, + Block: BlockT, +{ + fn runtime_version( + &self, + ext: &mut dyn sp_externalities::Externalities, + runtime_code: &sp_core::traits::RuntimeCode, + ) -> Result { + RuntimeVersionOf::runtime_version(&self.executor, ext, runtime_code) + } +} + impl sp_version::GetRuntimeVersionAt for LocalCallExecutor where B: backend::Backend, diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 7f4537d5a64f6..4eb50dcef69e0 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -264,7 +264,7 @@ where impl LockImportRun for Client where B: backend::Backend, - E: CallExecutor, + E: CallExecutor + RuntimeVersionOf, Block: BlockT, { fn lock_import_and_run(&self, f: F) -> Result @@ -303,7 +303,7 @@ impl LockImportRun for &Client where Block: BlockT, B: backend::Backend, - E: CallExecutor, + E: CallExecutor + RuntimeVersionOf, { fn lock_import_and_run(&self, f: F) -> Result where @@ -317,7 +317,7 @@ 
where impl Client where B: backend::Backend, - E: CallExecutor, + E: CallExecutor + RuntimeVersionOf, Block: BlockT, Block::Header: Clone, { @@ -339,13 +339,13 @@ where build_genesis_storage.build_storage().map_err(sp_blockchain::Error::Storage)?; let genesis_state_version = if let Some(wasm) = genesis_storage.top.get(well_known_keys::CODE) { - let ext = sp_state_machine::BasicExternalities::new_empty(); // just to read runtime version. + let mut ext = sp_state_machine::BasicExternalities::new_empty(); // just to read runtime version. let code_fetcher = crate::client::wasm_override::WasmBlob::new(wasm.clone()); let hash = code_fetcher.hash.clone(); let runtime_code = sp_core::traits::RuntimeCode { code_fetcher: &code_fetcher, heap_pages: None, hash }; - let runtime_version = executor - .runtime_version(&mut ext, &runtime_code) - .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into())?; + let runtime_version = RuntimeVersionOf::runtime_version( + &executor, &mut ext, &runtime_code) + .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)))?; runtime_version.state_version() } else { Default::default() @@ -419,12 +419,12 @@ where /// Get the RuntimeVersion at a given block. pub fn runtime_version_at(&self, id: &BlockId) -> sp_blockchain::Result { - self.executor.runtime_version(id) + CallExecutor::runtime_version(&self.executor, id) } /// Get the StateVersion at a given block. pub fn state_hash_at(&self, id: &BlockId) -> sp_blockchain::Result { - Ok(self.executor.runtime_version(id)?.state_version()) + Ok(self.runtime_version_at(id)?.state_version()) } /// Reads given header and generates CHT-based header proof for CHT of given size. @@ -1286,7 +1286,7 @@ where impl UsageProvider for Client where B: backend::Backend, - E: CallExecutor, + E: CallExecutor + RuntimeVersionOf, Block: BlockT, { /// Get usage info about current client. 
@@ -1298,7 +1298,7 @@ where impl ProofProvider for Client where B: backend::Backend, - E: CallExecutor, + E: CallExecutor + RuntimeVersionOf, Block: BlockT, { fn read_proof( @@ -1423,7 +1423,7 @@ where impl BlockBuilderProvider for Client where B: backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static, + E: CallExecutor + RuntimeVersionOf + Send + Sync + 'static, Block: BlockT, Self: ChainHeaderBackend + ProvideRuntimeApi, >::Api: @@ -1481,7 +1481,7 @@ where impl StorageProvider for Client where B: backend::Backend, - E: CallExecutor, + E: CallExecutor + RuntimeVersionOf, Block: BlockT, { fn storage_keys( @@ -1707,7 +1707,7 @@ where impl ProvideUncles for Client where B: backend::Backend, - E: CallExecutor, + E: CallExecutor + RuntimeVersionOf, Block: BlockT, { fn uncles( @@ -1866,7 +1866,7 @@ where } fn runtime_version_at(&self, at: &BlockId) -> Result { - self.runtime_version_at(at).map_err(Into::into) + CallExecutor::runtime_version(&self.executor, at).map_err(Into::into) } fn state_hash_at(&self, at: &BlockId) -> Result { @@ -1881,7 +1881,7 @@ where impl sc_consensus::BlockImport for &Client where B: backend::Backend, - E: CallExecutor + Send + Sync, + E: CallExecutor + RuntimeVersionOf + Send + Sync, Block: BlockT, Client: ProvideRuntimeApi, as ProvideRuntimeApi>::Api: @@ -1996,7 +1996,7 @@ where impl sc_consensus::BlockImport for Client where B: backend::Backend, - E: CallExecutor + Send + Sync, + E: CallExecutor + RuntimeVersionOf + Send + Sync, Block: BlockT, Self: ProvideRuntimeApi, >::Api: @@ -2026,7 +2026,7 @@ where impl Finalizer for Client where B: backend::Backend, - E: CallExecutor, + E: CallExecutor + RuntimeVersionOf, Block: BlockT, { fn apply_finality( @@ -2062,7 +2062,7 @@ where impl Finalizer for &Client where B: backend::Backend, - E: CallExecutor, + E: CallExecutor + RuntimeVersionOf, Block: BlockT, { fn apply_finality( @@ -2116,7 +2116,7 @@ where impl BlockBackend for Client where B: backend::Backend, - E: 
CallExecutor, + E: CallExecutor + RuntimeVersionOf, Block: BlockT, { fn block_body( @@ -2165,7 +2165,7 @@ where impl backend::AuxStore for Client where B: backend::Backend, - E: CallExecutor, + E: CallExecutor + RuntimeVersionOf, Block: BlockT, Self: ProvideRuntimeApi, >::Api: CoreApi, @@ -2197,7 +2197,7 @@ where impl backend::AuxStore for &Client where B: backend::Backend, - E: CallExecutor, + E: CallExecutor + RuntimeVersionOf, Block: BlockT, Client: ProvideRuntimeApi, as ProvideRuntimeApi>::Api: CoreApi, @@ -2224,7 +2224,7 @@ where impl sp_consensus::block_validation::Chain for Client where BE: backend::Backend, - E: CallExecutor, + E: CallExecutor + RuntimeVersionOf, B: BlockT, { fn block_status( diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index bf1c9898972ca..72828fdcc9188 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -69,7 +69,7 @@ pub trait ClientBlockImportExt: Sized { impl ClientExt for Client where B: sc_client_api::backend::Backend, - E: sc_client_api::CallExecutor + 'static, + E: sc_client_api::CallExecutor + sc_executor::RuntimeVersionOf + 'static, Self: BlockImport, Block: BlockT, { diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index f6ce2588a664e..559ca18cbf31c 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -216,7 +216,7 @@ impl sc_consensus::LongestChain, ) where - ExecutorDispatch: sc_client_api::CallExecutor + 'static, + ExecutorDispatch: sc_client_api::CallExecutor + sc_executor::RuntimeVersionOf + 'static, Backend: sc_client_api::backend::Backend, >::OffchainStorage: 'static, { @@ -236,12 +236,10 @@ impl storage }; - let genesis_state_version = sp_runtime::StateVersion::default(); // TODO resolve from genesis_storage wasm. 
let client = client::Client::new( self.backend.clone(), executor, &storage, - genesis_state_version, self.fork_blocks, self.bad_blocks, ExecutionExtensions::new( From 499ab5b09de88791f62d8f9ab191f30f4ff19b57 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 8 Sep 2021 21:11:57 +0200 Subject: [PATCH 080/188] update runtime version test --- client/rpc/src/state/tests.rs | 2 +- frame/executive/src/lib.rs | 14 +++++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index ef13b37ce42fe..bad43d3cda9e9 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -442,7 +442,7 @@ fn should_return_runtime_version() { ); let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ - \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",3],\ + \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",4],\ [\"0x37e397fc7c91f5e4\",1],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",5],\ [\"0xc6e9a76309f39b09\",1],[\"0xdd718d5cc53262d4\",1],[\"0xcbca25e39f142387\",2],\ [\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],[\"0xbc9d89904f5b923f\",1]],\ diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 5f1ae23c2f531..ebfd5bcb0ebab 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -846,9 +846,17 @@ mod tests { t.into() } + fn new_test_ext_v0(balance_factor: Balance) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(1, 111 * balance_factor)] } + .assimilate_storage(&mut t) + .unwrap(); + (t, sp_runtime::StateVersion::V0).into() + } + #[test] fn block_import_works() { - new_test_ext(1).execute_with(|| { + new_test_ext_v0(1).execute_with(|| { Executive::execute_block(Block { header: Header { parent_hash: [69u8; 32].into(), @@ -871,7 +879,7 @@ mod tests { #[test] 
#[should_panic] fn block_import_of_bad_state_root_fails() { - new_test_ext(1).execute_with(|| { + new_test_ext_v0(1).execute_with(|| { Executive::execute_block(Block { header: Header { parent_hash: [69u8; 32].into(), @@ -891,7 +899,7 @@ mod tests { #[test] #[should_panic] fn block_import_of_bad_extrinsic_root_fails() { - new_test_ext(1).execute_with(|| { + new_test_ext_v0(1).execute_with(|| { Executive::execute_block(Block { header: Header { parent_hash: [69u8; 32].into(), From 27cf1363f225d54566db56ab9bf300b2fc00ef4e Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 8 Sep 2021 21:20:11 +0200 Subject: [PATCH 081/188] fix state-machine tests --- primitives/state-machine/src/lib.rs | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index f245c1fc38693..2187a8f9474a6 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1660,7 +1660,7 @@ mod tests { let check_proof = |mdb, root, state_hash| -> StorageProof { let remote_backend = TrieBackend::new(mdb, root); - let remote_root = remote_backend.storage_root(::std::iter::empty(), state_hash).0; + let remote_root = remote_backend.storage_root(std::iter::empty(), state_hash).0; let remote_proof = prove_read(remote_backend, &[b"foo222"]).unwrap(); // check proof locally let local_result1 = @@ -1683,25 +1683,13 @@ mod tests { // do switch layout = Layout::with_max_inline_value(sp_core::storage::DEFAULT_MAX_INLINE_VALUE); state_hash = StateVersion::V1; - // update with same value do not change { let mut trie = TrieDBMut::from_existing_with_layout(&mut mdb, &mut root, layout.clone()).unwrap(); trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash .expect("insert failed"); - } - let root3 = root.clone(); - assert!(root1 == root3); - // different value then same is enough to update - // from triedbmut persipective (do not - // work with state machine as only changes do 
makes - // it to payload (would require a special host function). - { - let mut trie = - TrieDBMut::from_existing_with_layout(&mut mdb, &mut root, layout.clone()).unwrap(); - trie.insert(b"foo222", vec![4u8].as_slice()) // inner hash - .expect("insert failed"); - trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash + // update with same value do change + trie.insert(b"foo", vec![1u8; 1000].as_slice()) // inner hash .expect("insert failed"); } let root3 = root.clone(); @@ -1715,8 +1703,8 @@ mod tests { #[test] fn compact_multiple_child_trie() { - let size_inner_hash = compact_multiple_child_trie_inner(StateVersion::V0); - let size_no_inner_hash = compact_multiple_child_trie_inner(StateVersion::V1); + let size_no_inner_hash = compact_multiple_child_trie_inner(StateVersion::V0); + let size_inner_hash = compact_multiple_child_trie_inner(StateVersion::V1); assert!(size_inner_hash < size_no_inner_hash); } fn compact_multiple_child_trie_inner(state_hash: StateVersion) -> usize { From ac4f7dc0386630cdb94f97a1b65c16c1064d8e13 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 8 Sep 2021 23:42:54 +0200 Subject: [PATCH 082/188] TODO --- client/service/src/client/client.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 4eb50dcef69e0..3844377cc6f4d 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -843,6 +843,7 @@ where children_default: Default::default(), }; + panic!("TODO state_hash for code in changes like genesis.") let state_hash = self.state_hash_at(&BlockId::Hash(parent_hash))?; let state_root = operation.op.reset_storage(storage, state_hash)?; if state_root != *import_headers.post().state_root() { From 1c817d627c7487db46934abcbcd2955bb872fa5f Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 9 Sep 2021 09:13:59 +0200 Subject: [PATCH 083/188] Use runtime version from storage wasm with fast sync. 
--- client/service/src/client/client.rs | 37 ++++++++++++++++------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 3844377cc6f4d..28dcfe57d24a6 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -64,7 +64,7 @@ use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_core::{ convert_hash, - storage::{well_known_keys, ChildInfo, PrefixedStorageKey, StorageData, StorageKey}, + storage::{well_known_keys, ChildInfo, PrefixedStorageKey, StorageData, StorageKey, Storage}, ChangesTrieConfiguration, NativeOrEncoded, }; #[cfg(feature = "test-helpers")] @@ -337,19 +337,7 @@ where if info.finalized_state.is_none() { let genesis_storage = build_genesis_storage.build_storage().map_err(sp_blockchain::Error::Storage)?; - let genesis_state_version = if let Some(wasm) = genesis_storage.top.get(well_known_keys::CODE) { - - let mut ext = sp_state_machine::BasicExternalities::new_empty(); // just to read runtime version. 
- let code_fetcher = crate::client::wasm_override::WasmBlob::new(wasm.clone()); - let hash = code_fetcher.hash.clone(); - let runtime_code = sp_core::traits::RuntimeCode { code_fetcher: &code_fetcher, heap_pages: None, hash }; - let runtime_version = RuntimeVersionOf::runtime_version( - &executor, &mut ext, &runtime_code) - .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)))?; - runtime_version.state_version() - } else { - Default::default() - }; + let genesis_state_version = Self::resolve_state_version_from_wasm(&genesis_storage, &executor)?; let mut op = backend.begin_operation()?; let state_root = op.set_genesis_state(genesis_storage, !config.no_genesis, genesis_state_version)?; let genesis_block = genesis::construct_genesis_block::(state_root.into()); @@ -843,9 +831,9 @@ where children_default: Default::default(), }; - panic!("TODO state_hash for code in changes like genesis.") - let state_hash = self.state_hash_at(&BlockId::Hash(parent_hash))?; - let state_root = operation.op.reset_storage(storage, state_hash)?; + // This is use by fast sync so runtime version need to be resolve from changes. + let state_version = Self::resolve_state_version_from_wasm(&storage, &self.executor)?; + let state_root = operation.op.reset_storage(storage, state_version)?; if state_root != *import_headers.post().state_root() { // State root mismatch when importing state. This should not happen in // safe fast sync mode, but may happen in unsafe mode. @@ -1282,6 +1270,21 @@ where trace!("Collected {} uncles", uncles.len()); Ok(uncles) } + + fn resolve_state_version_from_wasm(storage: &Storage, executor: &E) -> sp_blockchain::Result { + Ok(if let Some(wasm) = storage.top.get(well_known_keys::CODE) { + let mut ext = sp_state_machine::BasicExternalities::new_empty(); // just to read runtime version. 
+ let code_fetcher = crate::client::wasm_override::WasmBlob::new(wasm.clone()); + let hash = code_fetcher.hash.clone(); + let runtime_code = sp_core::traits::RuntimeCode { code_fetcher: &code_fetcher, heap_pages: None, hash }; + let runtime_version = RuntimeVersionOf::runtime_version( + executor, &mut ext, &runtime_code) + .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)))?; + runtime_version.state_version() + } else { + Default::default() + }) + } } impl UsageProvider for Client From f0d9326e13c2fd2f2f7d7fc39789b5341b022768 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 9 Sep 2021 09:17:10 +0200 Subject: [PATCH 084/188] rustfmt --- bin/node-template/runtime/src/lib.rs | 1 - client/api/src/backend.rs | 10 +- client/api/src/cht.rs | 3 +- client/api/src/in_mem.rs | 10 +- client/db/src/bench.rs | 2 +- client/db/src/lib.rs | 68 ++++++--- client/db/src/storage_cache.rs | 11 +- client/light/src/backend.rs | 20 ++- client/network/test/src/lib.rs | 6 +- client/service/src/client/client.rs | 36 +++-- client/service/test/src/client/light.rs | 17 +-- client/service/test/src/client/mod.rs | 7 +- primitives/api/src/lib.rs | 4 +- primitives/externalities/src/lib.rs | 8 +- primitives/runtime/src/lib.rs | 4 +- primitives/state-machine/src/backend.rs | 9 +- primitives/state-machine/src/basic.rs | 18 ++- primitives/state-machine/src/ext.rs | 134 ++++++++++-------- .../state-machine/src/in_memory_backend.rs | 24 ++-- primitives/state-machine/src/lib.rs | 3 +- .../src/overlayed_changes/mod.rs | 11 +- .../state-machine/src/proving_backend.rs | 6 +- primitives/state-machine/src/trie_backend.rs | 14 +- primitives/tasks/src/async_externalities.rs | 9 +- primitives/version/src/lib.rs | 2 +- test-utils/client/src/lib.rs | 3 +- 26 files changed, 273 insertions(+), 167 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index a5bf227768005..eae40e1ab3564 100644 --- a/bin/node-template/runtime/src/lib.rs +++ 
b/bin/node-template/runtime/src/lib.rs @@ -86,7 +86,6 @@ pub mod opaque { } } - // To learn more about runtime versioning and what each of the following value means: // https://substrate.dev/docs/en/knowledgebase/runtime/upgrades#runtime-versioning #[sp_version::runtime_version] diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 77af4fb2ca045..0a1f08a51dc53 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -28,9 +28,9 @@ use sp_blockchain; use sp_consensus::BlockOrigin; use sp_core::{offchain::OffchainStorage, ChangesTrieConfigurationRange}; use sp_runtime::{ - generic::BlockId, StateVersion, + generic::BlockId, traits::{Block as BlockT, HashFor, NumberFor}, - Justification, Justifications, Storage, + Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, @@ -175,7 +175,11 @@ pub trait BlockImportOperation { ) -> sp_blockchain::Result; /// Inject storage data into the database replacing any existing data. - fn reset_storage(&mut self, storage: Storage, state_hash: StateVersion) -> sp_blockchain::Result; + fn reset_storage( + &mut self, + storage: Storage, + state_hash: StateVersion, + ) -> sp_blockchain::Result; /// Set storage changes. 
fn update_storage( diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index cbf527f2d8af2..163422432c362 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -117,7 +117,8 @@ where .into_iter() .map(|(k, v)| (k, Some(v))) .collect::>(); - let storage = InMemoryBackend::::default().update(vec![(None, transaction)], sp_runtime::StateVersion::V0); + let storage = InMemoryBackend::::default() + .update(vec![(None, transaction)], sp_runtime::StateVersion::V0); let trie_storage = storage .as_trie_backend() .expect("InMemoryState::as_trie_backend always returns Some; qed"); diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 3b06030d8c71a..7662847f3c8ae 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -24,9 +24,9 @@ use sp_core::{ offchain::storage::InMemOffchainStorage as OffchainStorage, storage::well_known_keys, }; use sp_runtime::{ - generic::BlockId, StateVersion, + generic::BlockId, traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor, Zero}, - Justification, Justifications, Storage, + Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, @@ -667,7 +667,11 @@ where self.apply_storage(storage, commit, state_hash) } - fn reset_storage(&mut self, storage: Storage, state_hash: StateVersion) -> sp_blockchain::Result { + fn reset_storage( + &mut self, + storage: Storage, + state_hash: StateVersion, + ) -> sp_blockchain::Result { self.apply_storage(storage, true, state_hash) } diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index b87eca5990087..b65e88073275c 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -34,7 +34,7 @@ use sp_core::{ }; use sp_runtime::{ traits::{Block as BlockT, HashFor}, - Storage, StateVersion, + StateVersion, Storage, }; use sp_state_machine::{ backend::Backend as StateBackend, ChildStorageCollection, DBValue, ProofRecorder, diff 
--git a/client/db/src/lib.rs b/client/db/src/lib.rs index 2d7071bba5e20..2f17f8fa27cc2 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -88,7 +88,7 @@ use sp_runtime::{ Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, }, - Justification, Justifications, Storage, StateVersion, + Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ backend::Backend as StateBackend, ChangesTrieCacheAction, ChangesTrieTransaction, @@ -826,7 +826,11 @@ impl BlockImportOperation { } } - fn apply_new_state(&mut self, storage: Storage, state_hash: StateVersion) -> ClientResult { + fn apply_new_state( + &mut self, + storage: Storage, + state_hash: StateVersion, + ) -> ClientResult { if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(&k)) { return Err(sp_blockchain::Error::InvalidState.into()) } @@ -899,7 +903,11 @@ impl sc_client_api::backend::BlockImportOperation Ok(()) } - fn reset_storage(&mut self, storage: Storage, state_hash: StateVersion) -> ClientResult { + fn reset_storage( + &mut self, + storage: Storage, + state_hash: StateVersion, + ) -> ClientResult { if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(&k)) { return Err(sp_blockchain::Error::GenesisInvalid.into()) } @@ -932,7 +940,12 @@ impl sc_client_api::backend::BlockImportOperation Ok(root) } - fn set_genesis_state(&mut self, storage: Storage, commit: bool, state_hash: StateVersion) -> ClientResult { + fn set_genesis_state( + &mut self, + storage: Storage, + commit: bool, + state_hash: StateVersion, + ) -> ClientResult { let root = self.apply_new_state(storage, state_hash)?; self.commit_state = commit; Ok(root) @@ -2594,10 +2607,13 @@ pub(crate) mod tests { .into(); let hash = header.hash(); - op.reset_storage(Storage { - top: storage.into_iter().collect(), - children_default: Default::default(), - }, state_version) + op.reset_storage( + Storage { + top: storage.into_iter().collect(), + children_default: 
Default::default(), + }, + state_version, + ) .unwrap(); op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) .unwrap(); @@ -2626,9 +2642,10 @@ pub(crate) mod tests { let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))]; - let (root, overlay) = op - .old_state - .storage_root(storage.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))), state_version); + let (root, overlay) = op.old_state.storage_root( + storage.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))), + state_version, + ); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); @@ -2666,13 +2683,14 @@ pub(crate) mod tests { extrinsics_root: Default::default(), }; - header.state_root = op.old_state.storage_root(std::iter::empty(), state_version).0.into(); + header.state_root = + op.old_state.storage_root(std::iter::empty(), state_version).0.into(); let hash = header.hash(); - op.reset_storage(Storage { - top: Default::default(), - children_default: Default::default(), - }, state_version) + op.reset_storage( + Storage { top: Default::default(), children_default: Default::default() }, + state_version, + ) .unwrap(); key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); @@ -3137,10 +3155,13 @@ pub(crate) mod tests { .into(); let hash = header.hash(); - op.reset_storage(Storage { - top: storage.into_iter().collect(), - children_default: Default::default(), - }, state_version) + op.reset_storage( + Storage { + top: storage.into_iter().collect(), + children_default: Default::default(), + }, + state_version, + ) .unwrap(); op.set_block_data(header.clone(), Some(vec![]), None, None, NewBlockState::Best) .unwrap(); @@ -3169,9 +3190,10 @@ pub(crate) mod tests { let storage = vec![(b"test".to_vec(), Some(b"test2".to_vec()))]; - let (root, overlay) = op - .old_state - .storage_root(storage.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))), state_version); + let (root, overlay) = op.old_state.storage_root( + 
storage.iter().map(|(k, v)| (&k[..], v.as_ref().map(|v| &v[..]))), + state_version, + ); op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); let hash = header.hash(); diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index ac41464d366c1..37d2ee6fc8169 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -26,8 +26,10 @@ use linked_hash_map::{Entry, LinkedHashMap}; use log::trace; use parking_lot::{RwLock, RwLockUpgradableReadGuard}; use sp_core::{hexdisplay::HexDisplay, storage::ChildInfo}; -use sp_runtime::traits::{Block as BlockT, HashFor, Header, NumberFor}; -use sp_runtime::StateVersion; +use sp_runtime::{ + traits::{Block as BlockT, HashFor, Header, NumberFor}, + StateVersion, +}; use sp_state_machine::{ backend::Backend as StateBackend, ChildStorageCollection, StorageCollection, StorageKey, StorageValue, TrieBackend, @@ -1187,7 +1189,10 @@ mod tests { let shared = new_shared_cache::(256 * 1024, (0, 1)); let mut backend = InMemoryBackend::::default(); - backend.insert(std::iter::once((None, vec![(key.clone(), Some(vec![1]))])), Default::default()); + backend.insert( + std::iter::once((None, vec![(key.clone(), Some(vec![1]))])), + Default::default(), + ); let mut s = CachingState::new(backend.clone(), shared.clone(), Some(root_parent)); s.cache.sync_cache( diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index be4f80592f5e8..6c6d78d4d1228 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -46,9 +46,9 @@ use sp_core::{ ChangesTrieConfiguration, }; use sp_runtime::{ - generic::BlockId, StateVersion, + generic::BlockId, traits::{Block as BlockT, HashFor, Header, NumberFor, Zero}, - Justification, Justifications, Storage, + Justification, Justifications, StateVersion, Storage, }; use sp_state_machine::{ Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, @@ -326,7 +326,12 @@ where Ok(()) } - fn 
set_genesis_state(&mut self, input: Storage, commit: bool, state_hash: StateVersion) -> ClientResult { + fn set_genesis_state( + &mut self, + input: Storage, + commit: bool, + state_hash: StateVersion, + ) -> ClientResult { check_genesis_storage(&input)?; // changes trie configuration @@ -356,7 +361,8 @@ where } let storage_update = InMemoryBackend::from((storage, state_hash)); - let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta, state_hash); + let (storage_root, _) = + storage_update.full_storage_root(std::iter::empty(), child_delta, state_hash); if commit { self.storage_update = Some(storage_update); } @@ -364,7 +370,11 @@ where Ok(storage_root) } - fn reset_storage(&mut self, _input: Storage, _state_hash: StateVersion) -> ClientResult { + fn reset_storage( + &mut self, + _input: Storage, + _state_hash: StateVersion, + ) -> ClientResult { Err(ClientError::NotAvailableOnLightClient) } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 89d3d5095a26b..bb49cef8c642c 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -730,11 +730,9 @@ where /// Add a full peer. fn add_full_peer_with_config(&mut self, config: FullPeerConfig) { let mut test_client_builder = match (config.keep_blocks, config.storage_chain) { - (Some(keep_blocks), true) => - TestClientBuilder::with_tx_storage(keep_blocks), + (Some(keep_blocks), true) => TestClientBuilder::with_tx_storage(keep_blocks), (None, true) => TestClientBuilder::with_tx_storage(u32::MAX), - (Some(keep_blocks), false) => - TestClientBuilder::with_pruning_window(keep_blocks), + (Some(keep_blocks), false) => TestClientBuilder::with_pruning_window(keep_blocks), (None, false) => TestClientBuilder::with_default_backend(), }; if matches!(config.sync_mode, SyncMode::Fast { .. 
}) { diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 28dcfe57d24a6..2bce77646f474 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -64,7 +64,7 @@ use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_core::{ convert_hash, - storage::{well_known_keys, ChildInfo, PrefixedStorageKey, StorageData, StorageKey, Storage}, + storage::{well_known_keys, ChildInfo, PrefixedStorageKey, Storage, StorageData, StorageKey}, ChangesTrieConfiguration, NativeOrEncoded, }; #[cfg(feature = "test-helpers")] @@ -75,8 +75,7 @@ use sp_runtime::{ Block as BlockT, DigestFor, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, }, - StateVersion, - BuildStorage, Justification, Justifications, + BuildStorage, Justification, Justifications, StateVersion, }; use sp_state_machine::{ key_changes, key_changes_proof, prove_child_read, prove_range_read_with_size, prove_read, @@ -337,9 +336,11 @@ where if info.finalized_state.is_none() { let genesis_storage = build_genesis_storage.build_storage().map_err(sp_blockchain::Error::Storage)?; - let genesis_state_version = Self::resolve_state_version_from_wasm(&genesis_storage, &executor)?; + let genesis_state_version = + Self::resolve_state_version_from_wasm(&genesis_storage, &executor)?; let mut op = backend.begin_operation()?; - let state_root = op.set_genesis_state(genesis_storage, !config.no_genesis, genesis_state_version)?; + let state_root = + op.set_genesis_state(genesis_storage, !config.no_genesis, genesis_state_version)?; let genesis_block = genesis::construct_genesis_block::(state_root.into()); info!( "🔨 Initializing Genesis block/state (state: {}, header-hash: {})", @@ -831,8 +832,10 @@ where children_default: Default::default(), }; - // This is use by fast sync so runtime version need to be resolve from changes. 
- let state_version = Self::resolve_state_version_from_wasm(&storage, &self.executor)?; + // This is use by fast sync so runtime version need to be resolve from + // changes. + let state_version = + Self::resolve_state_version_from_wasm(&storage, &self.executor)?; let state_root = operation.op.reset_storage(storage, state_version)?; if state_root != *import_headers.post().state_root() { // State root mismatch when importing state. This should not happen in @@ -1271,15 +1274,22 @@ where Ok(uncles) } - fn resolve_state_version_from_wasm(storage: &Storage, executor: &E) -> sp_blockchain::Result { + fn resolve_state_version_from_wasm( + storage: &Storage, + executor: &E, + ) -> sp_blockchain::Result { Ok(if let Some(wasm) = storage.top.get(well_known_keys::CODE) { let mut ext = sp_state_machine::BasicExternalities::new_empty(); // just to read runtime version. - let code_fetcher = crate::client::wasm_override::WasmBlob::new(wasm.clone()); + let code_fetcher = crate::client::wasm_override::WasmBlob::new(wasm.clone()); let hash = code_fetcher.hash.clone(); - let runtime_code = sp_core::traits::RuntimeCode { code_fetcher: &code_fetcher, heap_pages: None, hash }; - let runtime_version = RuntimeVersionOf::runtime_version( - executor, &mut ext, &runtime_code) - .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)))?; + let runtime_code = sp_core::traits::RuntimeCode { + code_fetcher: &code_fetcher, + heap_pages: None, + hash, + }; + let runtime_version = + RuntimeVersionOf::runtime_version(executor, &mut ext, &runtime_code) + .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)))?; runtime_version.state_version() } else { Default::default() diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index f5075456ab3f0..36b082924e973 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -43,7 +43,7 @@ use sp_blockchain::{ Result as ClientResult, }; use 
sp_consensus::BlockOrigin; -use sp_core::{testing::TaskExecutor, NativeOrEncoded, H256, StateVersion}; +use sp_core::{testing::TaskExecutor, NativeOrEncoded, StateVersion, H256}; use sp_externalities::Extensions; use sp_runtime::{ generic::BlockId, @@ -295,8 +295,7 @@ fn unavailable_state_is_created_when_genesis_state_is_unavailable() { #[test] fn light_aux_store_is_updated_via_non_importing_op() { - let backend = - Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + let backend = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); let mut op = ClientBackend::::begin_operation(&backend).unwrap(); BlockImportOperation::::insert_aux(&mut op, vec![(vec![1], Some(vec![2]))]).unwrap(); ClientBackend::::commit_operation(&backend, op).unwrap(); @@ -454,7 +453,9 @@ type TestChecker = LightDataChecker< DummyStorage, >; -fn prepare_for_read_proof_check(state_version: StateVersion) -> (TestChecker, Header, StorageProof, u32) { +fn prepare_for_read_proof_check( + state_version: StateVersion, +) -> (TestChecker, Header, StorageProof, u32) { // prepare remote client let remote_client = substrate_test_runtime_client::new(); let remote_block_id = BlockId::Number(0); @@ -490,7 +491,9 @@ fn prepare_for_read_proof_check(state_version: StateVersion) -> (TestChecker, He (local_checker, remote_block_header, remote_read_proof, heap_pages) } -fn prepare_for_read_child_proof_check(state_version: StateVersion) -> (TestChecker, Header, StorageProof, Vec) { +fn prepare_for_read_child_proof_check( + state_version: StateVersion, +) -> (TestChecker, Header, StorageProof, Vec) { use substrate_test_runtime_client::{DefaultTestClientBuilderExt, TestClientBuilderExt}; let child_info = ChildInfo::new_default(b"child1"); let child_info = &child_info; @@ -532,9 +535,7 @@ fn prepare_for_read_child_proof_check(state_version: StateVersion) -> (TestCheck (local_checker, remote_block_header, remote_read_proof, child_value) } -fn prepare_for_header_proof_check( - insert_cht: 
bool, -) -> (TestChecker, Hash, Header, StorageProof) { +fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Header, StorageProof) { // prepare remote client let mut remote_client = substrate_test_runtime_client::new(); let mut local_headers_hashes = Vec::new(); diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index de0c7923201a0..c4661a6caf8a9 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -32,9 +32,9 @@ use sp_api::ProvideRuntimeApi; use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError, SelectChain}; use sp_core::{blake2_256, testing::TaskExecutor, ChangesTrieConfiguration, H256}; use sp_runtime::{ - generic::BlockId, StateVersion, + generic::BlockId, traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}, - ConsensusEngineId, DigestItem, Justifications, + ConsensusEngineId, DigestItem, Justifications, StateVersion, }; use sp_state_machine::{ backend::Backend as _, ExecutionStrategy, InMemoryBackend, OverlayedChanges, StateMachine, @@ -1286,8 +1286,7 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { #[test] fn finalizing_diverged_block_should_trigger_reorg() { - let (mut client, select_chain) = - TestClientBuilder::new().build_with_longest_chain(); + let (mut client, select_chain) = TestClientBuilder::new().build_with_longest_chain(); // G -> A1 -> A2 // \ diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 6bb0cf22dd57f..c6708dca5da22 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -84,6 +84,8 @@ pub use sp_core::NativeOrEncoded; use sp_core::OpaqueMetadata; #[doc(hidden)] pub use sp_core::{offchain, ExecutionContext}; +#[cfg(feature = "std")] +pub use sp_runtime::StateVersion; #[doc(hidden)] pub use sp_runtime::{ generic::BlockId, @@ -94,8 +96,6 @@ pub use sp_runtime::{ transaction_validity::TransactionValidity, RuntimeString, TransactionOutcome, }; -#[cfg(feature = 
"std")] -pub use sp_runtime::StateVersion; #[doc(hidden)] #[cfg(feature = "std")] pub use sp_state_machine::{ diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 01621ddc72cef..5055cd292e698 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -31,7 +31,7 @@ use sp_std::{ vec::Vec, }; -use sp_storage::{ChildInfo, TrackedStorageKey, StateVersion}; +use sp_storage::{ChildInfo, StateVersion, TrackedStorageKey}; pub use extensions::{Extension, ExtensionStore, Extensions}; pub use scope_limited::{set_and_run_with_externalities, with_externalities}; @@ -165,7 +165,11 @@ pub trait Externalities: ExtensionStore { /// /// If the storage root equals the default hash as defined by the trie, the key in the top-level /// storage map will be removed. - fn child_storage_root(&mut self, child_info: &ChildInfo, state_hashing: StateVersion) -> Vec; + fn child_storage_root( + &mut self, + child_info: &ChildInfo, + state_hashing: StateVersion, + ) -> Vec; /// Append storage item. 
/// diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index ba7ea9e9e667f..c08ffd29d6b0c 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -38,11 +38,11 @@ pub use paste; #[doc(hidden)] pub use sp_application_crypto as app_crypto; +#[cfg(feature = "std")] +pub use sp_core::hashing; #[cfg(feature = "std")] pub use sp_core::storage::{Storage, StorageChild}; pub use sp_core::{StateVersion, DEFAULT_STATE_HASHING}; -#[cfg(feature = "std")] -pub use sp_core::hashing; use sp_core::{ crypto::{self, Public}, diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index e9e22b90c2d0c..1cc13777ec3f1 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -23,10 +23,12 @@ use crate::{ }; use codec::{Decode, Encode}; use hash_db::Hasher; -use sp_core::storage::{well_known_keys, ChildInfo, TrackedStorageKey}; -use sp_core::StateVersion; #[cfg(feature = "std")] use sp_core::traits::RuntimeCode; +use sp_core::{ + storage::{well_known_keys, ChildInfo, TrackedStorageKey}, + StateVersion, +}; use sp_std::vec::Vec; /// A state backend is used to read state data and can have changes committed @@ -197,7 +199,8 @@ pub trait Backend: sp_std::fmt::Debug { let mut child_roots: Vec<_> = Default::default(); // child first for (child_info, child_delta) in child_deltas { - let (child_root, empty, child_txs) = self.child_storage_root(&child_info, child_delta, state_version); + let (child_root, empty, child_txs) = + self.child_storage_root(&child_info, child_delta, state_version); let prefixed_storage_key = child_info.prefixed_storage_key(); txs.consolidate(child_txs); if empty { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 0db720ef54ae9..6ddd1f30761e6 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -107,8 +107,8 @@ impl BasicExternalities { impl PartialEq 
for BasicExternalities { fn eq(&self, other: &BasicExternalities) -> bool { - self.inner.top.eq(&other.inner.top) - && self.inner.children_default.eq(&other.inner.children_default) + self.inner.top.eq(&other.inner.top) && + self.inner.children_default.eq(&other.inner.children_default) } } @@ -174,16 +174,16 @@ impl Externalities for BasicExternalities { fn place_storage(&mut self, key: StorageKey, maybe_value: Option) { if is_child_storage_key(&key) { warn!(target: "trie", "Refuse to set child storage key via main storage"); - return; + return } match maybe_value { Some(value) => { self.inner.top.insert(key, value); - } + }, None => { self.inner.top.remove(&key); - } + }, } } @@ -224,7 +224,7 @@ impl Externalities for BasicExternalities { target: "trie", "Refuse to clear prefix that is part of child storage key via main storage" ); - return (false, 0); + return (false, 0) } let to_remove = self @@ -302,7 +302,11 @@ impl Externalities for BasicExternalities { layout.trie_root(self.inner.top.clone()).as_ref().into() } - fn child_storage_root(&mut self, child_info: &ChildInfo, state_version: StateVersion) -> Vec { + fn child_storage_root( + &mut self, + child_info: &ChildInfo, + state_version: StateVersion, + ) -> Vec { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); let in_mem = crate::in_memory_backend::new_in_mem::(); diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 5b19b6f29ee75..ecf5c095f25a4 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -24,8 +24,9 @@ use crate::{ use codec::{Decode, Encode, EncodeAppend}; use hash_db::Hasher; use sp_core::{ - hexdisplay::HexDisplay, StateVersion, + hexdisplay::HexDisplay, storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, + StateVersion, }; use sp_externalities::{Extension, ExtensionStore, Extensions, 
Externalities}; use sp_trie::{empty_child_trie_root, trie_types::Layout}; @@ -534,7 +535,9 @@ where return root.encode() } - let root = self.overlay.storage_root(self.backend, self.storage_transaction_cache, threshold); + let root = + self.overlay + .storage_root(self.backend, self.storage_transaction_cache, threshold); trace!( target: "state", method = "StorageRoot", @@ -1031,15 +1034,18 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_storage(vec![20], None); overlay.set_storage(vec![30], Some(vec![31])); - let backend = (Storage { - top: map![ - vec![10] => vec![10], - vec![20] => vec![20], - vec![40] => vec![40] - ], - children_default: map![], - }, StateVersion::default()) - .into(); + let backend = ( + Storage { + top: map![ + vec![10] => vec![10], + vec![20] => vec![20], + vec![40] => vec![40] + ], + children_default: map![], + }, + StateVersion::default(), + ) + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1077,13 +1083,16 @@ mod tests { overlay.set_storage(vec![27], None); overlay.set_storage(vec![28], None); overlay.set_storage(vec![29], None); - let backend = (Storage { - top: map![ - vec![30] => vec![30] - ], - children_default: map![], - }, StateVersion::default()) - .into(); + let backend = ( + Storage { + top: map![ + vec![30] => vec![30] + ], + children_default: map![], + }, + StateVersion::default(), + ) + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1101,20 +1110,23 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); - let backend = (Storage { - top: map![], - children_default: map![ - child_info.storage_key().to_vec() => StorageChild { - data: map![ - vec![10] => vec![10], - vec![20] => vec![20], - vec![40] => vec![40] - ], - child_info: child_info.to_owned(), - } - ], - }, StateVersion::default()) - .into(); + let 
backend = ( + Storage { + top: map![], + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { + data: map![ + vec![10] => vec![10], + vec![20] => vec![20], + vec![40] => vec![40] + ], + child_info: child_info.to_owned(), + } + ], + }, + StateVersion::default(), + ) + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1146,20 +1158,23 @@ mod tests { let mut overlay = OverlayedChanges::default(); overlay.set_child_storage(child_info, vec![20], None); overlay.set_child_storage(child_info, vec![30], Some(vec![31])); - let backend = (Storage { - top: map![], - children_default: map![ - child_info.storage_key().to_vec() => StorageChild { - data: map![ - vec![10] => vec![10], - vec![20] => vec![20], - vec![30] => vec![40] - ], - child_info: child_info.to_owned(), - } - ], - }, StateVersion::default()) - .into(); + let backend = ( + Storage { + top: map![], + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { + data: map![ + vec![10] => vec![10], + vec![20] => vec![20], + vec![30] => vec![40] + ], + child_info: child_info.to_owned(), + } + ], + }, + StateVersion::default(), + ) + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); @@ -1185,18 +1200,21 @@ mod tests { let child_info = &child_info; let mut cache = StorageTransactionCache::default(); let mut overlay = OverlayedChanges::default(); - let backend = (Storage { - top: map![], - children_default: map![ - child_info.storage_key().to_vec() => StorageChild { - data: map![ - vec![30] => vec![40] - ], - child_info: child_info.to_owned(), - } - ], - }, StateVersion::default()) - .into(); + let backend = ( + Storage { + top: map![], + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { + data: map![ + vec![30] => vec![40] + ], + child_info: child_info.to_owned(), + } + ], + }, + StateVersion::default(), + ) + .into(); let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, 
None); diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 6b68337fb2451..700b30d667912 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -22,8 +22,10 @@ use crate::{ }; use codec::Codec; use hash_db::Hasher; -use sp_core::storage::{ChildInfo, Storage}; -use sp_core::StateVersion; +use sp_core::{ + storage::{ChildInfo, Storage}, + StateVersion, +}; use sp_trie::{empty_trie_root, Layout, MemoryDB}; use std::collections::{BTreeMap, HashMap}; @@ -191,8 +193,10 @@ mod tests { let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; - let storage = storage - .update(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], state_hash); + let storage = storage.update( + vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], + state_hash, + ); let trie_backend = storage.as_trie_backend().unwrap(); assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); let storage_key = child_info.prefixed_storage_key(); @@ -205,10 +209,14 @@ mod tests { let mut storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); - storage - .insert(vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], state_hash); - storage - .insert(vec![(Some(child_info.clone()), vec![(b"1".to_vec(), Some(b"3".to_vec()))])], state_hash); + storage.insert( + vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], + state_hash, + ); + storage.insert( + vec![(Some(child_info.clone()), vec![(b"1".to_vec(), Some(b"3".to_vec()))])], + state_hash, + ); assert_eq!(storage.child_storage(&child_info, &b"2"[..]), Ok(Some(b"3".to_vec()))); assert_eq!(storage.child_storage(&child_info, &b"1"[..]), Ok(Some(b"3".to_vec()))); diff --git a/primitives/state-machine/src/lib.rs 
b/primitives/state-machine/src/lib.rs index 2187a8f9474a6..7c5f5bc2cb632 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1000,11 +1000,10 @@ mod tests { use codec::{Decode, Encode}; use sp_core::{ map, - StateVersion, storage::ChildInfo, testing::TaskExecutor, traits::{CodeExecutor, Externalities, RuntimeCode}, - NativeOrEncoded, NeverNativeValue, + NativeOrEncoded, NeverNativeValue, StateVersion, }; use sp_runtime::traits::BlakeTwo256; use std::{ diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 5db811ba0f854..2e895b5fda63f 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -38,8 +38,9 @@ use crate::{ use codec::{Decode, Encode}; use hash_db::Hasher; use sp_core::{ - offchain::OffchainOverlayedChange, StateVersion, + offchain::OffchainOverlayedChange, storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}, + StateVersion, }; use sp_externalities::{Extension, Extensions}; #[cfg(not(feature = "std"))] @@ -551,7 +552,13 @@ impl OverlayedChanges { where H::Out: Ord + Encode + 'static, { - self.drain_storage_changes(backend, changes_trie_state, parent_hash, &mut cache, state_threshold) + self.drain_storage_changes( + backend, + changes_trie_state, + parent_hash, + &mut cache, + state_threshold, + ) } /// Drain all changes into a [`StorageChanges`] instance. Leave empty overlay in place. 
diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index d7d626003c9c2..da085d8e1cfaa 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -26,8 +26,7 @@ use codec::{Codec, Decode, Encode}; use hash_db::{HashDB, Hasher, Prefix, EMPTY_PREFIX}; use log::debug; use parking_lot::RwLock; -use sp_core::storage::ChildInfo; -use sp_core::StateVersion; +use sp_core::{storage::ChildInfo, StateVersion}; pub use sp_trie::trie_types::TrieError; use sp_trie::{ empty_child_trie_root, read_child_trie_value_with, read_trie_value_with, record_all_keys, @@ -436,7 +435,8 @@ mod tests { assert_eq!(trie_backend.pairs(), proving_backend.pairs()); let (trie_root, mut trie_mdb) = trie_backend.storage_root(std::iter::empty(), state_hash); - let (proving_root, mut proving_mdb) = proving_backend.storage_root(std::iter::empty(), state_hash); + let (proving_root, mut proving_mdb) = + proving_backend.storage_root(std::iter::empty(), state_hash); assert_eq!(trie_root, proving_root); assert_eq!(trie_mdb.drain(), proving_mdb.drain()); } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index f2b31c293667a..61c45a4e39c3e 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -24,8 +24,10 @@ use crate::{ }; use codec::{Codec, Decode}; use hash_db::Hasher; -use sp_core::storage::{ChildInfo, ChildType}; -use sp_core::StateVersion; +use sp_core::{ + storage::{ChildInfo, ChildType}, + StateVersion, +}; use sp_std::{boxed::Box, vec::Vec}; use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, @@ -406,7 +408,9 @@ pub mod tests { storage_root_is_non_default_inner(StateVersion::V1); } fn storage_root_is_non_default_inner(state_hash: StateVersion) { - assert!(test_trie(state_hash).storage_root(iter::empty(), state_hash).0 != H256::repeat_byte(0)); + 
assert!( + test_trie(state_hash).storage_root(iter::empty(), state_hash).0 != H256::repeat_byte(0) + ); } #[test] @@ -415,8 +419,8 @@ pub mod tests { storage_root_transaction_is_non_empty_inner(StateVersion::V1); } fn storage_root_transaction_is_non_empty_inner(state_hash: StateVersion) { - let (new_root, mut tx) = - test_trie(state_hash).storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_hash); + let (new_root, mut tx) = test_trie(state_hash) + .storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_hash); assert!(!tx.drain().is_empty()); assert!(new_root != test_trie(state_hash).storage_root(iter::empty(), state_hash).0); } diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index ef124533da2b2..4ee595acd6619 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -19,8 +19,9 @@ //! Async externalities. use sp_core::{ - storage::{ChildInfo, TrackedStorageKey}, StateVersion, + storage::{ChildInfo, TrackedStorageKey}, traits::{Externalities, RuntimeSpawn, RuntimeSpawnExt, SpawnNamed, TaskExecutorExt}, + StateVersion, }; use sp_externalities::{Extensions, ExternalitiesExt as _}; use std::any::{Any, TypeId}; @@ -130,7 +131,11 @@ impl Externalities for AsyncExternalities { panic!("`storage_root`: should not be used in async externalities!") } - fn child_storage_root(&mut self, _child_info: &ChildInfo, _state_hashing: StateVersion) -> Vec { + fn child_storage_root( + &mut self, + _child_info: &ChildInfo, + _state_hashing: StateVersion, + ) -> Vec { panic!("`child_storage_root`: should not be used in async externalities!") } diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index bd784a5374889..27ceaf6104297 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -27,8 +27,8 @@ use std::collections::HashSet; use std::fmt; use codec::{Decode, Encode}; -pub use 
sp_runtime::{create_runtime_str, StateVersion, DEFAULT_STATE_HASHING}; use sp_runtime::RuntimeString; +pub use sp_runtime::{create_runtime_str, StateVersion, DEFAULT_STATE_HASHING}; #[doc(hidden)] pub use sp_std; diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 559ca18cbf31c..fea960e20939a 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -216,7 +216,8 @@ impl sc_consensus::LongestChain, ) where - ExecutorDispatch: sc_client_api::CallExecutor + sc_executor::RuntimeVersionOf + 'static, + ExecutorDispatch: + sc_client_api::CallExecutor + sc_executor::RuntimeVersionOf + 'static, Backend: sc_client_api::backend::Backend, >::OffchainStorage: 'static, { From 877c36e77dfca9b1b453e83238bf4570c993b748 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 9 Sep 2021 10:07:04 +0200 Subject: [PATCH 085/188] fmt --- primitives/state-machine/Cargo.toml | 1 - primitives/trie/src/trie_codec.rs | 23 +++++++++++------------ 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index dc54486e2078c..e5c9ea9890687 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -35,7 +35,6 @@ tracing = { version = "0.1.22", optional = true } hex-literal = "0.3.1" sp-runtime = { version = "4.0.0-dev", path = "../runtime" } pretty_assertions = "0.6.1" -rand = { version = "0.7.2", feature = ["small_rng"] } [features] default = ["std"] diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index b9fc428263d25..f7eb79e2864c4 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -83,7 +83,7 @@ impl fmt::Display for Error { Error::ExtraneousChildNode => write!(f, "Child node content with no root in proof"), Error::ExtraneousChildProof(root) => { write!(f, "Proof of child trie {:x?} not in parent proof", root.as_ref()) - }, + } Error::RootMismatch(root, expected) => 
write!( f, "Verification error, root is {:x?}, expected: {:x?}", @@ -119,7 +119,7 @@ where // Only check root if expected root is passed as argument. if let Some(expected_root) = expected_root { if expected_root != &top_root { - return Err(Error::RootMismatch(top_root.clone(), expected_root.clone())) + return Err(Error::RootMismatch(top_root.clone(), expected_root.clone())); } } @@ -140,11 +140,11 @@ where let mut root = TrieHash::::default(); // still in a proof so prevent panic if root.as_mut().len() != value.as_slice().len() { - return Err(Error::InvalidChildRoot(key, value)) + return Err(Error::InvalidChildRoot(key, value)); } root.as_mut().copy_from_slice(value.as_ref()); child_tries.push(root); - }, + } // allow incomplete database error: we only // require access to data in the proof. Some(Err(error)) => match *error { @@ -158,15 +158,14 @@ where } if !HashDBT::::contains(db, &top_root, EMPTY_PREFIX) { - return Err(Error::IncompleteProof) + return Err(Error::IncompleteProof); } let mut previous_extracted_child_trie = None; let mut nodes_iter = nodes_iter.peekable(); for child_root in child_tries.into_iter() { if previous_extracted_child_trie.is_none() && nodes_iter.peek().is_some() { - let (top_root, _) = - trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; + let (top_root, _) = trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; previous_extracted_child_trie = Some(top_root); } @@ -181,11 +180,11 @@ where if let Some(child_root) = previous_extracted_child_trie { // A child root was read from proof but is not present // in top trie. 
- return Err(Error::ExtraneousChildProof(child_root)) + return Err(Error::ExtraneousChildProof(child_root)); } if nodes_iter.next().is_some() { - return Err(Error::ExtraneousChildNode) + return Err(Error::ExtraneousChildNode); } Ok(top_root) @@ -220,11 +219,11 @@ where let mut root = TrieHash::::default(); if root.as_mut().len() != value.as_slice().len() { // some child trie root in top trie are not an encoded hash. - return Err(Error::InvalidChildRoot(key.to_vec(), value.to_vec())) + return Err(Error::InvalidChildRoot(key.to_vec(), value.to_vec())); } root.as_mut().copy_from_slice(value.as_ref()); child_tries.push(root); - }, + } // allow incomplete database error: we only // require access to data in the proof. Some(Err(error)) => match *error { @@ -243,7 +242,7 @@ where if !HashDBT::::contains(&partial_db, &child_root, EMPTY_PREFIX) { // child proof are allowed to be missing (unused root can be included // due to trie structure modification). - continue + continue; } let trie = crate::TrieDB::::new(&partial_db, &child_root)?; From 8514d5fd90a4babf70f12466e0b9d1e0c646d45d Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 9 Sep 2021 10:23:59 +0200 Subject: [PATCH 086/188] fix test --- primitives/state-machine/src/lib.rs | 3 ++- primitives/trie/src/trie_codec.rs | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index a4045af772654..6d81702fa25ab 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -1630,7 +1630,8 @@ mod tests { storage.insert(Some(child_info), items); } - let trie: InMemoryBackend = storage.clone().into(); + let trie: InMemoryBackend = + (storage.clone(), StateVersion::default()).into(); let trie_root = trie.root().clone(); let backend = crate::ProvingBackend::new(&trie); let mut queries = Vec::new(); diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index 
f7eb79e2864c4..dfddec47baba5 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -83,7 +83,7 @@ impl fmt::Display for Error { Error::ExtraneousChildNode => write!(f, "Child node content with no root in proof"), Error::ExtraneousChildProof(root) => { write!(f, "Proof of child trie {:x?} not in parent proof", root.as_ref()) - } + }, Error::RootMismatch(root, expected) => write!( f, "Verification error, root is {:x?}, expected: {:x?}", @@ -119,7 +119,7 @@ where // Only check root if expected root is passed as argument. if let Some(expected_root) = expected_root { if expected_root != &top_root { - return Err(Error::RootMismatch(top_root.clone(), expected_root.clone())); + return Err(Error::RootMismatch(top_root.clone(), expected_root.clone())) } } @@ -140,11 +140,11 @@ where let mut root = TrieHash::::default(); // still in a proof so prevent panic if root.as_mut().len() != value.as_slice().len() { - return Err(Error::InvalidChildRoot(key, value)); + return Err(Error::InvalidChildRoot(key, value)) } root.as_mut().copy_from_slice(value.as_ref()); child_tries.push(root); - } + }, // allow incomplete database error: we only // require access to data in the proof. Some(Err(error)) => match *error { @@ -158,7 +158,7 @@ where } if !HashDBT::::contains(db, &top_root, EMPTY_PREFIX) { - return Err(Error::IncompleteProof); + return Err(Error::IncompleteProof) } let mut previous_extracted_child_trie = None; @@ -180,11 +180,11 @@ where if let Some(child_root) = previous_extracted_child_trie { // A child root was read from proof but is not present // in top trie. 
- return Err(Error::ExtraneousChildProof(child_root)); + return Err(Error::ExtraneousChildProof(child_root)) } if nodes_iter.next().is_some() { - return Err(Error::ExtraneousChildNode); + return Err(Error::ExtraneousChildNode) } Ok(top_root) @@ -219,11 +219,11 @@ where let mut root = TrieHash::::default(); if root.as_mut().len() != value.as_slice().len() { // some child trie root in top trie are not an encoded hash. - return Err(Error::InvalidChildRoot(key.to_vec(), value.to_vec())); + return Err(Error::InvalidChildRoot(key.to_vec(), value.to_vec())) } root.as_mut().copy_from_slice(value.as_ref()); child_tries.push(root); - } + }, // allow incomplete database error: we only // require access to data in the proof. Some(Err(error)) => match *error { @@ -242,7 +242,7 @@ where if !HashDBT::::contains(&partial_db, &child_root, EMPTY_PREFIX) { // child proof are allowed to be missing (unused root can be included // due to trie structure modification). - continue; + continue } let trie = crate::TrieDB::::new(&partial_db, &child_root)?; From 3385eaeff5ea439398b0f6d45fbb8b9816a77b8f Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 9 Sep 2021 14:11:31 +0200 Subject: [PATCH 087/188] revert useless changes. 
--- client/api/src/cht.rs | 5 ++-- client/db/src/lib.rs | 47 ++++++++----------------------------- client/light/src/fetcher.rs | 2 +- 3 files changed, 13 insertions(+), 41 deletions(-) diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 163422432c362..e10096953dc97 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -32,8 +32,7 @@ use sp_trie; use sp_core::{convert_hash, H256}; use sp_runtime::traits::{AtLeast32Bit, Header as HeaderT, One, Zero}; use sp_state_machine::{ - prove_read_on_trie_backend, read_proof_check, - read_proof_check_on_proving_backend_generic as read_proof_check_on_proving_backend, + prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend, Backend as StateBackend, InMemoryBackend, MemoryDB, StorageProof, TrieBackend, }; @@ -174,7 +173,7 @@ where local_number, remote_hash, |_, local_cht_key| { - read_proof_check_on_proving_backend::(proving_backend, local_cht_key) + read_proof_check_on_proving_backend::(proving_backend, local_cht_key) .map_err(ClientError::from_state) }, ) diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index ad061c05b4242..9536690124ebe 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -243,24 +243,24 @@ impl StateBackend> for RefTrackingState { fn storage_root<'a>( &self, delta: impl Iterator)>, - state_hash: StateVersion, + state_version: StateVersion, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord, { - self.state.storage_root(delta, state_hash) + self.state.storage_root(delta, state_version) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - state_hash: StateVersion, + state_version: StateVersion, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord, { - self.state.child_storage_root(child_info, delta, state_hash) + self.state.child_storage_root(child_info, delta, state_version) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -845,7 +845,7 @@ impl BlockImportOperation { fn apply_new_state( &mut self, storage: 
Storage, - state_hash: StateVersion, + state_version: StateVersion, ) -> ClientResult { if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(&k)) { return Err(sp_blockchain::Error::InvalidState.into()) @@ -867,7 +867,7 @@ impl BlockImportOperation { (&k[..], Some(&v[..])) }), child_delta, - state_hash, + state_version, ); let changes_trie_config = match changes_trie_config { @@ -922,36 +922,9 @@ impl sc_client_api::backend::BlockImportOperation fn reset_storage( &mut self, storage: Storage, - state_hash: StateVersion, + state_version: StateVersion, ) -> ClientResult { - if storage.top.keys().any(|k| well_known_keys::is_child_storage_key(&k)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()) - } - - let child_delta = storage.children_default.iter().map(|(_storage_key, child_content)| { - ( - &child_content.child_info, - child_content.data.iter().map(|(k, v)| (&k[..], Some(&v[..]))), - ) - }); - - let mut changes_trie_config: Option = None; - let (root, transaction) = self.old_state.full_storage_root( - storage.top.iter().map(|(k, v)| { - if &k[..] 
== well_known_keys::CHANGES_TRIE_CONFIG { - changes_trie_config = Some( - Decode::decode(&mut &v[..]) - .expect("changes trie configuration is encoded properly at genesis"), - ); - } - (&k[..], Some(&v[..])) - }), - child_delta, - state_hash, - ); - - self.db_updates = transaction; - self.changes_trie_config_update = Some(changes_trie_config); + let root = self.apply_new_state(storage, state_version)?; self.commit_state = true; Ok(root) } @@ -960,9 +933,9 @@ impl sc_client_api::backend::BlockImportOperation &mut self, storage: Storage, commit: bool, - state_hash: StateVersion, + state_version: StateVersion, ) -> ClientResult { - let root = self.apply_new_state(storage, state_hash)?; + let root = self.apply_new_state(storage, state_version)?; self.commit_state = commit; Ok(root) } diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index fb16e83c67ab3..5740e407a5e89 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -169,7 +169,7 @@ impl> LightDataChecker { remote_roots_proof: StorageProof, ) -> ClientResult<()> { // all the checks are sharing the same storage - let storage: sp_state_machine::MemoryDB> = remote_roots_proof.into_memory_db(); + let storage = remote_roots_proof.into_memory_db(); // remote_roots.keys() are sorted => we can use this to group changes tries roots // that are belongs to the same CHT From 3be7538ea05b9a589ddb35328a435dfe5761b161 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 9 Sep 2021 14:46:19 +0200 Subject: [PATCH 088/188] clean some unused changes --- client/network/test/src/block_import.rs | 3 +- client/service/test/src/client/mod.rs | 2 +- primitives/blockchain/src/error.rs | 3 - primitives/externalities/src/lib.rs | 4 +- primitives/state-machine/src/basic.rs | 5 +- .../state-machine/src/changes_trie/storage.rs | 2 +- primitives/state-machine/src/ext.rs | 12 +-- .../state-machine/src/in_memory_backend.rs | 16 ++-- primitives/state-machine/src/lib.rs | 83 ++++++++----------- 
.../src/overlayed_changes/mod.rs | 18 ++-- .../state-machine/src/proving_backend.rs | 38 ++++----- primitives/state-machine/src/read_only.rs | 4 +- primitives/state-machine/src/trie_backend.rs | 32 +++---- primitives/storage/src/lib.rs | 12 +-- primitives/tasks/src/async_externalities.rs | 4 +- primitives/trie/src/trie_codec.rs | 4 +- 16 files changed, 107 insertions(+), 135 deletions(-) diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index 1c9b218fc1a1f..7b5804e0edb77 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -69,9 +69,8 @@ fn import_single_good_block_works() { let mut expected_aux = ImportedAux::default(); expected_aux.is_new_best = true; - let mut client = substrate_test_runtime_client::new(); match block_on(import_single_block( - &mut client, + &mut substrate_test_runtime_client::new(), BlockOrigin::File, block, &mut PassThroughVerifier::new(true), diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index c4661a6caf8a9..b1a3c84beefce 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -296,7 +296,7 @@ fn construct_genesis_should_work_with_wasm() { .genesis_map(); let genesis_hash = insert_genesis_block(&mut storage); - let backend = InMemoryBackend::from((storage, StateVersion::default())); // TODO state version from runtime code?? 
+ let backend = InMemoryBackend::from((storage, StateVersion::default())); let (b1data, _b1hash) = block1(genesis_hash, &backend); let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index 6e60b2da6549f..ef3afa5bce942 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -90,9 +90,6 @@ pub enum Error { #[error("Failed to get runtime version: {0}")] VersionInvalid(String), - #[error("Genesis config provided is invalid")] - GenesisInvalid, - #[error("Provided state is invalid")] InvalidState, diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 5055cd292e698..2465f86e4fc0b 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -157,7 +157,7 @@ pub trait Externalities: ExtensionStore { /// This will also update all child storage keys in the top-level storage map. /// /// The returned hash is defined by the `Block` and is SCALE encoded. - fn storage_root(&mut self, state_hashing: StateVersion) -> Vec; + fn storage_root(&mut self, state_version: StateVersion) -> Vec; /// Get the trie root of a child storage map. /// @@ -168,7 +168,7 @@ pub trait Externalities: ExtensionStore { fn child_storage_root( &mut self, child_info: &ChildInfo, - state_hashing: StateVersion, + state_version: StateVersion, ) -> Vec; /// Append storage item. 
diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 6ddd1f30761e6..1236d9299bd44 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -309,8 +309,9 @@ impl Externalities for BasicExternalities { ) -> Vec { if let Some(child) = self.inner.children_default.get(child_info.storage_key()) { let delta = child.data.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))); - let in_mem = crate::in_memory_backend::new_in_mem::(); - in_mem.child_storage_root(&child.child_info, delta, state_version).0 + crate::in_memory_backend::new_in_mem::() + .child_storage_root(&child.child_info, delta, state_version) + .0 } else { empty_child_trie_root::>() } diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 6a81edc90ff11..bd5e3a32b5657 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -191,7 +191,7 @@ impl Storage for InMemoryStorage Result, String> { - Ok( as hash_db::HashDBRef>::get(&self.data.read().mdb, key, prefix)) + MemoryDB::::get(&self.data.read().mdb, key, prefix) } } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index ecf5c095f25a4..f3ba89dc0d02d 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -522,7 +522,7 @@ where StorageAppend::new(current_value).append(value); } - fn storage_root(&mut self, threshold: StateVersion) -> Vec { + fn storage_root(&mut self, state_version: StateVersion) -> Vec { let _guard = guard(); if let Some(ref root) = self.storage_transaction_cache.transaction_storage_root { trace!( @@ -537,7 +537,7 @@ where let root = self.overlay - .storage_root(self.backend, self.storage_transaction_cache, threshold); + .storage_root(self.backend, self.storage_transaction_cache, state_version); trace!( target: "state", method = "StorageRoot", @@ 
-548,7 +548,7 @@ where root.encode() } - fn child_storage_root(&mut self, child_info: &ChildInfo, threshold: StateVersion) -> Vec { + fn child_storage_root(&mut self, child_info: &ChildInfo, state_version: StateVersion) -> Vec { let _guard = guard(); let storage_key = child_info.storage_key(); let prefixed_storage_key = child_info.prefixed_storage_key(); @@ -569,7 +569,7 @@ where } else { let root = if let Some((changes, info)) = self.overlay.child_changes(storage_key) { let delta = changes.map(|(k, v)| (k.as_ref(), v.value().map(AsRef::as_ref))); - Some(self.backend.child_storage_root(info, delta, threshold)) + Some(self.backend.child_storage_root(info, delta, state_version)) } else { None }; @@ -733,7 +733,7 @@ where fn commit(&mut self) { // Bench always use latest state. - let state_threshold = StateVersion::default(); + let state_version = StateVersion::default(); for _ in 0..self.overlay.transaction_depth() { self.overlay.commit_transaction().expect(BENCHMARKING_FN); } @@ -745,7 +745,7 @@ where None, Default::default(), self.storage_transaction_cache, - state_threshold, + state_version, ) .expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 700b30d667912..b260beebbf0ff 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -115,7 +115,7 @@ where H::Out: Codec + Ord, { fn from( - (inner, state_hashing): ( + (inner, state_version): ( HashMap, BTreeMap>, StateVersion, ), @@ -125,7 +125,7 @@ where inner .into_iter() .map(|(k, m)| (k, m.into_iter().map(|(k, v)| (k, Some(v))).collect())), - state_hashing, + state_version, ); backend } @@ -135,14 +135,14 @@ impl From<(Storage, StateVersion)> for TrieBackend, H> where H::Out: Codec + Ord, { - fn from((inners, state_hashing): (Storage, StateVersion)) -> Self { + fn from((inners, state_version): (Storage, StateVersion)) -> Self { let mut 
inner: HashMap, BTreeMap> = inners .children_default .into_iter() .map(|(_k, c)| (Some(c.child_info), c.data)) .collect(); inner.insert(None, inners.top); - (inner, state_hashing).into() + (inner, state_version).into() } } @@ -151,10 +151,10 @@ impl From<(BTreeMap, StateVersion)> where H::Out: Codec + Ord, { - fn from((inner, state_hashing): (BTreeMap, StateVersion)) -> Self { + fn from((inner, state_version): (BTreeMap, StateVersion)) -> Self { let mut expanded = HashMap::new(); expanded.insert(None, inner); - (expanded, state_hashing).into() + (expanded, state_version).into() } } @@ -164,7 +164,7 @@ where H::Out: Codec + Ord, { fn from( - (inner, state_hashing): (Vec<(Option, StorageCollection)>, StateVersion), + (inner, state_version): (Vec<(Option, StorageCollection)>, StateVersion), ) -> Self { let mut expanded: HashMap, BTreeMap> = HashMap::new(); @@ -176,7 +176,7 @@ where } } } - (expanded, state_hashing).into() + (expanded, state_version).into() } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 6d81702fa25ab..f06041647b9ed 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -928,19 +928,6 @@ mod execution { where H: Hasher, H::Out: Ord + Codec, - { - read_proof_check_on_proving_backend_generic(proving_backend, key) - } - - /// Check storage read proof on pre-created proving backend. 
- pub fn read_proof_check_on_proving_backend_generic( - proving_backend: &TrieBackend, H>, - key: &[u8], - ) -> Result>, Box> - where - H: Hasher, - H::Out: Ord + Codec, - KF: sp_trie::KeyFunction + Send + Sync, { proving_backend.storage(key).map_err(|e| Box::new(e) as Box) } @@ -1074,8 +1061,8 @@ mod tests { execute_works_inner(StateVersion::V0); execute_works_inner(StateVersion::V1); } - fn execute_works_inner(hashed: StateVersion) { - let backend = trie_backend::tests::test_trie(hashed); + fn execute_works_inner(state_version: StateVersion) { + let backend = trie_backend::tests::test_trie(state_version); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1104,8 +1091,8 @@ mod tests { execute_works_with_native_else_wasm_inner(StateVersion::V0); execute_works_with_native_else_wasm_inner(StateVersion::V1); } - fn execute_works_with_native_else_wasm_inner(state_hash: StateVersion) { - let backend = trie_backend::tests::test_trie(state_hash); + fn execute_works_with_native_else_wasm_inner(state_version: StateVersion) { + let backend = trie_backend::tests::test_trie(state_version); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1134,9 +1121,9 @@ mod tests { dual_execution_strategy_detects_consensus_failure_inner(StateVersion::V0); dual_execution_strategy_detects_consensus_failure_inner(StateVersion::V1); } - fn dual_execution_strategy_detects_consensus_failure_inner(state_hash: StateVersion) { + fn dual_execution_strategy_detects_consensus_failure_inner(state_version: StateVersion) { let mut consensus_failed = false; - let backend = trie_backend::tests::test_trie(state_hash); + let backend = trie_backend::tests::test_trie(state_version); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); @@ -1174,7 +1161,7 @@ mod tests { prove_execution_and_proof_check_works_inner(StateVersion::V0); prove_execution_and_proof_check_works_inner(StateVersion::V1); } - fn 
prove_execution_and_proof_check_works_inner(state_hash: StateVersion) { + fn prove_execution_and_proof_check_works_inner(state_version: StateVersion) { let executor = DummyCodeExecutor { change_changes_trie_config: false, native_available: true, @@ -1183,8 +1170,8 @@ mod tests { }; // fetch execution proof from 'remote' full node - let mut remote_backend = trie_backend::tests::test_trie(state_hash); - let remote_root = remote_backend.storage_root(std::iter::empty(), state_hash).0; + let mut remote_backend = trie_backend::tests::test_trie(state_version); + let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let (remote_result, remote_proof) = prove_execution::<_, _, u64, _, _>( &mut remote_backend, &mut Default::default(), @@ -1542,14 +1529,14 @@ mod tests { prove_read_and_proof_check_works_inner(StateVersion::V0); prove_read_and_proof_check_works_inner(StateVersion::V1); } - fn prove_read_and_proof_check_works_inner(state_hash: StateVersion) { + fn prove_read_and_proof_check_works_inner(state_version: StateVersion) { let child_info = ChildInfo::new_default(b"sub1"); let missing_child_info = ChildInfo::new_default(b"sub1sub2"); // key will include other child root to proof. 
let child_info = &child_info; let missing_child_info = &missing_child_info; // fetch read proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(state_hash); - let remote_root = remote_backend.storage_root(std::iter::empty(), state_hash).0; + let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); // check proof locally @@ -1566,8 +1553,8 @@ mod tests { ); assert_eq!(local_result2, false); // on child trie - let remote_backend = trie_backend::tests::test_trie(state_hash); - let remote_root = remote_backend.storage_root(std::iter::empty(), state_hash).0; + let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let remote_proof = prove_child_read(remote_backend, child_info, &[b"value3"]).unwrap(); let remote_proof = test_compact(remote_proof, &remote_root); let local_result1 = read_child_proof_check::( @@ -1694,16 +1681,16 @@ mod tests { #[test] fn prove_read_with_size_limit_works() { - let state_hash = StateVersion::V0; - let remote_backend = trie_backend::tests::test_trie(state_hash); - let remote_root = remote_backend.storage_root(::std::iter::empty(), state_hash).0; + let state_version = StateVersion::V0; + let remote_backend = trie_backend::tests::test_trie(state_version); + let remote_root = remote_backend.storage_root(::std::iter::empty(), state_version).0; let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 0, None).unwrap(); // Alwasys contains at least some nodes. 
assert_eq!(proof.into_memory_db::().drain().len(), 3); assert_eq!(count, 1); - let remote_backend = trie_backend::tests::test_trie(state_hash); + let remote_backend = trie_backend::tests::test_trie(state_version); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 800, Some(&[])).unwrap(); assert_eq!(proof.clone().into_memory_db::().drain().len(), 9); @@ -1726,7 +1713,7 @@ mod tests { assert_eq!(results.len() as u32, 101); assert_eq!(completed, false); - let remote_backend = trie_backend::tests::test_trie(state_hash); + let remote_backend = trie_backend::tests::test_trie(state_version); let (proof, count) = prove_range_read_with_size(remote_backend, None, None, 50000, Some(&[])).unwrap(); assert_eq!(proof.clone().into_memory_db::().drain().len(), 11); @@ -1745,10 +1732,10 @@ mod tests { } #[test] - fn inner_state_hashing_switch_proofs() { + fn inner_state_versioning_switch_proofs() { let mut layout = Layout::default(); - let mut state_hash = StateVersion::V0; - let (mut mdb, mut root) = trie_backend::tests::test_db(state_hash); + let mut state_version = StateVersion::V0; + let (mut mdb, mut root) = trie_backend::tests::test_db(state_version); { let mut trie = TrieDBMut::from_existing_with_layout(&mut mdb, &mut root, layout.clone()).unwrap(); @@ -1760,9 +1747,9 @@ mod tests { .expect("insert failed"); } - let check_proof = |mdb, root, state_hash| -> StorageProof { + let check_proof = |mdb, root, state_version| -> StorageProof { let remote_backend = TrieBackend::new(mdb, root); - let remote_root = remote_backend.storage_root(std::iter::empty(), state_hash).0; + let remote_root = remote_backend.storage_root(std::iter::empty(), state_version).0; let remote_proof = prove_read(remote_backend, &[b"foo222"]).unwrap(); // check proof locally let local_result1 = @@ -1776,7 +1763,7 @@ mod tests { remote_proof }; - let remote_proof = check_proof(mdb.clone(), root.clone(), state_hash); + let remote_proof = check_proof(mdb.clone(), root.clone(), 
state_version); // check full values in proof assert!(remote_proof.encode().len() > 1_100); assert!(remote_proof.encoded_size() > 1_100); @@ -1784,7 +1771,7 @@ mod tests { // do switch layout = Layout::with_max_inline_value(sp_core::storage::DEFAULT_MAX_INLINE_VALUE); - state_hash = StateVersion::V1; + state_version = StateVersion::V1; { let mut trie = TrieDBMut::from_existing_with_layout(&mut mdb, &mut root, layout.clone()).unwrap(); @@ -1796,7 +1783,7 @@ mod tests { } let root3 = root.clone(); assert!(root1 != root3); - let remote_proof = check_proof(mdb.clone(), root.clone(), state_hash); + let remote_proof = check_proof(mdb.clone(), root.clone(), state_version); // nodes foo is replaced by its hashed value form. assert!(remote_proof.encode().len() < 1000); assert!(remote_proof.encoded_size() < 1000); @@ -1809,14 +1796,14 @@ mod tests { let size_inner_hash = compact_multiple_child_trie_inner(StateVersion::V1); assert!(size_inner_hash < size_no_inner_hash); } - fn compact_multiple_child_trie_inner(state_hash: StateVersion) -> usize { + fn compact_multiple_child_trie_inner(state_version: StateVersion) -> usize { // this root will be queried let child_info1 = ChildInfo::new_default(b"sub1"); // this root will not be include in proof let child_info2 = ChildInfo::new_default(b"sub2"); // this root will be include in proof let child_info3 = ChildInfo::new_default(b"sub"); - let remote_backend = trie_backend::tests::test_trie(state_hash); + let remote_backend = trie_backend::tests::test_trie(state_version); let long_vec: Vec = (0..1024usize).map(|_| 8u8).collect(); let (remote_root, transaction) = remote_backend.full_storage_root( std::iter::empty(), @@ -1846,7 +1833,7 @@ mod tests { ), ] .into_iter(), - state_hash, + state_version, ); let mut remote_storage = remote_backend.into_storage(); remote_storage.consolidate(transaction); @@ -1868,7 +1855,7 @@ mod tests { #[test] fn child_storage_uuid() { - let state_hash = StateVersion::V0; + let state_version = 
StateVersion::V0; let child_info_1 = ChildInfo::new_default(b"sub_test1"); let child_info_2 = ChildInfo::new_default(b"sub_test2"); @@ -1876,7 +1863,7 @@ mod tests { let mut overlay = OverlayedChanges::default(); let mut transaction = { - let backend = test_trie(state_hash); + let backend = test_trie(state_version); let mut cache = StorageTransactionCache::default(); let mut ext = Ext::new( &mut overlay, @@ -1887,7 +1874,7 @@ mod tests { ); ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec()); - ext.storage_root(state_hash); + ext.storage_root(state_version); cache.transaction.unwrap() }; let mut duplicate = false; @@ -1939,13 +1926,13 @@ mod tests { #[test] fn runtime_registered_extensions_are_removed_after_execution() { - let state_hash = StateVersion::default(); + let state_version = StateVersion::default(); use sp_externalities::ExternalitiesExt; sp_externalities::decl_extension! { struct DummyExt(u32); } - let backend = trie_backend::tests::test_trie(state_hash); + let backend = trie_backend::tests::test_trie(state_version); let mut overlayed_changes = Default::default(); let wasm_code = RuntimeCode::empty(); diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 2e895b5fda63f..2ac77ffc388e3 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -547,7 +547,7 @@ impl OverlayedChanges { changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: StorageTransactionCache, - state_threshold: StateVersion, + state_version: StateVersion, ) -> Result, DefaultError> where H::Out: Ord + Encode + 'static, @@ -557,7 +557,7 @@ impl OverlayedChanges { changes_trie_state, parent_hash, &mut cache, - state_threshold, + state_version, ) } @@ -568,14 +568,14 @@ impl OverlayedChanges { #[cfg(feature = "std")] 
changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, mut cache: &mut StorageTransactionCache, - state_threshold: StateVersion, + state_version: StateVersion, ) -> Result, DefaultError> where H::Out: Ord + Encode + 'static, { // If the transaction does not exist, we generate it. if cache.transaction.is_none() { - self.storage_root(backend, &mut cache, state_threshold); + self.storage_root(backend, &mut cache, state_version); } let (transaction, transaction_storage_root) = cache @@ -651,7 +651,7 @@ impl OverlayedChanges { &self, backend: &B, cache: &mut StorageTransactionCache, - threshold: sp_core::StateVersion, + state_version: sp_core::StateVersion, ) -> H::Out where H::Out: Ord + Encode, @@ -661,7 +661,7 @@ impl OverlayedChanges { (info, changes.map(|(k, v)| (&k[..], v.value().map(|v| &v[..])))) }); - let (root, transaction) = backend.full_storage_root(delta, child_delta, threshold); + let (root, transaction) = backend.full_storage_root(delta, child_delta, state_version); cache.transaction = Some(transaction); cache.transaction_storage_root = Some(root); @@ -933,7 +933,7 @@ mod tests { #[test] fn overlayed_storage_root_works() { - let state_hash = StateVersion::default(); + let state_version = StateVersion::default(); let initial: BTreeMap<_, _> = vec![ (b"doe".to_vec(), b"reindeer".to_vec()), (b"dog".to_vec(), b"puppyXXX".to_vec()), @@ -942,7 +942,7 @@ mod tests { ] .into_iter() .collect(); - let backend = InMemoryBackend::::from((initial, state_hash)); + let backend = InMemoryBackend::::from((initial, state_version)); let mut overlay = OverlayedChanges::default(); overlay.set_collect_extrinsics(false); @@ -967,7 +967,7 @@ mod tests { const ROOT: [u8; 32] = hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); - assert_eq!(&ext.storage_root(state_hash)[..], &ROOT); + assert_eq!(&ext.storage_root(state_version)[..], &ROOT); } #[test] diff --git a/primitives/state-machine/src/proving_backend.rs 
b/primitives/state-machine/src/proving_backend.rs index da085d8e1cfaa..96686d25a7e7b 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -330,24 +330,24 @@ where fn storage_root<'b>( &self, delta: impl Iterator)>, - threshold: StateVersion, + state_version: StateVersion, ) -> (H::Out, Self::Transaction) where H::Out: Ord, { - self.0.storage_root(delta, threshold) + self.0.storage_root(delta, state_version) } fn child_storage_root<'b>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - threshold: StateVersion, + state_version: StateVersion, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord, { - self.0.child_storage_root(child_info, delta, threshold) + self.0.child_storage_root(child_info, delta, state_version) } fn register_overlay_stats(&self, _stats: &crate::stats::StateMachineStats) {} @@ -428,15 +428,15 @@ mod tests { passes_through_backend_calls_inner(StateVersion::V0); passes_through_backend_calls_inner(StateVersion::V1); } - fn passes_through_backend_calls_inner(state_hash: StateVersion) { - let trie_backend = test_trie(state_hash); + fn passes_through_backend_calls_inner(state_version: StateVersion) { + let trie_backend = test_trie(state_version); let proving_backend = test_proving(&trie_backend); assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - let (trie_root, mut trie_mdb) = trie_backend.storage_root(std::iter::empty(), state_hash); + let (trie_root, mut trie_mdb) = trie_backend.storage_root(std::iter::empty(), state_version); let (proving_root, mut proving_mdb) = - proving_backend.storage_root(std::iter::empty(), state_hash); + proving_backend.storage_root(std::iter::empty(), state_version); assert_eq!(trie_root, proving_root); assert_eq!(trie_mdb.drain(), proving_mdb.drain()); } @@ -446,7 +446,7 @@ mod tests { proof_recorded_and_checked_inner(StateVersion::V0); 
proof_recorded_and_checked_inner(StateVersion::V1); } - fn proof_recorded_and_checked_inner(state_hash: StateVersion) { + fn proof_recorded_and_checked_inner(state_version: StateVersion) { let size_content = 34; // above hashable value treshold. let value_range = 0..64; let contents = value_range @@ -454,14 +454,14 @@ mod tests { .map(|i| (vec![i], Some(vec![i; size_content]))) .collect::>(); let in_memory = InMemoryBackend::::default(); - let in_memory = in_memory.update(vec![(None, contents)], state_hash); - let in_memory_root = in_memory.storage_root(std::iter::empty(), state_hash).0; + let in_memory = in_memory.update(vec![(None, contents)], state_version); + let in_memory_root = in_memory.storage_root(std::iter::empty(), state_version).0; value_range.clone().for_each(|i| { assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i; size_content]) }); let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(std::iter::empty(), state_hash).0; + let trie_root = trie.storage_root(std::iter::empty(), state_version).0; assert_eq!(in_memory_root, trie_root); value_range .clone() @@ -482,7 +482,7 @@ mod tests { proof_recorded_and_checked_with_child_inner(StateVersion::V0); proof_recorded_and_checked_with_child_inner(StateVersion::V1); } - fn proof_recorded_and_checked_with_child_inner(state_hash: StateVersion) { + fn proof_recorded_and_checked_with_child_inner(state_version: StateVersion) { let child_info_1 = ChildInfo::new_default(b"sub1"); let child_info_2 = ChildInfo::new_default(b"sub2"); let child_info_1 = &child_info_1; @@ -492,14 +492,14 @@ mod tests { (Some(child_info_1.clone()), (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), (Some(child_info_2.clone()), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; - let mut in_memory = InMemoryBackend::::default(); - in_memory = in_memory.update(contents, state_hash); + let in_memory = InMemoryBackend::::default(); + let in_memory = in_memory.update(contents, state_version); 
let child_storage_keys = vec![child_info_1.to_owned(), child_info_2.to_owned()]; let in_memory_root = in_memory .full_storage_root( std::iter::empty(), child_storage_keys.iter().map(|k| (k, std::iter::empty())), - state_hash, + state_version, ) .0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -511,7 +511,7 @@ mod tests { }); let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(std::iter::empty(), state_hash).0; + let trie_root = trie.storage_root(std::iter::empty(), state_version).0; assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -542,8 +542,8 @@ mod tests { storage_proof_encoded_size_estimation_works_inner(StateVersion::V0); storage_proof_encoded_size_estimation_works_inner(StateVersion::V1); } - fn storage_proof_encoded_size_estimation_works_inner(state_hash: StateVersion) { - let trie_backend = test_trie(state_hash); + fn storage_proof_encoded_size_estimation_works_inner(state_version: StateVersion) { + let trie_backend = test_trie(state_version); let backend = test_proving(&trie_backend); let check_estimation = diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 45e903b4ececf..a1dcb31e68382 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -145,11 +145,11 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("storage_append is not supported in ReadOnlyExternalities") } - fn storage_root(&mut self, _threshold: StateVersion) -> Vec { + fn storage_root(&mut self, _state_version: StateVersion) -> Vec { unimplemented!("storage_root is not supported in ReadOnlyExternalities") } - fn child_storage_root(&mut self, _child_info: &ChildInfo, _threshold: StateVersion) -> Vec { + fn child_storage_root(&mut self, _child_info: &ChildInfo, _state_version: StateVersion) -> Vec { 
unimplemented!("child_storage_root is not supported in ReadOnlyExternalities") } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 61c45a4e39c3e..0b09d01691f1a 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -339,8 +339,8 @@ pub mod tests { read_from_storage_returns_some_inner(StateVersion::V0); read_from_storage_returns_some_inner(StateVersion::V1); } - fn read_from_storage_returns_some_inner(state_hash: StateVersion) { - assert_eq!(test_trie(state_hash).storage(b"key").unwrap(), Some(b"value".to_vec())); + fn read_from_storage_returns_some_inner(state_version: StateVersion) { + assert_eq!(test_trie(state_version).storage(b"key").unwrap(), Some(b"value".to_vec())); } #[test] @@ -348,8 +348,8 @@ pub mod tests { read_from_child_storage_returns_some_inner(StateVersion::V0); read_from_child_storage_returns_some_inner(StateVersion::V1); } - fn read_from_child_storage_returns_some_inner(state_hash: StateVersion) { - let test_trie = test_trie(state_hash); + fn read_from_child_storage_returns_some_inner(state_version: StateVersion) { + let test_trie = test_trie(state_version); assert_eq!( test_trie .child_storage(&ChildInfo::new_default(CHILD_KEY_1), b"value3") @@ -379,8 +379,8 @@ pub mod tests { read_from_storage_returns_none_inner(StateVersion::V0); read_from_storage_returns_none_inner(StateVersion::V1); } - fn read_from_storage_returns_none_inner(state_hash: StateVersion) { - assert_eq!(test_trie(state_hash).storage(b"non-existing-key").unwrap(), None); + fn read_from_storage_returns_none_inner(state_version: StateVersion) { + assert_eq!(test_trie(state_version).storage(b"non-existing-key").unwrap(), None); } #[test] @@ -388,8 +388,8 @@ pub mod tests { pairs_are_not_empty_on_non_empty_storage_inner(StateVersion::V0); pairs_are_not_empty_on_non_empty_storage_inner(StateVersion::V1); } - fn pairs_are_not_empty_on_non_empty_storage_inner(state_hash: 
StateVersion) { - assert!(!test_trie(state_hash).pairs().is_empty()); + fn pairs_are_not_empty_on_non_empty_storage_inner(state_version: StateVersion) { + assert!(!test_trie(state_version).pairs().is_empty()); } #[test] @@ -407,9 +407,9 @@ pub mod tests { storage_root_is_non_default_inner(StateVersion::V0); storage_root_is_non_default_inner(StateVersion::V1); } - fn storage_root_is_non_default_inner(state_hash: StateVersion) { + fn storage_root_is_non_default_inner(state_version: StateVersion) { assert!( - test_trie(state_hash).storage_root(iter::empty(), state_hash).0 != H256::repeat_byte(0) + test_trie(state_version).storage_root(iter::empty(), state_version).0 != H256::repeat_byte(0) ); } @@ -418,11 +418,11 @@ pub mod tests { storage_root_transaction_is_non_empty_inner(StateVersion::V0); storage_root_transaction_is_non_empty_inner(StateVersion::V1); } - fn storage_root_transaction_is_non_empty_inner(state_hash: StateVersion) { - let (new_root, mut tx) = test_trie(state_hash) - .storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_hash); + fn storage_root_transaction_is_non_empty_inner(state_version: StateVersion) { + let (new_root, mut tx) = test_trie(state_version) + .storage_root(iter::once((&b"new-key"[..], Some(&b"new-value"[..]))), state_version); assert!(!tx.drain().is_empty()); - assert!(new_root != test_trie(state_hash).storage_root(iter::empty(), state_hash).0); + assert!(new_root != test_trie(state_version).storage_root(iter::empty(), state_version).0); } #[test] @@ -430,8 +430,8 @@ pub mod tests { prefix_walking_works_inner(StateVersion::V0); prefix_walking_works_inner(StateVersion::V1); } - fn prefix_walking_works_inner(state_hash: StateVersion) { - let trie = test_trie(state_hash); + fn prefix_walking_works_inner(state_version: StateVersion) { + let trie = test_trie(state_version); let mut seen = HashSet::new(); trie.for_keys_with_prefix(b"value", |key| { diff --git a/primitives/storage/src/lib.rs 
b/primitives/storage/src/lib.rs index 60012f7671882..68e7fe6cf8397 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -232,17 +232,6 @@ pub mod well_known_keys { } } -/// Configuration value for a given threshold. -pub fn trie_threshold_encode(threshold: u32) -> Vec { - codec::Compact(threshold).encode() -} - -/// Configuration threshold from encoded, invalid encoded -/// is same as no threshold. -pub fn trie_threshold_decode(mut encoded: &[u8]) -> Option { - codec::Compact::::decode(&mut encoded).ok().map(|compact| compact.0) -} - /// Default value to use as a threshold for inner hashing. pub const DEFAULT_MAX_INLINE_VALUE: u32 = 33; @@ -428,6 +417,7 @@ impl Default for StateVersion { StateVersion::V1 } } + impl StateVersion { /// Threshold to apply for inline value of trie state. pub fn state_value_threshold(&self) -> Option { diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index 4ee595acd6619..7449ea15a93fd 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -127,14 +127,14 @@ impl Externalities for AsyncExternalities { panic!("`storage_append`: should not be used in async externalities!") } - fn storage_root(&mut self, _state_hashing: StateVersion) -> Vec { + fn storage_root(&mut self, _state_version: StateVersion) -> Vec { panic!("`storage_root`: should not be used in async externalities!") } fn child_storage_root( &mut self, _child_info: &ChildInfo, - _state_hashing: StateVersion, + _state_version: StateVersion, ) -> Vec { panic!("`child_storage_root`: should not be used in async externalities!") } diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index dfddec47baba5..6e2ad37d6d4a1 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -204,9 +204,7 @@ where let mut child_tries = Vec::new(); let partial_db = proof.into_memory_db(); let mut 
compact_proof = { - // Layout does not change trie reading. - // And meta for writing are read from state - // (no new node so using trie without threshold is safe here). + // Layout does not change trie reading, so can use default here. let trie = crate::TrieDB::::new(&partial_db, &root)?; let mut iter = trie.iter()?; From c858c28528e4fba12d8532317675e0ff532940ca Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 9 Sep 2021 14:54:07 +0200 Subject: [PATCH 089/188] fmt --- primitives/state-machine/src/ext.rs | 6 +++++- primitives/state-machine/src/proving_backend.rs | 3 ++- primitives/state-machine/src/read_only.rs | 6 +++++- primitives/state-machine/src/trie_backend.rs | 3 ++- 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index f3ba89dc0d02d..f4de5707a4ae2 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -548,7 +548,11 @@ where root.encode() } - fn child_storage_root(&mut self, child_info: &ChildInfo, state_version: StateVersion) -> Vec { + fn child_storage_root( + &mut self, + child_info: &ChildInfo, + state_version: StateVersion, + ) -> Vec { let _guard = guard(); let storage_key = child_info.storage_key(); let prefixed_storage_key = child_info.prefixed_storage_key(); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 96686d25a7e7b..ce60dcfbf5f70 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -434,7 +434,8 @@ mod tests { assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - let (trie_root, mut trie_mdb) = trie_backend.storage_root(std::iter::empty(), state_version); + let (trie_root, mut trie_mdb) = + trie_backend.storage_root(std::iter::empty(), state_version); let (proving_root, mut proving_mdb) = 
proving_backend.storage_root(std::iter::empty(), state_version); assert_eq!(trie_root, proving_root); diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index a1dcb31e68382..311fb814c046a 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -149,7 +149,11 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("storage_root is not supported in ReadOnlyExternalities") } - fn child_storage_root(&mut self, _child_info: &ChildInfo, _state_version: StateVersion) -> Vec { + fn child_storage_root( + &mut self, + _child_info: &ChildInfo, + _state_version: StateVersion, + ) -> Vec { unimplemented!("child_storage_root is not supported in ReadOnlyExternalities") } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 0b09d01691f1a..d86bca78dc7cc 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -409,7 +409,8 @@ pub mod tests { } fn storage_root_is_non_default_inner(state_version: StateVersion) { assert!( - test_trie(state_version).storage_root(iter::empty(), state_version).0 != H256::repeat_byte(0) + test_trie(state_version).storage_root(iter::empty(), state_version).0 != + H256::repeat_byte(0) ); } From 63265eb57e94d26b3f0b7e1f4fe9a26b7aa10543 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Fri, 10 Sep 2021 13:09:19 +0200 Subject: [PATCH 090/188] removing useless trait function. 
--- client/service/src/client/client.rs | 9 --------- primitives/api/proc-macro/src/impl_runtime_apis.rs | 3 ++- primitives/api/src/lib.rs | 3 --- 3 files changed, 2 insertions(+), 13 deletions(-) diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 2bce77646f474..e6bda782ab2cd 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -411,11 +411,6 @@ where CallExecutor::runtime_version(&self.executor, id) } - /// Get the StateVersion at a given block. - pub fn state_hash_at(&self, id: &BlockId) -> sp_blockchain::Result { - Ok(self.runtime_version_at(id)?.state_version()) - } - /// Reads given header and generates CHT-based header proof for CHT of given size. pub fn header_proof_with_cht_size( &self, @@ -1882,10 +1877,6 @@ where fn runtime_version_at(&self, at: &BlockId) -> Result { CallExecutor::runtime_version(&self.executor, at).map_err(Into::into) } - - fn state_hash_at(&self, at: &BlockId) -> Result { - Ok(self.runtime_version_at(at)?.state_version()) - } } /// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 4169d1fc65565..584b61aa424dd 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -299,7 +299,8 @@ fn generate_runtime_api_base_structures() -> Result { > where Self: Sized { let at = #crate_::BlockId::Hash(parent_hash.clone()); let state_hash = self.call - .state_hash_at(&at) + .runtime_version_at(&at) + .map(|v| v.state_version()) .map_err(|e| format!("{:?}", e))?; self.changes.replace(Default::default()).into_storage_changes( diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index c6708dca5da22..d3337f33902dc 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -566,9 +566,6 @@ pub trait CallApiAt { /// 
Returns the runtime version at the given block. fn runtime_version_at(&self, at: &BlockId) -> Result; - - /// Returns the state version at the given block. - fn state_hash_at(&self, at: &BlockId) -> Result; } /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime. From 4142b8dbec59a7ae8a05dbe0e14dc2162aa5ed0e Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 14 Sep 2021 09:15:21 +0200 Subject: [PATCH 091/188] remove remaining reference to state_hash --- client/service/src/client/client.rs | 9 --------- primitives/api/proc-macro/src/impl_runtime_apis.rs | 9 +++++---- primitives/api/src/lib.rs | 3 --- 3 files changed, 5 insertions(+), 16 deletions(-) diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 2bce77646f474..e6bda782ab2cd 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -411,11 +411,6 @@ where CallExecutor::runtime_version(&self.executor, id) } - /// Get the StateVersion at a given block. - pub fn state_hash_at(&self, id: &BlockId) -> sp_blockchain::Result { - Ok(self.runtime_version_at(id)?.state_version()) - } - /// Reads given header and generates CHT-based header proof for CHT of given size. 
pub fn header_proof_with_cht_size( &self, @@ -1882,10 +1877,6 @@ where fn runtime_version_at(&self, at: &BlockId) -> Result { CallExecutor::runtime_version(&self.executor, at).map_err(Into::into) } - - fn state_hash_at(&self, at: &BlockId) -> Result { - Ok(self.runtime_version_at(at)?.state_version()) - } } /// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 4169d1fc65565..c63ce9c157373 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -298,16 +298,17 @@ fn generate_runtime_api_base_structures() -> Result { String > where Self: Sized { let at = #crate_::BlockId::Hash(parent_hash.clone()); - let state_hash = self.call - .state_hash_at(&at) - .map_err(|e| format!("{:?}", e))?; + let state_version = self.call + .runtime_version_at(&at) + .map_err(|e| format!("{:?}", e))? + .state_version(); self.changes.replace(Default::default()).into_storage_changes( backend, changes_trie_state, parent_hash, self.storage_transaction_cache.replace(Default::default()), - state_hash, + state_version, ) } } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index c6708dca5da22..d3337f33902dc 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -566,9 +566,6 @@ pub trait CallApiAt { /// Returns the runtime version at the given block. fn runtime_version_at(&self, at: &BlockId) -> Result; - - /// Returns the state version at the given block. - fn state_hash_at(&self, at: &BlockId) -> Result; } /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime. 
From 5ed824a53d6d9b557bcc751f85f84771084dbe65 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 1 Oct 2021 11:48:37 +0200 Subject: [PATCH 092/188] fix some imports --- primitives/state-machine/src/backend.rs | 2 +- primitives/state-machine/src/ext.rs | 2 +- primitives/version/src/lib.rs | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index caf3429161825..44876557d2ce9 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -26,7 +26,7 @@ use hash_db::Hasher; #[cfg(feature = "std")] use sp_core::traits::RuntimeCode; use sp_core::{ - storage::{well_known_keys, ChildInfo, TrackedStorageKey}, + storage::{ChildInfo, TrackedStorageKey}, StateVersion, }; use sp_std::vec::Vec; diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index d371020c82851..5833cf3f9d98c 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -28,7 +28,7 @@ use sp_core::{ storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, StateVersion, }; -use sp_externalities::{Extension, ExtensionStore, Extensions, Externalities}; +use sp_externalities::{Extension, ExtensionStore, Externalities}; use sp_trie::{empty_child_trie_root, trie_types::Layout}; #[cfg(feature = "std")] diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 9cadefe1e0c24..10edbca4eb24f 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -28,7 +28,6 @@ use std::fmt; use codec::{Decode, Encode}; use scale_info::TypeInfo; -pub use sp_runtime::create_runtime_str; use sp_runtime::RuntimeString; pub use sp_runtime::{create_runtime_str, StateVersion, DEFAULT_STATE_HASHING}; #[doc(hidden)] From b0b8a850deebfa4df733980bbd5757a9f0b9a098 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 1 Oct 2021 11:39:34 +0200 Subject: [PATCH 093/188] Follow chain 
state version management. --- .../try-runtime/cli/src/commands/execute_block.rs | 2 +- .../try-runtime/cli/src/commands/follow_chain.rs | 11 ++++++++--- .../try-runtime/cli/src/commands/offchain_worker.rs | 2 +- .../cli/src/commands/on_runtime_upgrade.rs | 2 +- utils/frame/try-runtime/cli/src/lib.rs | 7 +++++-- 5 files changed, 16 insertions(+), 8 deletions(-) diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index 19422db90119f..f83d4ffc98e7e 100644 --- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -157,7 +157,7 @@ where header.digest_mut().pop(); let block = Block::new(header, extrinsics); - let (expected_spec_name, expected_spec_version) = + let (expected_spec_name, expected_spec_version, _) = local_spec::(&ext, &executor); ensure_matching_spec::( block_ws_uri.clone(), diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index 0526f5d327fb2..53ed252e4b18f 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -121,7 +121,7 @@ where new_ext.as_backend().root() ); - let (expected_spec_name, expected_spec_version) = + let (expected_spec_name, expected_spec_version, spec_state_version) = local_spec::(&new_ext, &executor); ensure_matching_spec::( command.uri.clone(), @@ -131,10 +131,10 @@ where ) .await; - maybe_state_ext = Some(new_ext); + maybe_state_ext = Some((new_ext, spec_state_version)); } - let state_ext = + let (state_ext, spec_state_version) = maybe_state_ext.as_mut().expect("state_ext either existed or was just created"); let (mut changes, encoded_result) = state_machine_call::( @@ -155,6 +155,11 @@ where None, Default::default(), &mut Default::default(), + // Note that in case a block contains a runtime upgrade, + // state version could 
potentially be incorrect here, + // this is very niche and would only result in unaligned + // roots, so this use case is ignored for now. + *spec_state_version, ) .unwrap(); state_ext.backend.apply_transaction( diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs index 6f37e4b3849fa..5c467c09cec92 100644 --- a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -140,7 +140,7 @@ where builder.build().await? }; - let (expected_spec_name, expected_spec_version) = + let (expected_spec_name, expected_spec_version, _) = local_spec::(&ext, &executor); ensure_matching_spec::( header_ws_uri, diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs index 86f5548b8aafa..ea57e2cc339a5 100644 --- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -58,7 +58,7 @@ where }; if let Some(uri) = command.state.live_uri() { - let (expected_spec_name, expected_spec_version) = + let (expected_spec_name, expected_spec_version, _) = local_spec::(&ext, &executor); ensure_matching_spec::( uri, diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index d5ccca9560252..54049f6fd67ea 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -699,7 +699,7 @@ pub(crate) fn state_machine_call( ext: &TestExternalities, executor: &NativeElseWasmExecutor, -) -> (String, u32) { +) -> (String, u32, sp_core::StateVersion) { let (_, encoded) = state_machine_call::( &ext, &executor, @@ -711,6 +711,9 @@ pub(crate) fn local_spec( .expect("all runtimes should have version; qed"); ::decode(&mut &*encoded) .map_err(|e| format!("failed to decode output: {:?}", e)) - .map(|v| (v.spec_name.into(), 
v.spec_version)) + .map(|v| { + let state_version = v.state_version(); + (v.spec_name.into(), v.spec_version, state_version) + }) .expect("all runtimes should have version; qed") } From e7267c0963b900cae2b800e484a81ae95338e79d Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 4 Oct 2021 16:17:47 +0200 Subject: [PATCH 094/188] trie update, fix and constant threshold for trie layouts. --- Cargo.lock | 24 +- bin/node/bench/src/generator.rs | 13 +- bin/node/bench/src/trie.rs | 18 +- bin/node/executor/tests/common.rs | 10 +- client/api/src/cht.rs | 3 +- client/db/src/bench.rs | 2 +- client/db/src/lib.rs | 5 +- client/executor/src/integration_tests/mod.rs | 7 +- client/service/test/src/client/light.rs | 4 +- client/service/test/src/client/mod.rs | 4 +- frame/session/src/historical/mod.rs | 4 +- primitives/io/src/lib.rs | 16 +- primitives/state-machine/src/backend.rs | 4 +- primitives/state-machine/src/basic.rs | 18 +- .../state-machine/src/changes_trie/mod.rs | 4 +- primitives/state-machine/src/ext.rs | 8 +- .../state-machine/src/in_memory_backend.rs | 5 +- primitives/state-machine/src/lib.rs | 13 +- .../state-machine/src/proving_backend.rs | 14 +- primitives/state-machine/src/trie_backend.rs | 105 +++++---- .../state-machine/src/trie_backend_essence.rs | 9 +- .../transaction-storage-proof/src/lib.rs | 2 +- primitives/trie/src/lib.rs | 220 +++++++++--------- primitives/trie/src/storage_proof.rs | 4 +- primitives/trie/src/trie_codec.rs | 5 +- test-utils/runtime/src/lib.rs | 9 +- 26 files changed, 271 insertions(+), 259 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f63018da76aa5..a9e01a5ace372 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2539,7 +2539,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" 
[[package]] name = "hash256-std-hasher" @@ -2553,7 +2553,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "crunchy", ] @@ -3214,10 +3214,10 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "hash-db", - "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", + "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2)", "tiny-keccak", ] @@ -4150,7 +4150,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10776,22 +10776,22 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.28.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "criterion", "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", + "keccak-hasher 0.15.3 
(git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2)", "memory-db", "parity-scale-codec", "trie-db", "trie-root", - "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", + "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2)", ] [[package]] name = "trie-db" version = "0.22.6" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10803,7 +10803,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "hash-db", ] @@ -10821,10 +10821,10 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#97caad53d595c3ecc604763a30b47dc634fd63cf" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2)", ] [[package]] diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index 4a8cc88edf3b7..d57142893f38f 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -20,10 +20,9 @@ use std::{collections::HashMap, sync::Arc}; use kvdb::KeyValueDB; use node_primitives::Hash; -use sp_trie::{trie_types::TrieDBMut, TrieMut}; +use 
sp_trie::{trie_types::TrieDBMutV1, TrieMut}; use crate::simple_trie::SimpleTrie; -use sp_core::StateVersion; /// Generate trie from given `key_values`. /// @@ -32,7 +31,6 @@ use sp_core::StateVersion; pub fn generate_trie( db: Arc, key_values: impl IntoIterator, Vec)>, - state_version: StateVersion, ) -> Hash { let mut root = Hash::default(); @@ -45,14 +43,7 @@ pub fn generate_trie( ); let mut trie = SimpleTrie { db, overlay: &mut overlay }; { - let mut trie_db = if let Some(threshold) = state_version.state_value_threshold() { - let layout = sp_trie::Layout::with_max_inline_value(threshold); - TrieDBMut::::new_with_layout( - &mut trie, &mut root, layout, - ) - } else { - TrieDBMut::new(&mut trie, &mut root) - }; + let mut trie_db = TrieDBMutV1::::new(&mut trie, &mut root); for (key, value) in key_values { trie_db.insert(&key, &value).expect("trie insertion failed"); } diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index eb8cbdbbac845..374ed3568475e 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -23,7 +23,7 @@ use kvdb::KeyValueDB; use lazy_static::lazy_static; use rand::Rng; use sp_state_machine::Backend as _; -use sp_trie::{trie_types::TrieDBMut, TrieMut as _}; +use sp_trie::{trie_types::TrieDBMutV1, TrieMut as _}; use std::{borrow::Cow, collections::HashMap, sync::Arc}; use node_primitives::Hash; @@ -142,11 +142,7 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription { assert_eq!(warmup_keys.len(), SAMPLE_SIZE); assert_eq!(query_keys.len(), SAMPLE_SIZE); - let root = generate_trie( - database.open(self.database_type), - key_values, - sp_core::StateVersion::default(), - ); + let root = generate_trie(database.open(self.database_type), key_values); Box::new(TrieReadBenchmark { database, @@ -252,11 +248,7 @@ impl core::BenchmarkDescription for TrieWriteBenchmarkDescription { assert_eq!(warmup_keys.len(), SAMPLE_SIZE); - let root = generate_trie( - database.open(self.database_type), - key_values, - 
sp_core::StateVersion::default(), - ); + let root = generate_trie(database.open(self.database_type), key_values); Box::new(TrieWriteBenchmark { database, @@ -294,8 +286,8 @@ impl core::Benchmark for TrieWriteBenchmark { let mut overlay = HashMap::new(); let mut trie = SimpleTrie { db: kvdb.clone(), overlay: &mut overlay }; - let mut trie_db_mut = - TrieDBMut::from_existing(&mut trie, &mut new_root).expect("Failed to create TrieDBMut"); + let mut trie_db_mut = TrieDBMutV1::from_existing(&mut trie, &mut new_root) + .expect("Failed to create TrieDBMut"); for (warmup_key, warmup_value) in self.warmup_keys.iter() { let value = trie_db_mut diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index a8866a336768d..026e7f92cad0f 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -145,16 +145,16 @@ pub fn construct_block( extrinsics: Vec, babe_slot: Slot, ) -> (Vec, Hash) { - use sp_trie::{trie_types::Layout, TrieConfiguration}; + use sp_trie::{LayoutV1 as Layout, TrieConfiguration}; // sign extrinsics. let extrinsics = extrinsics.into_iter().map(sign).collect::>(); // calculate the header fields that we can. - let extrinsics_root = Layout::::default() - .ordered_trie_root(extrinsics.iter().map(Encode::encode)) - .to_fixed_bytes() - .into(); + let extrinsics_root = + Layout::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) + .to_fixed_bytes() + .into(); let header = Header { parent_hash, diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index e10096953dc97..8fe6075729778 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -94,8 +94,7 @@ where I: IntoIterator>>, { use sp_trie::TrieConfiguration; - Ok(sp_trie::Layout::::default() - .trie_root(build_pairs::(cht_size, cht_num, hashes)?)) + Ok(sp_trie::LayoutV0::::trie_root(build_pairs::(cht_size, cht_num, hashes)?)) } /// Build CHT-based header proof. 
diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index b65e88073275c..5e17cda304de2 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -109,7 +109,7 @@ impl BenchmarkingState { let state_hash = sp_runtime::StateVersion::default(); let mut root = B::Hash::default(); let mut mdb = MemoryDB::>::default(); - sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); + sp_state_machine::TrieDBMutV1::>::new(&mut mdb, &mut root); let mut state = BenchmarkingState { state: RefCell::new(None), diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 9536690124ebe..1fc77a01f8c37 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -1047,7 +1047,8 @@ impl EmptyStorage { pub fn new() -> Self { let mut root = Block::Hash::default(); let mut mdb = MemoryDB::>::default(); - sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); + // both triedbmut are the same on empty storage. + sp_state_machine::TrieDBMutV1::>::new(&mut mdb, &mut root); EmptyStorage(root) } } @@ -2438,7 +2439,7 @@ pub(crate) mod tests { traits::{BlakeTwo256, Hash}, ConsensusEngineId, }; - use sp_state_machine::{TrieDBMut, TrieMut}; + use sp_state_machine::{TrieDBMutV1 as TrieDBMut, TrieMut}; const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0"; const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1"; diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 0343b8e5ff2ec..27aa4f3f2192f 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -33,7 +33,7 @@ use sp_core::{ }; use sp_runtime::traits::BlakeTwo256; use sp_state_machine::TestExternalities as CoreTestExternalities; -use sp_trie::{Layout, TrieConfiguration}; +use sp_trie::{LayoutV1 as Layout, TrieConfiguration}; use sp_wasm_interface::HostFunctions as _; use std::sync::Arc; use tracing_subscriber::layer::SubscriberExt; @@ -372,10 +372,7 @@ fn ordered_trie_root_should_work(wasm_method: 
WasmExecutionMethod) { let trie_input = vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()]; assert_eq!( call_in_wasm("test_ordered_trie_root", &[0], wasm_method, &mut ext.ext(),).unwrap(), - Layout::::default() - .ordered_trie_root(trie_input.iter()) - .as_bytes() - .encode(), + Layout::::ordered_trie_root(trie_input.iter()).as_bytes().encode(), ); } diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 36b082924e973..3bcfdf32dec36 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -570,9 +570,9 @@ fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Heade } fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { - use sp_trie::{trie_types::Layout, TrieConfiguration}; + use sp_trie::{LayoutV0, TrieConfiguration}; let iter = extrinsics.iter().map(Encode::encode); - let extrinsics_root = Layout::::default().ordered_trie_root(iter); + let extrinsics_root = LayoutV0::::ordered_trie_root(iter); // only care about `extrinsics_root` Header::new(0, extrinsics_root, H256::zero(), H256::zero(), Default::default()) diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 0b1b3052fdc79..baaf945a0802e 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -40,7 +40,7 @@ use sp_state_machine::{ backend::Backend as _, ExecutionStrategy, InMemoryBackend, OverlayedChanges, StateMachine, }; use sp_storage::{ChildInfo, StorageKey}; -use sp_trie::{Layout, TrieConfiguration}; +use sp_trie::{LayoutV1, TrieConfiguration}; use std::{ collections::{HashMap, HashSet}, sync::Arc, @@ -170,7 +170,7 @@ fn construct_block( let transactions = txs.into_iter().map(|tx| tx.into_signed_tx()).collect::>(); let iter = transactions.iter().map(Encode::encode); - let extrinsics_root = Layout::::default().ordered_trie_root(iter).into(); + let extrinsics_root = 
LayoutV1::::ordered_trie_root(iter).into(); let mut header = Header { parent_hash, diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index 0801b2aca1701..52058a9eef5c4 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -40,7 +40,7 @@ use sp_runtime::{ use sp_session::{MembershipProof, ValidatorCount}; use sp_std::prelude::*; use sp_trie::{ - trie_types::{TrieDB, TrieDBMut}, + trie_types::{TrieDB, TrieDBMutV0}, MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX, }; @@ -224,7 +224,7 @@ impl ProvingTrie { let mut root = Default::default(); { - let mut trie = TrieDBMut::new(&mut db, &mut root); + let mut trie = TrieDBMutV0::new(&mut db, &mut root); for (i, (validator, full_id)) in validators.into_iter().enumerate() { let i = i as u32; let keys = match >::load_keys(&validator) { diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index ebfed40a2e961..8104b7ce29637 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -55,7 +55,7 @@ use sp_core::{ }; #[cfg(feature = "std")] -use sp_trie::{Layout, TrieConfiguration}; +use sp_trie::{LayoutV0, TrieConfiguration}; use sp_runtime_interface::{ pass_by::{PassBy, PassByCodec}, @@ -419,27 +419,28 @@ pub trait DefaultChildStorage { pub trait Trie { /// A trie root formed from the iterated items. fn blake2_256_root(input: Vec<(Vec, Vec)>) -> H256 { - Layout::::default().trie_root(input) + LayoutV0::::trie_root(input) } /// A trie root formed from the enumerated items. fn blake2_256_ordered_root(input: Vec>) -> H256 { - Layout::::default().ordered_trie_root(input) + LayoutV0::::ordered_trie_root(input) } /// A trie root formed from the iterated items. fn keccak_256_root(input: Vec<(Vec, Vec)>) -> H256 { - Layout::::default().trie_root(input) + LayoutV0::::trie_root(input) } /// A trie root formed from the enumerated items. 
fn keccak_256_ordered_root(input: Vec>) -> H256 { - Layout::::default().ordered_trie_root(input) + LayoutV0::::ordered_trie_root(input) } /// Verify trie proof fn blake2_256_verify_proof(root: H256, proof: &[Vec], key: &[u8], value: &[u8]) -> bool { - sp_trie::verify_trie_proof::, _, _, _>( + // Here V1 would work too + sp_trie::verify_trie_proof::, _, _, _>( &root, proof, &[(key, Some(value))], @@ -449,7 +450,8 @@ pub trait Trie { /// Verify trie proof fn keccak_256_verify_proof(root: H256, proof: &[Vec], key: &[u8], value: &[u8]) -> bool { - sp_trie::verify_trie_proof::, _, _, _>( + // Here V1 would work too + sp_trie::verify_trie_proof::, _, _, _>( &root, proof, &[(key, Some(value))], diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 44876557d2ce9..bb70b70e905f5 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -313,11 +313,11 @@ where H: Hasher, I: IntoIterator, { - use sp_trie::{trie_types::TrieDBMut, TrieMut}; + use sp_trie::{trie_types::TrieDBMutV1, TrieMut}; let mut root = ::Out::default(); { - let mut trie = TrieDBMut::::new(mdb, &mut root); + let mut trie = TrieDBMutV1::::new(mdb, &mut root); for (key, value) in input { if let Err(e) = trie.insert(&key, &value) { log::warn!(target: "trie", "Failed to write to trie: {}", e); diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 1236d9299bd44..10ecba7346737 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -29,7 +29,7 @@ use sp_core::{ Blake2Hasher, StateVersion, }; use sp_externalities::{Extension, Extensions}; -use sp_trie::{empty_child_trie_root, Layout, TrieConfiguration}; +use sp_trie::{empty_child_trie_root, LayoutV0, LayoutV1, TrieConfiguration}; use std::{ any::{Any, TypeId}, collections::BTreeMap, @@ -284,7 +284,7 @@ impl Externalities for BasicExternalities { // Single child trie implementation currently 
allows using the same child // empty root for all child trie. Using null storage key until multiple // type of child trie support. - let empty_hash = empty_child_trie_root::>(); + let empty_hash = empty_child_trie_root::>(); for (prefixed_storage_key, child_info) in prefixed_keys { let child_root = self.child_storage_root(&child_info, state_version); if &empty_hash[..] == &child_root[..] { @@ -294,12 +294,12 @@ impl Externalities for BasicExternalities { } } - let layout = if let Some(threshold) = state_version.state_value_threshold() { - Layout::::with_max_inline_value(threshold) - } else { - Layout::::default() - }; - layout.trie_root(self.inner.top.clone()).as_ref().into() + match state_version { + StateVersion::V0 => + LayoutV0::::trie_root(self.inner.top.clone()).as_ref().into(), + StateVersion::V1 => + LayoutV1::::trie_root(self.inner.top.clone()).as_ref().into(), + } } fn child_storage_root( @@ -313,7 +313,7 @@ impl Externalities for BasicExternalities { .child_storage_root(&child.child_info, delta, state_version) .0 } else { - empty_child_trie_root::>() + empty_child_trie_root::>() } .encode() } diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index 40148095247dd..79022cf69dafd 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -80,11 +80,13 @@ use codec::{Decode, Encode}; use hash_db::{Hasher, Prefix}; use num_traits::{One, Zero}; use sp_core::{self, storage::PrefixedStorageKey}; -use sp_trie::{trie_types::TrieDBMut, DBValue, MemoryDB, TrieMut}; +use sp_trie::{DBValue, MemoryDB, TrieMut}; use std::{ collections::{HashMap, HashSet}, convert::TryInto, }; +// change trie using V0 trie (no need for attached node value). +use sp_trie::trie_types::TrieDBMutV0 as TrieDBMut; /// Requirements for block number that can be used with changes tries. 
pub trait BlockNumber: diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 5833cf3f9d98c..fa27f8809ea54 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -29,7 +29,7 @@ use sp_core::{ StateVersion, }; use sp_externalities::{Extension, ExtensionStore, Externalities}; -use sp_trie::{empty_child_trie_root, trie_types::Layout}; +use sp_trie::{empty_child_trie_root, LayoutV1}; #[cfg(feature = "std")] use crate::changes_trie::State as ChangesTrieState; @@ -560,7 +560,8 @@ where let root = self .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) - .unwrap_or_else(|| empty_child_trie_root::>()); + // V1 is equivalent to V0 on empty root. + .unwrap_or_else(|| empty_child_trie_root::>()); trace!( target: "state", method = "ChildStorageRoot", @@ -606,7 +607,8 @@ where let root = self .storage(prefixed_storage_key.as_slice()) .and_then(|k| Decode::decode(&mut &k[..]).ok()) - .unwrap_or_else(|| empty_child_trie_root::>()); + // V1 is equivalent to V0 on empty root. + .unwrap_or_else(|| empty_child_trie_root::>()); trace!( target: "state", diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index b260beebbf0ff..de1e10c04afec 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -26,7 +26,7 @@ use sp_core::{ storage::{ChildInfo, Storage}, StateVersion, }; -use sp_trie::{empty_trie_root, Layout, MemoryDB}; +use sp_trie::{empty_trie_root, LayoutV1, MemoryDB}; use std::collections::{BTreeMap, HashMap}; /// Create a new empty instance of in-memory backend. @@ -35,7 +35,8 @@ where H::Out: Codec + Ord, { let db = MemoryDB::default(); - TrieBackend::new(db, empty_trie_root::>()) + // V1 is same as V0 for an empty trie. 
+ TrieBackend::new(db, empty_trie_root::>()) } impl TrieBackend, H> diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 8287edfbee008..598ecf6fce3fc 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -169,7 +169,10 @@ mod std_reexport { read_only::{InspectState, ReadOnlyExternalities}, testing::TestExternalities, }; - pub use sp_trie::{trie_types::TrieDBMut, DBValue, Layout, MemoryDB, StorageProof, TrieMut}; + pub use sp_trie::{ + trie_types::{TrieDBMutV0, TrieDBMutV1}, + DBValue, LayoutV0, LayoutV1, MemoryDB, StorageProof, TrieMut, + }; } #[cfg(feature = "std")] @@ -1743,12 +1746,10 @@ mod tests { #[test] fn inner_state_versioning_switch_proofs() { - let mut layout = Layout::default(); let mut state_version = StateVersion::V0; let (mut mdb, mut root) = trie_backend::tests::test_db(state_version); { - let mut trie = - TrieDBMut::from_existing_with_layout(&mut mdb, &mut root, layout.clone()).unwrap(); + let mut trie = TrieDBMutV0::from_existing(&mut mdb, &mut root).unwrap(); trie.insert(b"foo", vec![1u8; 1_000].as_slice()) // big inner hash .expect("insert failed"); trie.insert(b"foo2", vec![3u8; 16].as_slice()) // no inner hash @@ -1780,11 +1781,9 @@ mod tests { let root1 = root.clone(); // do switch - layout = Layout::with_max_inline_value(sp_core::storage::DEFAULT_MAX_INLINE_VALUE); state_version = StateVersion::V1; { - let mut trie = - TrieDBMut::from_existing_with_layout(&mut mdb, &mut root, layout.clone()).unwrap(); + let mut trie = TrieDBMutV1::from_existing(&mut mdb, &mut root).unwrap(); trie.insert(b"foo222", vec![5u8; 100].as_slice()) // inner hash .expect("insert failed"); // update with same value do change diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index ce60dcfbf5f70..6d359c132e2a7 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -30,7 
+30,7 @@ use sp_core::{storage::ChildInfo, StateVersion}; pub use sp_trie::trie_types::TrieError; use sp_trie::{ empty_child_trie_root, read_child_trie_value_with, read_trie_value_with, record_all_keys, - Layout, MemoryDB, Recorder, StorageProof, + LayoutV1, MemoryDB, Recorder, StorageProof, }; use std::{ collections::{hash_map::Entry, HashMap}, @@ -56,7 +56,8 @@ where let map_e = |e| format!("Trie lookup error: {}", e); - read_trie_value_with::, _, Ephemeral>( + // V1 is equivalent to V0 on read. + read_trie_value_with::, _, Ephemeral>( &eph, self.backend.root(), key, @@ -75,14 +76,16 @@ where let root = self .storage(storage_key)? .and_then(|r| Decode::decode(&mut &r[..]).ok()) - .unwrap_or_else(|| empty_child_trie_root::>()); + // V1 is equivalent to V0 on empty trie + .unwrap_or_else(|| empty_child_trie_root::>()); let mut read_overlay = S::Overlay::default(); let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); let map_e = |e| format!("Trie lookup error: {}", e); - read_child_trie_value_with::, _, _>( + // V1 is equivalent to V0 on read + read_child_trie_value_with::, _, _>( child_info.keyspace(), &eph, &root.as_ref(), @@ -99,7 +102,8 @@ where let mut iter = move || -> Result<(), Box>> { let root = self.backend.root(); - record_all_keys::, _>(&eph, root, &mut *self.proof_recorder) + // V1 and V is equivalent to V0 on read and recorder is key read. + record_all_keys::, _>(&eph, root, &mut *self.proof_recorder) }; if let Err(e) = iter() { diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index d86bca78dc7cc..6b36bdb153544 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -32,7 +32,7 @@ use sp_std::{boxed::Box, vec::Vec}; use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, trie_types::{TrieDB, TrieError}, - Layout, Trie, + LayoutV0, LayoutV1, Trie, }; /// Patricia trie-based backend. 
Transaction type is an overlay of changes to commit. @@ -199,13 +199,11 @@ where { let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); - let res = || { - let layout = if let Some(threshold) = state_version.state_value_threshold() { - sp_trie::Layout::with_max_inline_value(threshold) - } else { - sp_trie::Layout::default() - }; - delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta, layout) + let res = || match state_version { + StateVersion::V0 => + delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta), + StateVersion::V1 => + delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta), }; match res() { @@ -227,14 +225,8 @@ where H::Out: Ord, { let default_root = match child_info.child_type() { - ChildType::ParentKeyId => empty_child_trie_root::>(), + ChildType::ParentKeyId => empty_child_trie_root::>(), }; - let layout = if let Some(threshold) = state_version.state_value_threshold() { - sp_trie::Layout::with_max_inline_value(threshold) - } else { - sp_trie::Layout::default() - }; - let mut write_overlay = S::Overlay::default(); let prefixed_storage_key = child_info.prefixed_storage_key(); let mut root = match self.storage(prefixed_storage_key.as_slice()) { @@ -249,14 +241,20 @@ where { let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); - - match child_delta_trie_root::, _, _, _, _, _, _>( - child_info.keyspace(), - &mut eph, - root, - delta, - layout, - ) { + match match state_version { + StateVersion::V0 => child_delta_trie_root::, _, _, _, _, _, _>( + child_info.keyspace(), + &mut eph, + root, + delta, + ), + StateVersion::V1 => child_delta_trie_root::, _, _, _, _, _, _>( + child_info.keyspace(), + &mut eph, + root, + delta, + ), + } { Ok(ret) => root = ret, Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), } @@ -288,41 +286,64 @@ pub mod tests { use codec::Encode; use sp_core::H256; use sp_runtime::traits::BlakeTwo256; - use sp_trie::{trie_types::TrieDBMut, KeySpacedDBMut, 
PrefixedMemoryDB, TrieMut}; + use sp_trie::{ + trie_types::{TrieDBMutV0, TrieDBMutV1}, + KeySpacedDBMut, PrefixedMemoryDB, TrieMut, + }; use std::{collections::HashSet, iter}; const CHILD_KEY_1: &[u8] = b"sub1"; - pub(crate) fn test_db(hashed_value: StateVersion) -> (PrefixedMemoryDB, H256) { + pub(crate) fn test_db(state_version: StateVersion) -> (PrefixedMemoryDB, H256) { let child_info = ChildInfo::new_default(CHILD_KEY_1); let mut root = H256::default(); let mut mdb = PrefixedMemoryDB::::default(); { let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); - let mut trie = TrieDBMut::new(&mut mdb, &mut root); - trie.insert(b"value3", &[142]).expect("insert failed"); - trie.insert(b"value4", &[124]).expect("insert failed"); + match state_version { + StateVersion::V0 => { + let mut trie = TrieDBMutV0::new(&mut mdb, &mut root); + trie.insert(b"value3", &[142]).expect("insert failed"); + trie.insert(b"value4", &[124]).expect("insert failed"); + }, + StateVersion::V1 => { + let mut trie = TrieDBMutV1::new(&mut mdb, &mut root); + trie.insert(b"value3", &[142]).expect("insert failed"); + trie.insert(b"value4", &[124]).expect("insert failed"); + }, + }; }; { let mut sub_root = Vec::new(); root.encode_to(&mut sub_root); - let mut trie = if let Some(hash) = hashed_value.state_value_threshold() { - let layout = Layout::with_max_inline_value(hash); - TrieDBMut::new_with_layout(&mut mdb, &mut root, layout) - } else { - TrieDBMut::new(&mut mdb, &mut root) - }; - trie.insert(child_info.prefixed_storage_key().as_slice(), &sub_root[..]) - .expect("insert failed"); - trie.insert(b"key", b"value").expect("insert failed"); - trie.insert(b"value1", &[42]).expect("insert failed"); - trie.insert(b"value2", &[24]).expect("insert failed"); - trie.insert(b":code", b"return 42").expect("insert failed"); - for i in 128u8..255u8 { - trie.insert(&[i], &[i]).unwrap(); + fn build( + mut trie: sp_trie::TrieDBMut, + child_info: &ChildInfo, + sub_root: &[u8], + ) { + 
trie.insert(child_info.prefixed_storage_key().as_slice(), sub_root) + .expect("insert failed"); + trie.insert(b"key", b"value").expect("insert failed"); + trie.insert(b"value1", &[42]).expect("insert failed"); + trie.insert(b"value2", &[24]).expect("insert failed"); + trie.insert(b":code", b"return 42").expect("insert failed"); + for i in 128u8..255u8 { + trie.insert(&[i], &[i]).unwrap(); + } } + + match state_version { + StateVersion::V0 => { + let trie = TrieDBMutV0::new(&mut mdb, &mut root); + build(trie, &child_info, &sub_root[..]) + }, + StateVersion::V1 => { + let trie = TrieDBMutV1::new(&mut mdb, &mut root); + build(trie, &child_info, &sub_root[..]) + }, + }; } (mdb, root) } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 2be75e245c5c4..945fd05ebffb4 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -28,12 +28,15 @@ use sp_std::{boxed::Box, vec::Vec}; use sp_trie::{ empty_child_trie_root, read_child_trie_value, read_trie_value, trie_types::{TrieDB, TrieError}, - DBValue, KeySpacedDB, Layout, PrefixedMemoryDB, Trie, TrieDBIterator, TrieDBKeyIterator, + DBValue, KeySpacedDB, PrefixedMemoryDB, Trie, TrieDBIterator, TrieDBKeyIterator, }; #[cfg(feature = "std")] use std::collections::HashMap; #[cfg(feature = "std")] use std::sync::Arc; +// In this module, we only use layout for read operation and empty root, +// where V1 and V0 are equivalent. +use sp_trie::LayoutV1 as Layout; #[cfg(not(feature = "std"))] macro_rules! 
format { @@ -588,7 +591,9 @@ impl, H: Hasher> HashDBRef for TrieBackendE mod test { use super::*; use sp_core::{Blake2Hasher, H256}; - use sp_trie::{trie_types::TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut}; + use sp_trie::{ + trie_types::TrieDBMutV1 as TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut, + }; #[test] fn next_storage_key_and_next_child_storage_key_work() { diff --git a/primitives/transaction-storage-proof/src/lib.rs b/primitives/transaction-storage-proof/src/lib.rs index 4b01a8d45d454..af3838adcc97a 100644 --- a/primitives/transaction-storage-proof/src/lib.rs +++ b/primitives/transaction-storage-proof/src/lib.rs @@ -143,7 +143,7 @@ pub mod registration { use sp_trie::TrieMut; type Hasher = sp_core::Blake2Hasher; - type TrieLayout = sp_trie::Layout; + type TrieLayout = sp_trie::LayoutV0; /// Create a new inherent data provider instance for a given parent block hash. pub fn new_data_provider( diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index a474ec8c15700..bb8cea1782178 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -36,7 +36,7 @@ pub use memory_db::prefixed_key; pub use memory_db::KeyFunction; /// The Substrate format implementation of `NodeCodec`. pub use node_codec::NodeCodec; -use sp_std::{borrow::Borrow, boxed::Box, fmt, marker::PhantomData, vec::Vec}; +use sp_std::{borrow::Borrow, boxed::Box, marker::PhantomData, vec::Vec}; pub use storage_proof::{CompactProof, StorageProof}; /// Trie codec reexport, mainly child trie support /// for trie compact proof. @@ -54,62 +54,79 @@ pub use trie_db::{ pub use trie_stream::TrieStream; /// substrate trie layout -pub struct Layout(Option, sp_std::marker::PhantomData); +pub struct LayoutV0(sp_std::marker::PhantomData); -impl fmt::Debug for Layout { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Layout").finish() - } +/// substrate trie layout, with external value nodes. 
+pub struct LayoutV1(sp_std::marker::PhantomData); + +impl TrieLayout for LayoutV0 +where + H: Hasher, +{ + const USE_EXTENSION: bool = false; + const ALLOW_EMPTY: bool = true; + const MAX_INLINE_VALUE: Option = None; + + type Hash = H; + type Codec = NodeCodec; } -impl Clone for Layout { - fn clone(&self) -> Self { - Layout(self.0, sp_std::marker::PhantomData) +impl TrieConfiguration for LayoutV0 +where + H: Hasher, +{ + fn trie_root(input: I) -> ::Out + where + I: IntoIterator, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, + { + trie_root::trie_root_no_extension::(input, Self::MAX_INLINE_VALUE) } -} -impl Default for Layout { - fn default() -> Self { - Layout(None, sp_std::marker::PhantomData) + fn trie_root_unhashed(input: I) -> Vec + where + I: IntoIterator, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, + { + trie_root::unhashed_trie_no_extension::( + input, + Self::MAX_INLINE_VALUE, + ) } -} -impl Layout { - /// Layout with inner hash value size limit active. - pub fn with_max_inline_value(threshold: u32) -> Self { - Layout(Some(threshold), sp_std::marker::PhantomData) + fn encode_index(input: u32) -> Vec { + codec::Encode::encode(&codec::Compact(input)) } } -impl TrieLayout for Layout +impl TrieLayout for LayoutV1 where H: Hasher, { const USE_EXTENSION: bool = false; const ALLOW_EMPTY: bool = true; + const MAX_INLINE_VALUE: Option = sp_core::storage::DEFAULT_STATE_HASHING; type Hash = H; type Codec = NodeCodec; - - fn max_inline_value(&self) -> Option { - self.0 - } } -impl TrieConfiguration for Layout +impl TrieConfiguration for LayoutV1 where H: Hasher, { - fn trie_root(&self, input: I) -> ::Out + fn trie_root(input: I) -> ::Out where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - trie_root::trie_root_no_extension::(input, self.max_inline_value()) + trie_root::trie_root_no_extension::(input, Self::MAX_INLINE_VALUE) } - fn trie_root_unhashed(&self, input: I) -> Vec + fn trie_root_unhashed(input: I) -> Vec where I: IntoIterator, A: AsRef<[u8]> + Ord, @@ 
-117,7 +134,7 @@ where { trie_root::unhashed_trie_no_extension::( input, - self.max_inline_value(), + Self::MAX_INLINE_VALUE, ) } @@ -161,13 +178,19 @@ pub type TrieHash = <::Hash as Hasher>::Out; /// This module is for non generic definition of trie type. /// Only the `Hasher` trait is generic in this case. pub mod trie_types { - pub type Layout = super::Layout; + use super::*; + /// Persistent trie database read-access interface for the a given hasher. - pub type TrieDB<'a, H> = super::TrieDB<'a, Layout>; + /// Read only V1 and V0 are compatible, thus we always use V1. + pub type TrieDB<'a, H> = super::TrieDB<'a, LayoutV1>; + /// Persistent trie database write-access interface for the a given hasher. + pub type TrieDBMutV0<'a, H> = super::TrieDBMut<'a, LayoutV0>; /// Persistent trie database write-access interface for the a given hasher. - pub type TrieDBMut<'a, H> = super::TrieDBMut<'a, Layout>; + pub type TrieDBMutV1<'a, H> = super::TrieDBMut<'a, LayoutV1>; /// Querying interface, as in `trie_db` but less generic. - pub type Lookup<'a, H, Q> = trie_db::Lookup<'a, Layout, Q>; + pub type LookupV0<'a, H, Q> = trie_db::Lookup<'a, LayoutV0, Q>; + /// Querying interface, as in `trie_db` but less generic. + pub type LookupV1<'a, H, Q> = trie_db::Lookup<'a, LayoutV1, Q>; /// As in `trie_db`, but less generic, error type for the crate. pub type TrieError = trie_db::TrieError; } @@ -215,7 +238,8 @@ where K: 'a + AsRef<[u8]>, V: 'a + AsRef<[u8]>, { - verify_proof::, _, _, _>(root, proof, items) + // for read proof layout v0 and v1 are the same. + verify_proof::, _, _, _>(root, proof, items) } /// Determine a trie root given a hash DB and delta values. 
@@ -223,7 +247,6 @@ pub fn delta_trie_root( db: &mut DB, mut root: TrieHash, delta: I, - layout: L, ) -> Result, Box>> where I: IntoIterator, @@ -233,7 +256,7 @@ where DB: hash_db::HashDB, { { - let mut trie = TrieDBMut::::from_existing_with_layout(db, &mut root, layout)?; + let mut trie = TrieDBMut::::from_existing(db, &mut root)?; let mut delta = delta.into_iter().collect::>(); delta.sort_by(|l, r| l.0.borrow().cmp(r.0.borrow())); @@ -281,26 +304,23 @@ where /// Determine the empty trie root. pub fn empty_trie_root() -> ::Out { - L::default().trie_root::<_, Vec, Vec>(core::iter::empty()) + L::trie_root::<_, Vec, Vec>(core::iter::empty()) } /// Determine the empty child trie root. pub fn empty_child_trie_root() -> ::Out { - L::default().trie_root::<_, Vec, Vec>(core::iter::empty()) + L::trie_root::<_, Vec, Vec>(core::iter::empty()) } /// Determine a child trie root given its ordered contents, closed form. H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. -pub fn child_trie_root( - layout: &L, - input: I, -) -> ::Out +pub fn child_trie_root(input: I) -> ::Out where I: IntoIterator, A: AsRef<[u8]> + Ord, B: AsRef<[u8]>, { - layout.trie_root(input) + L::trie_root(input) } /// Determine a child trie root given a hash DB and delta values. H is the default hasher, @@ -310,7 +330,6 @@ pub fn child_delta_trie_root( db: &mut DB, root_data: RD, delta: I, - layout: L, ) -> Result<::Out, Box>> where I: IntoIterator, @@ -325,7 +344,7 @@ where root.as_mut().copy_from_slice(root_data.as_ref()); let mut db = KeySpacedDBMut::new(&mut *db, keyspace); - delta_trie_root::(&mut db, root, delta, layout) + delta_trie_root::(&mut db, root, delta) } /// Record all keys for a given root. 
@@ -515,11 +534,12 @@ mod tests { use codec::{Compact, Decode, Encode}; use hash_db::{HashDB, Hasher}; use hex_literal::hex; - use sp_core::{storage::DEFAULT_MAX_INLINE_VALUE as TRESHOLD, Blake2Hasher}; + use sp_core::Blake2Hasher; use trie_db::{DBValue, NodeCodec as NodeCodecT, Trie, TrieMut}; use trie_standardmap::{Alphabet, StandardMap, ValueMode}; - type Layout = super::Layout; + type LayoutV0 = super::LayoutV0; + type LayoutV1 = super::LayoutV1; type MemoryDBMeta = memory_db::MemoryDB, trie_db::DBValue, MemTracker>; @@ -528,15 +548,15 @@ mod tests { ::hashed_null_node() } - fn check_equivalent(input: &Vec<(&[u8], &[u8])>, layout: T) { + fn check_equivalent(input: &Vec<(&[u8], &[u8])>) { { - let closed_form = layout.trie_root(input.clone()); - let d = layout.trie_root_unhashed(input.clone()); + let closed_form = T::trie_root(input.clone()); + let d = T::trie_root_unhashed(input.clone()); println!("Data: {:#x?}, {:#x?}", d, Blake2Hasher::hash(&d[..])); let persistent = { let mut memdb = MemoryDBMeta::default(); let mut root = Default::default(); - let mut t = TrieDBMut::::new_with_layout(&mut memdb, &mut root, layout); + let mut t = TrieDBMut::::new(&mut memdb, &mut root); for (x, y) in input.iter().rev() { t.insert(x, y).unwrap(); } @@ -546,17 +566,17 @@ mod tests { } } - fn check_iteration(input: &Vec<(&[u8], &[u8])>, layout: T) { + fn check_iteration(input: &Vec<(&[u8], &[u8])>) { let mut memdb = MemoryDBMeta::default(); let mut root = Default::default(); { - let mut t = TrieDBMut::::new_with_layout(&mut memdb, &mut root, layout.clone()); + let mut t = TrieDBMut::::new(&mut memdb, &mut root); for (x, y) in input.clone() { t.insert(x, y).unwrap(); } } { - let t = TrieDB::::new_with_layout(&mut memdb, &root, layout).unwrap(); + let t = TrieDB::::new(&mut memdb, &root).unwrap(); assert_eq!( input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), t.iter() @@ -568,23 +588,20 @@ mod tests { } fn check_input(input: &Vec<(&[u8], &[u8])>) { - let layout = 
Layout::default(); - check_equivalent::(input, layout.clone()); - check_iteration::(input, layout); - let layout = Layout::with_max_inline_value(TRESHOLD); - check_equivalent::(input, layout.clone()); - check_iteration::(input, layout); + check_equivalent::(input); + check_iteration::(input); + check_equivalent::(input); + check_iteration::(input); } #[test] fn default_trie_root() { let mut db = MemoryDB::default(); - let mut root = TrieHash::::default(); - let mut empty = TrieDBMut::::new(&mut db, &mut root); + let mut root = TrieHash::::default(); + let mut empty = TrieDBMut::::new(&mut db, &mut root); empty.commit(); let root1 = empty.root().as_ref().to_vec(); - let root2: Vec = Layout::default() - .trie_root::<_, Vec, Vec>(std::iter::empty()) + let root2: Vec = LayoutV1::trie_root::<_, Vec, Vec>(std::iter::empty()) .as_ref() .iter() .cloned() @@ -688,12 +705,11 @@ mod tests { db: &'db mut dyn HashDB, root: &'db mut TrieHash, v: &[(Vec, Vec)], - layout: T, ) -> TrieDBMut<'db, T> where T: TrieConfiguration, { - let mut t = TrieDBMut::::new_with_layout(db, root, layout); + let mut t = TrieDBMut::::new(db, root); for i in 0..v.len() { let key: &[u8] = &v[i].0; let val: &[u8] = &v[i].1; @@ -714,10 +730,10 @@ mod tests { #[test] fn random_should_work() { - random_should_work_inner(true); - random_should_work_inner(false); + random_should_work_inner::(); + random_should_work_inner::(); } - fn random_should_work_inner(limit_inline_value: bool) { + fn random_should_work_inner() { let mut seed = ::Out::zero(); for test_i in 0..10_000 { if test_i % 50 == 0 { @@ -732,16 +748,11 @@ mod tests { } .make_with(seed.as_fixed_bytes_mut()); - let layout = if limit_inline_value { - Layout::with_max_inline_value(TRESHOLD) - } else { - Layout::default() - }; - let real = layout.trie_root(x.clone()); + let real = L::trie_root(x.clone()); let mut memdb = MemoryDB::default(); let mut root = Default::default(); - let mut memtrie = populate_trie::(&mut memdb, &mut root, &x, 
layout.clone()); + let mut memtrie = populate_trie::(&mut memdb, &mut root, &x); memtrie.commit(); if *memtrie.root() != real { @@ -753,9 +764,9 @@ mod tests { } } assert_eq!(*memtrie.root(), real); - unpopulate_trie::(&mut memtrie, &x); + unpopulate_trie::(&mut memtrie, &x); memtrie.commit(); - let hashed_null_node = hashed_null_node::(); + let hashed_null_node = hashed_null_node::(); if *memtrie.root() != hashed_null_node { println!("- TRIE MISMATCH"); println!(""); @@ -774,18 +785,16 @@ mod tests { #[test] fn codec_trie_empty() { - let layout = Layout::default(); let input: Vec<(&[u8], &[u8])> = vec![]; - let trie = layout.trie_root_unhashed(input); + let trie = LayoutV1::trie_root_unhashed(input); println!("trie: {:#x?}", trie); assert_eq!(trie, vec![0x0]); } #[test] fn codec_trie_single_tuple() { - let layout = Layout::default(); let input = vec![(vec![0xaa], vec![0xbb])]; - let trie = layout.trie_root_unhashed(input); + let trie = LayoutV1::trie_root_unhashed(input); println!("trie: {:#x?}", trie); assert_eq!( trie, @@ -800,9 +809,8 @@ mod tests { #[test] fn codec_trie_two_tuples_disjoint_keys() { - let layout = Layout::default(); let input = vec![(&[0x48, 0x19], &[0xfe]), (&[0x13, 0x14], &[0xff])]; - let trie = layout.trie_root_unhashed(input); + let trie = LayoutV1::trie_root_unhashed(input); println!("trie: {:#x?}", trie); let mut ex = Vec::::new(); ex.push(0x80); // branch, no value (0b_10..) 
no nibble @@ -826,16 +834,10 @@ mod tests { #[test] fn iterator_works() { - iterator_works_inner(true); - iterator_works_inner(false); - } - fn iterator_works_inner(limit_inline_value: bool) { - let layout = if limit_inline_value { - Layout::with_max_inline_value(TRESHOLD) - } else { - Layout::default() - }; - + iterator_works_inner::(); + iterator_works_inner::(); + } + fn iterator_works_inner() { let pairs = vec![ (hex!("0103000000000000000464").to_vec(), hex!("0400000000").to_vec()), (hex!("0103000000000000000469").to_vec(), hex!("0401000000").to_vec()), @@ -843,9 +845,9 @@ mod tests { let mut mdb = MemoryDB::default(); let mut root = Default::default(); - let _ = populate_trie::(&mut mdb, &mut root, &pairs, layout.clone()); + let _ = populate_trie::(&mut mdb, &mut root, &pairs); - let trie = TrieDB::::new_with_layout(&mdb, &root, layout).unwrap(); + let trie = TrieDB::::new(&mdb, &root).unwrap(); let iter = trie.iter().unwrap(); let mut iter_pairs = Vec::new(); @@ -866,16 +868,15 @@ mod tests { let mut memdb = MemoryDB::default(); let mut root = Default::default(); - let layout = Layout::default(); - populate_trie::(&mut memdb, &mut root, &pairs, layout); + populate_trie::(&mut memdb, &mut root, &pairs); let non_included_key: Vec = hex!("0909").to_vec(); let proof = - generate_trie_proof::(&memdb, root, &[non_included_key.clone()]) + generate_trie_proof::(&memdb, root, &[non_included_key.clone()]) .unwrap(); // Verifying that the K was not included into the trie should work. - assert!(verify_trie_proof::>( + assert!(verify_trie_proof::>( &root, &proof, &[(non_included_key.clone(), None)], @@ -883,7 +884,7 @@ mod tests { .is_ok()); // Verifying that the K was included into the trie should fail. 
- assert!(verify_trie_proof::>( + assert!(verify_trie_proof::>( &root, &proof, &[(non_included_key, Some(hex!("1010").to_vec()))], @@ -900,14 +901,13 @@ mod tests { let mut memdb = MemoryDB::default(); let mut root = Default::default(); - let layout = Layout::default(); - populate_trie::(&mut memdb, &mut root, &pairs, layout); + populate_trie::(&mut memdb, &mut root, &pairs); let proof = - generate_trie_proof::(&memdb, root, &[pairs[0].0.clone()]).unwrap(); + generate_trie_proof::(&memdb, root, &[pairs[0].0.clone()]).unwrap(); // Check that a K, V included into the proof are verified. - assert!(verify_trie_proof::( + assert!(verify_trie_proof::( &root, &proof, &[(pairs[0].0.clone(), Some(pairs[0].1.clone()))] @@ -915,7 +915,7 @@ mod tests { .is_ok()); // Absence of the V is not verified with the proof that has K, V included. - assert!(verify_trie_proof::>( + assert!(verify_trie_proof::>( &root, &proof, &[(pairs[0].0.clone(), None)] @@ -923,7 +923,7 @@ mod tests { .is_err()); // K not included into the trie is not verified. - assert!(verify_trie_proof::( + assert!(verify_trie_proof::( &root, &proof, &[(hex!("4242").to_vec(), Some(pairs[0].1.clone()))] @@ -931,7 +931,7 @@ mod tests { .is_err()); // K included into the trie but not included into the proof is not verified. 
- assert!(verify_trie_proof::( + assert!(verify_trie_proof::( &root, &proof, &[(pairs[1].0.clone(), Some(pairs[1].1.clone()))] @@ -956,18 +956,16 @@ mod tests { .unwrap(); let proof_db = proof.into_memory_db::(); - let first_storage_root = delta_trie_root::( + let first_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, valid_delta, - Default::default(), ) .unwrap(); - let second_storage_root = delta_trie_root::( + let second_storage_root = delta_trie_root::( &mut proof_db.clone(), storage_root, invalid_delta, - Default::default(), ) .unwrap(); diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index aea1571fcccb4..a1f057f8072b8 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -15,11 +15,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::Layout; use codec::{Decode, Encode}; use hash_db::{HashDB, Hasher}; use scale_info::TypeInfo; use sp_std::vec::Vec; +// Note that `LayoutV1` usage here (proof compaction) is compatible +// with `LayoutV0`. +use crate::LayoutV1 as Layout; /// A proof that some set of key-value pairs are included in the storage trie. The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that diff --git a/primitives/trie/src/trie_codec.rs b/primitives/trie/src/trie_codec.rs index 6e2ad37d6d4a1..f485ee13e3329 100644 --- a/primitives/trie/src/trie_codec.rs +++ b/primitives/trie/src/trie_codec.rs @@ -112,8 +112,6 @@ where I: IntoIterator, { let mut nodes_iter = encoded.into_iter(); - // Layout does not change trie reading. - let layout = L::default(); let (top_root, _nb_used) = trie_db::decode_compact_from_iter::(db, &mut nodes_iter)?; // Only check root if expected root is passed as argument. 
@@ -126,7 +124,7 @@ where let mut child_tries = Vec::new(); { // fetch child trie roots - let trie = crate::TrieDB::::new_with_layout(db, &top_root, layout.clone())?; + let trie = crate::TrieDB::::new(db, &top_root)?; let mut iter = trie.iter()?; @@ -204,7 +202,6 @@ where let mut child_tries = Vec::new(); let partial_db = proof.into_memory_db(); let mut compact_proof = { - // Layout does not change trie reading, so can use default here. let trie = crate::TrieDB::::new(&partial_db, &root)?; let mut iter = trie.iter()?; diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index da2c38e36fc7f..fcb691961aadf 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -29,10 +29,7 @@ use sp_std::{marker::PhantomData, prelude::*}; use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; use sp_core::{offchain::KeyTypeId, ChangesTrieConfiguration, OpaqueMetadata, RuntimeDebug}; -use sp_trie::{ - trie_types::{TrieDB, TrieDBMut}, - PrefixedMemoryDB, StorageProof, -}; +use sp_trie::{trie_types::TrieDB, PrefixedMemoryDB, StorageProof}; use trie_db::{Trie, TrieMut}; use cfg_if::cfg_if; @@ -62,6 +59,8 @@ use sp_runtime::{ #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; use sp_version::RuntimeVersion; +// bench on latest state. +use sp_trie::trie_types::TrieDBMutV1 as TrieDBMut; // Ensure Babe and Aura use the same crypto to simplify things a bit. 
pub use sp_consensus_babe::{AllowedSlots, AuthorityId, Slot}; @@ -1341,7 +1340,7 @@ mod tests { let mut root = crate::Hash::default(); let mut mdb = sp_trie::MemoryDB::::default(); { - let mut trie = sp_trie::trie_types::TrieDBMut::new(&mut mdb, &mut root); + let mut trie = sp_trie::trie_types::TrieDBMutV1::new(&mut mdb, &mut root); trie.insert(b"value3", &[142]).expect("insert failed"); trie.insert(b"value4", &[124]).expect("insert failed"); }; From 599714fa2cef458203909ebc9daf1e2be9d8912e Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 4 Oct 2021 16:18:30 +0200 Subject: [PATCH 095/188] update deps --- Cargo.lock | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a9e01a5ace372..ada129a0eaab3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2539,7 +2539,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" [[package]] name = "hash256-std-hasher" @@ -2553,7 +2553,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "crunchy", ] @@ -3214,10 +3214,10 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "hash-db", - "hash256-std-hasher 0.15.2 
(git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2)", + "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", "tiny-keccak", ] @@ -4150,7 +4150,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10776,22 +10776,22 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.28.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "criterion", "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2)", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", "memory-db", "parity-scale-codec", "trie-db", "trie-root", - "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2)", + "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", ] [[package]] name = "trie-db" version = "0.22.6" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10803,7 +10803,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "hash-db", ] @@ -10821,10 +10821,10 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" dependencies = [ "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new2)", + "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", ] [[package]] From 60459527e15c8c0c7e86ce8023081d0f9dba99d1 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 5 Oct 2021 10:54:05 +0200 Subject: [PATCH 096/188] Update to latest trie pr changes. 
--- Cargo.lock | 16 ++++++++-------- primitives/trie/src/node_codec.rs | 23 +++++++++++------------ 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ada129a0eaab3..3fabfdda52960 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2539,7 +2539,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" [[package]] name = "hash256-std-hasher" @@ -2553,7 +2553,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" dependencies = [ "crunchy", ] @@ -3214,7 +3214,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", @@ -4150,7 +4150,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10776,7 +10776,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" 
version = "0.28.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" dependencies = [ "criterion", "hash-db", @@ -10791,7 +10791,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.6" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10803,7 +10803,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" dependencies = [ "hash-db", ] @@ -10821,7 +10821,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#910c79222905b9da18303c6382dea0e8c7e16941" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index e630f3222de1e..7e5e38563d360 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -110,14 +110,14 @@ impl NodeCodec { let bitmap_range = input.take(BITMAP_LENGTH)?; let bitmap = Bitmap::decode(&data[bitmap_range])?; let value = if branch_has_value { - if contains_hash { + Some(if contains_hash { ValuePlan::HashedValue(input.take(H::LENGTH)?) 
} else { let count = >::decode(&mut input)?.0 as usize; ValuePlan::Value(input.take(count)?) - } + }) } else { - ValuePlan::NoValue + None }; let mut children = [ None, None, None, None, None, None, None, None, None, None, None, None, None, @@ -171,7 +171,7 @@ impl NodeCodecT for NodeCodec where H: Hasher, { - const ESCAPE_HEADER: Option<&'static [u8]> = Some(&[trie_constants::ESCAPE_COMPACT_HEADER]); + const ESCAPE_HEADER: Option = Some(trie_constants::ESCAPE_COMPACT_HEADER); type Error = Error; type HashOut = H::Out; @@ -207,7 +207,6 @@ where debug_assert!(hash.len() == H::LENGTH); output.extend_from_slice(hash); }, - Value::NoValue => unreachable!("Leaf node always with value."), } output } @@ -222,7 +221,7 @@ where fn branch_node( _children: impl Iterator::Out>>>>, - _maybe_value: Value, + _maybe_value: Option, ) -> Vec { unreachable!("No extension codec.") } @@ -231,11 +230,11 @@ where partial: impl Iterator, number_nibble: usize, children: impl Iterator::Out>>>>, - value: Value, + value: Option, ) -> Vec { - let contains_hash = matches!(&value, Value::HashedValue(..)); + let contains_hash = matches!(&value, Some(Value::HashedValue(..))); let mut output = match (&value, contains_hash) { - (&Value::NoValue, _) => + (&None, _) => partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue), (_, false) => partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue), @@ -247,15 +246,15 @@ where let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH]; (0..BITMAP_LENGTH).for_each(|_| output.push(0)); match value { - Value::Value(value) => { + Some(Value::Value(value)) => { Compact(value.len() as u32).encode_to(&mut output); output.extend_from_slice(value); }, - Value::HashedValue(hash, _) => { + Some(Value::HashedValue(hash, _)) => { debug_assert!(hash.len() == H::LENGTH); output.extend_from_slice(hash); }, - Value::NoValue => (), + None => (), } Bitmap::encode( children.map(|maybe_child| match maybe_child.borrow() { From 
fb8ff8e17b0dd6616fdd879e849c488bf0ac0d9e Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 5 Oct 2021 11:05:18 +0200 Subject: [PATCH 097/188] fix benches --- bin/node/executor/benches/bench.rs | 10 +++++----- primitives/trie/benches/bench.rs | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index ee77c4ff09629..ae13d0af7aa50 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -83,16 +83,16 @@ fn construct_block( parent_hash: Hash, extrinsics: Vec, ) -> (Vec, Hash) { - use sp_trie::{trie_types::Layout, TrieConfiguration}; + use sp_trie::{LayoutV0, TrieConfiguration}; // sign extrinsics. let extrinsics = extrinsics.into_iter().map(sign).collect::>(); // calculate the header fields that we can. - let extrinsics_root = Layout::::default() - .ordered_trie_root(extrinsics.iter().map(Encode::encode)) - .to_fixed_bytes() - .into(); + let extrinsics_root = + LayoutV0::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) + .to_fixed_bytes() + .into(); let header = Header { parent_hash, diff --git a/primitives/trie/benches/bench.rs b/primitives/trie/benches/bench.rs index 8c84c6354f2c3..d78ceadff7283 100644 --- a/primitives/trie/benches/bench.rs +++ b/primitives/trie/benches/bench.rs @@ -21,11 +21,11 @@ criterion_main!(benches); fn benchmark(c: &mut Criterion) { trie_bench::standard_benchmark::< - sp_trie::Layout, + sp_trie::LayoutV1, sp_trie::TrieStream, >(c, "substrate-blake2"); trie_bench::standard_benchmark::< - sp_trie::Layout, + sp_trie::LayoutV1, sp_trie::TrieStream, >(c, "substrate-keccak"); } From af86f39432563f82542824bbea1684d2e485200d Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 5 Oct 2021 15:45:05 +0200 Subject: [PATCH 098/188] Verify proof requires right layout. 
--- primitives/io/src/lib.rs | 2 -- primitives/trie/src/lib.rs | 5 ++--- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 8104b7ce29637..7e54b54dc73b3 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -439,7 +439,6 @@ pub trait Trie { /// Verify trie proof fn blake2_256_verify_proof(root: H256, proof: &[Vec], key: &[u8], value: &[u8]) -> bool { - // Here V1 would work too sp_trie::verify_trie_proof::, _, _, _>( &root, proof, @@ -450,7 +449,6 @@ pub trait Trie { /// Verify trie proof fn keccak_256_verify_proof(root: H256, proof: &[Vec], key: &[u8], value: &[u8]) -> bool { - // Here V1 would work too sp_trie::verify_trie_proof::, _, _, _>( &root, proof, diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index bb8cea1782178..e8fdd0d990afb 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -231,15 +231,14 @@ pub fn verify_trie_proof<'a, L, I, K, V>( root: &TrieHash, proof: &[Vec], items: I, -) -> Result<(), VerifyError, error::Error>> +) -> Result<(), VerifyError, CError>> where L: TrieConfiguration, I: IntoIterator)>, K: 'a + AsRef<[u8]>, V: 'a + AsRef<[u8]>, { - // for read proof layout v0 and v1 are the same. - verify_proof::, _, _, _>(root, proof, items) + verify_proof::(root, proof, items) } /// Determine a trie root given a hash DB and delta values. 
From 66daec0fbf5aed0f02dadbf49a7783bc05ae377a Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 7 Oct 2021 09:38:14 +0200 Subject: [PATCH 099/188] update trie_root --- Cargo.lock | 16 ++++++++-------- primitives/trie/src/trie_stream.rs | 16 +++++++--------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3fabfdda52960..78bb396890c07 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2539,7 +2539,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" [[package]] name = "hash256-std-hasher" @@ -2553,7 +2553,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" dependencies = [ "crunchy", ] @@ -3214,7 +3214,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", @@ -4150,7 +4150,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" dependencies = [ "hash-db", 
"hashbrown 0.11.2", @@ -10776,7 +10776,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.28.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" dependencies = [ "criterion", "hash-db", @@ -10791,7 +10791,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.6" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10803,7 +10803,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" dependencies = [ "hash-db", ] @@ -10821,7 +10821,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#61ae49bfdaf20e42856f9460465028ffef691584" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index 20cc35c6b8708..9768d78f3daa3 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -84,13 +84,11 @@ impl trie_root::TrieStream for TrieStream { fn append_leaf(&mut self, key: &[u8], value: 
TrieStreamValue) { let kind = match &value { - TrieStreamValue::NoValue => unreachable!(), TrieStreamValue::Value(..) => NodeKind::Leaf, TrieStreamValue::HashedValue(..) => NodeKind::HashedValueLeaf, }; self.buffer.extend(fuse_nibbles_node(key, kind)); match &value { - TrieStreamValue::NoValue => unreachable!(), TrieStreamValue::Value(value) => { Compact(value.len() as u32).encode_to(&mut self.buffer); self.buffer.extend_from_slice(value); @@ -104,14 +102,14 @@ impl trie_root::TrieStream for TrieStream { fn begin_branch( &mut self, maybe_partial: Option<&[u8]>, - maybe_value: TrieStreamValue, + maybe_value: Option, has_children: impl Iterator, ) { if let Some(partial) = maybe_partial { let kind = match &maybe_value { - TrieStreamValue::NoValue => NodeKind::BranchNoValue, - TrieStreamValue::Value(..) => NodeKind::BranchWithValue, - TrieStreamValue::HashedValue(..) => NodeKind::HashedValueBranch, + None => NodeKind::BranchNoValue, + Some(TrieStreamValue::Value(..)) => NodeKind::BranchWithValue, + Some(TrieStreamValue::HashedValue(..)) => NodeKind::HashedValueBranch, }; self.buffer.extend(fuse_nibbles_node(partial, kind)); @@ -121,12 +119,12 @@ impl trie_root::TrieStream for TrieStream { unreachable!("trie stream codec only for no extension trie"); } match maybe_value { - TrieStreamValue::NoValue => (), - TrieStreamValue::Value(value) => { + None => (), + Some(TrieStreamValue::Value(value)) => { Compact(value.len() as u32).encode_to(&mut self.buffer); self.buffer.extend_from_slice(value); }, - TrieStreamValue::HashedValue(hash) => { + Some(TrieStreamValue::HashedValue(hash)) => { self.buffer.extend_from_slice(hash.as_slice()); }, } From 4ff900978f3ecac540815596c5109c75007510a5 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 12 Oct 2021 17:59:22 +0200 Subject: [PATCH 100/188] Update trie deps to latest --- Cargo.lock | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 633e37adf5b7f..9c1cb53247d47 100644 
--- a/Cargo.lock +++ b/Cargo.lock @@ -2537,7 +2537,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" [[package]] name = "hash256-std-hasher" @@ -2551,7 +2551,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" dependencies = [ "crunchy", ] @@ -3192,7 +3192,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", @@ -4112,7 +4112,7 @@ dependencies = [ [[package]] name = "memory-db" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10686,7 +10686,7 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.28.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" +source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" dependencies = [ "criterion", "hash-db", @@ -10701,7 +10701,7 @@ dependencies = [ [[package]] name = "trie-db" version = "0.22.6" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10713,7 +10713,7 @@ dependencies = [ [[package]] name = "trie-root" version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" dependencies = [ "hash-db", ] @@ -10731,7 +10731,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3e668b03c4ed183e92e623b3b56010e8edefc28c" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", @@ -10830,8 +10830,8 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f559b464de2e2bdabcac6a210d12e9b5a5973c251e102c44c585c71d51bd78e" dependencies = [ - "cfg-if 1.0.0", - "rand 0.8.4", + "cfg-if 0.1.10", + "rand 0.6.5", "static_assertions", ] From a343138f400ad45f047d7fb277e14cc4bec5806b Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 18 Oct 2021 11:52:33 +0200 Subject: [PATCH 101/188] Update to latest trie versioning --- Cargo.lock | 39 ++++++++++++++++++----------- primitives/state-machine/Cargo.toml | 4 +-- primitives/trie/Cargo.toml | 8 +++--- 
primitives/trie/src/node_codec.rs | 20 +++++++-------- primitives/trie/src/trie_stream.rs | 16 ++++++------ test-utils/runtime/Cargo.toml | 2 +- 6 files changed, 50 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 372d3f9783765..e1e9264bf5f34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2537,7 +2537,7 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" [[package]] name = "hash256-std-hasher" @@ -2551,7 +2551,7 @@ dependencies = [ [[package]] name = "hash256-std-hasher" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" dependencies = [ "crunchy", ] @@ -3192,7 +3192,7 @@ dependencies = [ [[package]] name = "keccak-hasher" version = "0.15.3" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" dependencies = [ "hash-db", "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", @@ -4112,7 +4112,18 @@ dependencies = [ [[package]] name = "memory-db" version = "0.27.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de006e09d04fc301a5f7e817b75aa49801c4479a8af753764416b085337ddcc5" +dependencies = [ + "hash-db", + "hashbrown 0.11.2", + "parity-util-mem", +] + +[[package]] +name = "memory-db" 
+version = "0.28.0" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -9729,7 +9740,7 @@ dependencies = [ "criterion", "hash-db", "hex-literal", - "memory-db", + "memory-db 0.28.0", "parity-scale-codec", "scale-info", "sp-core", @@ -10029,7 +10040,7 @@ dependencies = [ "frame-system-rpc-runtime-api", "futures 0.3.16", "log 0.4.14", - "memory-db", + "memory-db 0.27.0", "pallet-babe", "pallet-timestamp", "parity-scale-codec", @@ -10685,13 +10696,13 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" -version = "0.28.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" +version = "0.29.0" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" dependencies = [ "criterion", "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", - "memory-db", + "memory-db 0.28.0", "parity-scale-codec", "trie-db", "trie-root", @@ -10700,8 +10711,8 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.22.6" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" +version = "0.23.0" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10712,8 +10723,8 @@ dependencies = [ [[package]] name = "trie-root" -version = "0.16.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" +version = "0.17.0" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" dependencies = [ "hash-db", ] @@ -10731,7 
+10742,7 @@ dependencies = [ [[package]] name = "trie-standardmap" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#3caaba033a7ef57a7552e2996339a4319de8788a" +source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" dependencies = [ "hash-db", "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index bbe9728befd80..32c470ad41030 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -18,8 +18,8 @@ log = { version = "0.4.11", optional = true } thiserror = { version = "1.0.21", optional = true } parking_lot = { version = "0.11.1", optional = true } hash-db = { version = "0.15.2", default-features = false } -trie-db = { version = "0.22.6", default-features = false } -trie-root = { version = "0.16.0", default-features = false } +trie-db = { version = "0.23.0", default-features = false } +trie-root = { version = "0.17.0", default-features = false } sp-trie = { version = "4.0.0-dev", path = "../trie", default-features = false } sp-core = { version = "4.0.0-dev", path = "../core", default-features = false } sp-panic-handler = { version = "3.0.0", path = "../panic-handler", optional = true } diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 66d8a1e47276e..41e8b43a756bb 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -22,13 +22,13 @@ codec = { package = "parity-scale-codec", version = "2.0.0", default-features = scale-info = { version = "1.0", default-features = false, features = ["derive"] } sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } -trie-db = { version = "0.22.6", default-features = false } -trie-root = { version = "0.16.0", default-features = 
false } -memory-db = { version = "0.27.0", default-features = false } +trie-db = { version = "0.23.0", default-features = false } +trie-root = { version = "0.17.0", default-features = false } +memory-db = { version = "0.28.0", default-features = false } sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } [dev-dependencies] -trie-bench = "0.28.0" +trie-bench = "0.29.0" trie-standardmap = "0.15.2" criterion = "0.3.3" hex-literal = "0.3.3" diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 7e5e38563d360..6ebf2f8f304af 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -111,10 +111,10 @@ impl NodeCodec { let bitmap = Bitmap::decode(&data[bitmap_range])?; let value = if branch_has_value { Some(if contains_hash { - ValuePlan::HashedValue(input.take(H::LENGTH)?) + ValuePlan::Node(input.take(H::LENGTH)?) } else { let count = >::decode(&mut input)?.0 as usize; - ValuePlan::Value(input.take(count)?) + ValuePlan::Inline(input.take(count)?) }) } else { None @@ -152,10 +152,10 @@ impl NodeCodec { )?; let partial_padding = nibble_ops::number_padding(nibble_count); let value = if contains_hash { - ValuePlan::HashedValue(input.take(H::LENGTH)?) + ValuePlan::Node(input.take(H::LENGTH)?) } else { let count = >::decode(&mut input)?.0 as usize; - ValuePlan::Value(input.take(count)?) + ValuePlan::Inline(input.take(count)?) 
}; Ok(NodePlan::Leaf { @@ -192,18 +192,18 @@ where } fn leaf_node(partial: Partial, value: Value) -> Vec { - let contains_hash = matches!(&value, Value::HashedValue(..)); + let contains_hash = matches!(&value, Value::Node(..)); let mut output = if contains_hash { partial_encode(partial, NodeKind::HashedValueLeaf) } else { partial_encode(partial, NodeKind::Leaf) }; match value { - Value::Value(value) => { + Value::Inline(value) => { Compact(value.len() as u32).encode_to(&mut output); output.extend_from_slice(value); }, - Value::HashedValue(hash, _) => { + Value::Node(hash, _) => { debug_assert!(hash.len() == H::LENGTH); output.extend_from_slice(hash); }, @@ -232,7 +232,7 @@ where children: impl Iterator::Out>>>>, value: Option, ) -> Vec { - let contains_hash = matches!(&value, Some(Value::HashedValue(..))); + let contains_hash = matches!(&value, Some(Value::Node(..))); let mut output = match (&value, contains_hash) { (&None, _) => partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue), @@ -246,11 +246,11 @@ where let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH]; (0..BITMAP_LENGTH).for_each(|_| output.push(0)); match value { - Some(Value::Value(value)) => { + Some(Value::Inline(value)) => { Compact(value.len() as u32).encode_to(&mut output); output.extend_from_slice(value); }, - Some(Value::HashedValue(hash, _)) => { + Some(Value::Node(hash, _)) => { debug_assert!(hash.len() == H::LENGTH); output.extend_from_slice(hash); }, diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index 9768d78f3daa3..20f607c840d32 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -84,16 +84,16 @@ impl trie_root::TrieStream for TrieStream { fn append_leaf(&mut self, key: &[u8], value: TrieStreamValue) { let kind = match &value { - TrieStreamValue::Value(..) => NodeKind::Leaf, - TrieStreamValue::HashedValue(..) => NodeKind::HashedValueLeaf, + TrieStreamValue::Inline(..) 
=> NodeKind::Leaf, + TrieStreamValue::Node(..) => NodeKind::HashedValueLeaf, }; self.buffer.extend(fuse_nibbles_node(key, kind)); match &value { - TrieStreamValue::Value(value) => { + TrieStreamValue::Inline(value) => { Compact(value.len() as u32).encode_to(&mut self.buffer); self.buffer.extend_from_slice(value); }, - TrieStreamValue::HashedValue(hash) => { + TrieStreamValue::Node(hash) => { self.buffer.extend_from_slice(hash.as_slice()); }, }; @@ -108,8 +108,8 @@ impl trie_root::TrieStream for TrieStream { if let Some(partial) = maybe_partial { let kind = match &maybe_value { None => NodeKind::BranchNoValue, - Some(TrieStreamValue::Value(..)) => NodeKind::BranchWithValue, - Some(TrieStreamValue::HashedValue(..)) => NodeKind::HashedValueBranch, + Some(TrieStreamValue::Inline(..)) => NodeKind::BranchWithValue, + Some(TrieStreamValue::Node(..)) => NodeKind::HashedValueBranch, }; self.buffer.extend(fuse_nibbles_node(partial, kind)); @@ -120,11 +120,11 @@ impl trie_root::TrieStream for TrieStream { } match maybe_value { None => (), - Some(TrieStreamValue::Value(value)) => { + Some(TrieStreamValue::Inline(value)) => { Compact(value.len() as u32).encode_to(&mut self.buffer); self.buffer.extend_from_slice(value); }, - Some(TrieStreamValue::HashedValue(hash)) => { + Some(TrieStreamValue::Node(hash)) => { self.buffer.extend_from_slice(hash.as_slice()); }, } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index eb6ca51ce2e5a..ee9bca347887a 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -39,7 +39,7 @@ pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = ".. 
sp-finality-grandpa = { version = "4.0.0-dev", default-features = false, path = "../../primitives/finality-grandpa" } sp-trie = { version = "4.0.0-dev", default-features = false, path = "../../primitives/trie" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../primitives/transaction-pool" } -trie-db = { version = "0.22.6", default-features = false } +trie-db = { version = "0.23.0", default-features = false } parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] } sc-service = { version = "0.10.0-dev", default-features = false, optional = true, features = ["test-helpers"], path = "../../client/service" } sp-state-machine = { version = "0.10.0-dev", default-features = false, path = "../../primitives/state-machine" } From 343266837ea7528b91b39fd767377a084ad31648 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 19 Oct 2021 09:24:30 +0200 Subject: [PATCH 102/188] Removing patch --- Cargo.toml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 54ad163ce9c10..210529490563b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -272,10 +272,3 @@ zeroize = { opt-level = 3 } [profile.release] # Substrate runtime requires unwinding. 
panic = "unwind" - -[patch.crates-io] -hash-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } -memory-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } -trie-db = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } -trie-root = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } -trie-bench = { git = "https://github.com/cheme/trie.git", branch = "hashed-value-simple4-new" } From 932eee7824d29661d7036f6db39b178c233619ba Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 19 Oct 2021 09:44:01 +0200 Subject: [PATCH 103/188] update lock --- Cargo.lock | 56 +++++++++++++++++------------------------------------- 1 file changed, 17 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e1e9264bf5f34..167ef05656e58 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2537,7 +2537,8 @@ dependencies = [ [[package]] name = "hash-db" version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" [[package]] name = "hash256-std-hasher" @@ -2548,14 +2549,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hash256-std-hasher" -version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" -dependencies = [ - "crunchy", -] - [[package]] name = "hashbrown" version = "0.9.1" @@ -3185,17 +3178,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711adba9940a039f4374fc5724c0a5eaca84a2d558cce62256bfe26f0dbef05e" dependencies = [ "hash-db", - "hash256-std-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", - "tiny-keccak", -] - -[[package]] -name = "keccak-hasher" -version = "0.15.3" -source = 
"git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" -dependencies = [ - "hash-db", - "hash256-std-hasher 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", + "hash256-std-hasher", "tiny-keccak", ] @@ -4123,7 +4106,8 @@ dependencies = [ [[package]] name = "memory-db" version = "0.28.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d505169b746dacf02f7d14d8c80b34edfd8212159c63d23c977739a0d960c626" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -9252,7 +9236,7 @@ dependencies = [ "ed25519-dalek", "futures 0.3.16", "hash-db", - "hash256-std-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hash256-std-hasher", "hex", "hex-literal", "impl-serde", @@ -9480,7 +9464,7 @@ name = "sp-runtime" version = "4.0.0-dev" dependencies = [ "either", - "hash256-std-hasher 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", + "hash256-std-hasher", "impl-trait-for-tuples", "log 0.4.14", "parity-scale-codec", @@ -9749,7 +9733,7 @@ dependencies = [ "trie-bench", "trie-db", "trie-root", - "trie-standardmap 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", + "trie-standardmap", ] [[package]] @@ -10697,22 +10681,24 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" version = "0.29.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36ac46f6503d0fa976193db46f9dbb1d454e5dbde76495f1316f576c7f3f0e6b" dependencies = [ "criterion", "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", + "keccak-hasher", "memory-db 0.28.0", 
"parity-scale-codec", "trie-db", "trie-root", - "trie-standardmap 0.15.2 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", + "trie-standardmap", ] [[package]] name = "trie-db" version = "0.23.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ddae50680c12ef75bfbf58416ca6622fa43d879553f6cb2ed1a817346e1ffe" dependencies = [ "hash-db", "hashbrown 0.11.2", @@ -10724,7 +10710,8 @@ dependencies = [ [[package]] name = "trie-root" version = "0.17.0" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a36c5ca3911ed3c9a5416ee6c679042064b93fc637ded67e25f92e68d783891" dependencies = [ "hash-db", ] @@ -10736,16 +10723,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3161ba520ab28cd8e6b68e1126f1009f6e335339d1a73b978139011703264c8" dependencies = [ "hash-db", - "keccak-hasher 0.15.3 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "trie-standardmap" -version = "0.15.2" -source = "git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new#89b344dd525d6f7bb39ef67788131f099b40c09f" -dependencies = [ - "hash-db", - "keccak-hasher 0.15.3 (git+https://github.com/cheme/trie.git?branch=hashed-value-simple4-new)", + "keccak-hasher", ] [[package]] From 3299b7ffb368994120078b87b2dd2cf0f4337b4b Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 19 Oct 2021 16:44:32 +0200 Subject: [PATCH 104/188] extrinsic for sc-service-test using layout v0. 
--- client/service/test/src/client/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index e123363eb92c0..8f0b21e52a21e 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -40,7 +40,7 @@ use sp_state_machine::{ backend::Backend as _, ExecutionStrategy, InMemoryBackend, OverlayedChanges, StateMachine, }; use sp_storage::{ChildInfo, StorageKey}; -use sp_trie::{LayoutV1, TrieConfiguration}; +use sp_trie::{LayoutV0, TrieConfiguration}; use std::{ collections::{HashMap, HashSet}, sync::Arc, @@ -170,7 +170,7 @@ fn construct_block( let transactions = txs.into_iter().map(|tx| tx.into_signed_tx()).collect::>(); let iter = transactions.iter().map(Encode::encode); - let extrinsics_root = LayoutV1::::ordered_trie_root(iter).into(); + let extrinsics_root = LayoutV0::::ordered_trie_root(iter).into(); let mut header = Header { parent_hash, From 43289732d165c2f0d53ed198e32725b878ba68df Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 20 Oct 2021 11:39:36 +0200 Subject: [PATCH 105/188] Adding RuntimeVersionOf to CallExecutor works. --- client/api/src/call_executor.rs | 4 ++-- client/light/src/call_executor.rs | 2 +- client/service/src/client/client.rs | 32 ++++++++++++------------- client/service/test/src/client/light.rs | 12 +++++++++- 4 files changed, 30 insertions(+), 20 deletions(-) diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 22af495c06542..b1fd731ca088c 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -19,7 +19,7 @@ //! A method call executor interface. 
use codec::{Decode, Encode}; -use sc_executor::RuntimeVersion; +use sc_executor::{RuntimeVersion, RuntimeVersionOf}; use sp_core::NativeOrEncoded; use sp_externalities::Extensions; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; @@ -42,7 +42,7 @@ pub trait ExecutorProvider { } /// Method call executor. -pub trait CallExecutor { +pub trait CallExecutor: RuntimeVersionOf { /// Externalities error type. type Error: sp_state_machine::Error; diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index 0e96fa7e0c43a..3bd1cc7c87ac8 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -157,7 +157,7 @@ where fn runtime_version(&self, id: &BlockId) -> ClientResult { if self.backend.is_local_state_available(id) { - self.local.runtime_version(id) + >::runtime_version(&self.local, id) } else { Err(ClientError::NotAvailableOnLightClient) } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 62691e554a038..12bb411706007 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -263,7 +263,7 @@ where impl LockImportRun for Client where B: backend::Backend, - E: CallExecutor + RuntimeVersionOf, + E: CallExecutor, Block: BlockT, { fn lock_import_and_run(&self, f: F) -> Result @@ -302,7 +302,7 @@ impl LockImportRun for &Client where Block: BlockT, B: backend::Backend, - E: CallExecutor + RuntimeVersionOf, + E: CallExecutor, { fn lock_import_and_run(&self, f: F) -> Result where @@ -316,7 +316,7 @@ where impl Client where B: backend::Backend, - E: CallExecutor + RuntimeVersionOf, + E: CallExecutor, Block: BlockT, Block::Header: Clone, { @@ -1300,7 +1300,7 @@ where impl UsageProvider for Client where B: backend::Backend, - E: CallExecutor + RuntimeVersionOf, + E: CallExecutor, Block: BlockT, { /// Get usage info about current client. 
@@ -1312,7 +1312,7 @@ where impl ProofProvider for Client where B: backend::Backend, - E: CallExecutor + RuntimeVersionOf, + E: CallExecutor, Block: BlockT, { fn read_proof( @@ -1437,7 +1437,7 @@ where impl BlockBuilderProvider for Client where B: backend::Backend + Send + Sync + 'static, - E: CallExecutor + RuntimeVersionOf + Send + Sync + 'static, + E: CallExecutor + Send + Sync + 'static, Block: BlockT, Self: ChainHeaderBackend + ProvideRuntimeApi, >::Api: @@ -1495,7 +1495,7 @@ where impl StorageProvider for Client where B: backend::Backend, - E: CallExecutor + RuntimeVersionOf, + E: CallExecutor, Block: BlockT, { fn storage_keys( @@ -1721,7 +1721,7 @@ where impl ProvideUncles for Client where B: backend::Backend, - E: CallExecutor + RuntimeVersionOf, + E: CallExecutor, Block: BlockT, { fn uncles( @@ -1891,7 +1891,7 @@ where impl sc_consensus::BlockImport for &Client where B: backend::Backend, - E: CallExecutor + RuntimeVersionOf + Send + Sync, + E: CallExecutor + Send + Sync, Block: BlockT, Client: ProvideRuntimeApi, as ProvideRuntimeApi>::Api: @@ -2006,7 +2006,7 @@ where impl sc_consensus::BlockImport for Client where B: backend::Backend, - E: CallExecutor + RuntimeVersionOf + Send + Sync, + E: CallExecutor + Send + Sync, Block: BlockT, Self: ProvideRuntimeApi, >::Api: @@ -2036,7 +2036,7 @@ where impl Finalizer for Client where B: backend::Backend, - E: CallExecutor + RuntimeVersionOf, + E: CallExecutor, Block: BlockT, { fn apply_finality( @@ -2072,7 +2072,7 @@ where impl Finalizer for &Client where B: backend::Backend, - E: CallExecutor + RuntimeVersionOf, + E: CallExecutor, Block: BlockT, { fn apply_finality( @@ -2126,7 +2126,7 @@ where impl BlockBackend for Client where B: backend::Backend, - E: CallExecutor + RuntimeVersionOf, + E: CallExecutor, Block: BlockT, { fn block_body( @@ -2175,7 +2175,7 @@ where impl backend::AuxStore for Client where B: backend::Backend, - E: CallExecutor + RuntimeVersionOf, + E: CallExecutor, Block: BlockT, Self: 
ProvideRuntimeApi, >::Api: CoreApi, @@ -2207,7 +2207,7 @@ where impl backend::AuxStore for &Client where B: backend::Backend, - E: CallExecutor + RuntimeVersionOf, + E: CallExecutor, Block: BlockT, Client: ProvideRuntimeApi, as ProvideRuntimeApi>::Api: CoreApi, @@ -2234,7 +2234,7 @@ where impl sp_consensus::block_validation::Chain for Client where BE: backend::Backend, - E: CallExecutor + RuntimeVersionOf, + E: CallExecutor, B: BlockT, { fn block_status( diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 3bcfdf32dec36..bb1dfdb94e4cc 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -30,7 +30,7 @@ use sc_client_api::{ RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, Storage, StorageProof, StorageProvider, }; -use sc_executor::{NativeElseWasmExecutor, RuntimeVersion, WasmExecutionMethod}; +use sc_executor::{NativeElseWasmExecutor, RuntimeVersion, WasmExecutionMethod, RuntimeVersionOf}; use sc_light::{ backend::{Backend, GenesisOrUnavailableState}, blockchain::{Blockchain, BlockchainCache}, @@ -258,6 +258,16 @@ impl CallExecutor for DummyCallExecutor { } } +impl RuntimeVersionOf for DummyCallExecutor { + fn runtime_version( + &self, + _: &mut dyn sp_externalities::Externalities, + _: &sp_core::traits::RuntimeCode, + ) -> Result { + unreachable!() + } +} + fn local_executor() -> NativeElseWasmExecutor { NativeElseWasmExecutor::new(WasmExecutionMethod::Interpreted, None, 8) From c342bdf139d2ab3a283047305da83f82368ca97f Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 20 Oct 2021 11:56:18 +0200 Subject: [PATCH 106/188] fmt --- client/service/test/src/client/light.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index bb1dfdb94e4cc..a253686e37aa5 100644 --- a/client/service/test/src/client/light.rs 
+++ b/client/service/test/src/client/light.rs @@ -30,7 +30,7 @@ use sc_client_api::{ RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, Storage, StorageProof, StorageProvider, }; -use sc_executor::{NativeElseWasmExecutor, RuntimeVersion, WasmExecutionMethod, RuntimeVersionOf}; +use sc_executor::{NativeElseWasmExecutor, RuntimeVersion, RuntimeVersionOf, WasmExecutionMethod}; use sc_light::{ backend::{Backend, GenesisOrUnavailableState}, blockchain::{Blockchain, BlockchainCache}, From d8a9b50af1bc0b0fd340765fa9eca14f1a37e2fa Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 20 Oct 2021 16:52:53 +0200 Subject: [PATCH 107/188] error when resolving version and no wasm in storage. --- client/service/src/client/client.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 12bb411706007..1127d31d6a053 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1278,7 +1278,7 @@ where storage: &Storage, executor: &E, ) -> sp_blockchain::Result { - Ok(if let Some(wasm) = storage.top.get(well_known_keys::CODE) { + if let Some(wasm) = storage.top.get(well_known_keys::CODE) { let mut ext = sp_state_machine::BasicExternalities::new_empty(); // just to read runtime version. 
let code_fetcher = crate::client::wasm_override::WasmBlob::new(wasm.clone()); let hash = code_fetcher.hash.clone(); @@ -1290,10 +1290,12 @@ where let runtime_version = RuntimeVersionOf::runtime_version(executor, &mut ext, &runtime_code) .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)))?; - runtime_version.state_version() + Ok(runtime_version.state_version()) } else { - Default::default() - }) + Err(sp_blockchain::Error::VersionInvalid( + "Could not fetch runtime from storage.".to_string(), + )) + } } } From 898bc134a4a2936514c2e92b4474945b0cbf1566 Mon Sep 17 00:00:00 2001 From: cheme Date: Wed, 20 Oct 2021 16:55:59 +0200 Subject: [PATCH 108/188] use existing utils to instantiate runtime code. --- client/service/src/client/client.rs | 7 +------ client/service/src/client/wasm_override.rs | 4 ++-- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 1127d31d6a053..fa03a6fa289ba 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1281,12 +1281,7 @@ where if let Some(wasm) = storage.top.get(well_known_keys::CODE) { let mut ext = sp_state_machine::BasicExternalities::new_empty(); // just to read runtime version. 
let code_fetcher = crate::client::wasm_override::WasmBlob::new(wasm.clone()); - let hash = code_fetcher.hash.clone(); - let runtime_code = sp_core::traits::RuntimeCode { - code_fetcher: &code_fetcher, - heap_pages: None, - hash, - }; + let runtime_code = code_fetcher.runtime_code(None); let runtime_version = RuntimeVersionOf::runtime_version(executor, &mut ext, &runtime_code) .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)))?; diff --git a/client/service/src/client/wasm_override.rs b/client/service/src/client/wasm_override.rs index ee7ebb81078f2..2460b664b61cb 100644 --- a/client/service/src/client/wasm_override.rs +++ b/client/service/src/client/wasm_override.rs @@ -51,7 +51,7 @@ use std::{ /// Auxiliary structure that holds a wasm blob and its hash. pub(crate) struct WasmBlob { code: Vec, - pub(crate) hash: Vec, + hash: Vec, } impl WasmBlob { @@ -60,7 +60,7 @@ impl WasmBlob { Self { code, hash } } - fn runtime_code(&self, heap_pages: Option) -> RuntimeCode { + pub(crate) fn runtime_code(&self, heap_pages: Option) -> RuntimeCode { RuntimeCode { code_fetcher: self, hash: self.hash.clone(), heap_pages } } } From 1b1092c3306ab14ad115ed87a50f97af9e0e7378 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Thu, 21 Oct 2021 09:36:54 +0200 Subject: [PATCH 109/188] migration pallet --- Cargo.lock | 19 + bin/node/runtime/Cargo.toml | 4 + bin/node/runtime/src/lib.rs | 10 + frame/state-trie-migration/Cargo.toml | 49 ++ frame/state-trie-migration/src/lib.rs | 938 ++++++++++++++++++++++++++ 5 files changed, 1020 insertions(+) create mode 100644 frame/state-trie-migration/Cargo.toml create mode 100644 frame/state-trie-migration/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 167ef05656e58..a35c40e90cc14 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4689,6 +4689,7 @@ dependencies = [ "pallet-society", "pallet-staking", "pallet-staking-reward-curve", + "pallet-state-trie-migration", "pallet-sudo", "pallet-timestamp", "pallet-tips", @@ -5951,6 +5952,24 @@ 
dependencies = [ "sp-arithmetic", ] +[[package]] +name = "pallet-state-trie-migration" +version = "4.0.0-dev" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "log 0.4.14", + "pallet-balances", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-tracing", +] + [[package]] name = "pallet-sudo" version = "4.0.0-dev" diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 22ff0954e2458..09d9c6dea9159 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -85,6 +85,7 @@ pallet-session = { version = "4.0.0-dev", features = [ pallet-session-benchmarking = { version = "4.0.0-dev", path = "../../../frame/session/benchmarking", default-features = false, optional = true } pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/staking" } pallet-staking-reward-curve = { version = "4.0.0-dev", default-features = false, path = "../../../frame/staking/reward-curve" } +pallet-state-trie-migration = { version = "4.0.0-dev", default-features = false, path = "../../../frame/state-trie-migration" } pallet-scheduler = { version = "4.0.0-dev", default-features = false, path = "../../../frame/scheduler" } pallet-society = { version = "4.0.0-dev", default-features = false, path = "../../../frame/society" } pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../../frame/sudo" } @@ -147,6 +148,7 @@ std = [ "sp-runtime/std", "sp-staking/std", "pallet-staking/std", + "pallet-state-trie-migration/std", "sp-keyring", "sp-session/std", "pallet-sudo/std", @@ -201,6 +203,7 @@ runtime-benchmarks = [ "pallet-scheduler/runtime-benchmarks", "pallet-society/runtime-benchmarks", "pallet-staking/runtime-benchmarks", + "pallet-state-trie-migration/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-tips/runtime-benchmarks", "pallet-transaction-storage/runtime-benchmarks", @@ -241,6 +244,7 @@ try-runtime = [ 
"pallet-randomness-collective-flip/try-runtime", "pallet-session/try-runtime", "pallet-staking/try-runtime", + "pallet-state-trie-migration/try-runtime", "pallet-sudo/try-runtime", "pallet-election-provider-multi-phase/try-runtime", "pallet-timestamp/try-runtime", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index c7920629bf356..6c52a8e934388 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1241,6 +1241,13 @@ impl pallet_transaction_storage::Config for Runtime { type WeightInfo = pallet_transaction_storage::weights::SubstrateWeight; } +impl pallet_state_trie_migration::Config for Runtime { + type Event = Event; + type ControlOrigin = EnsureRoot; + type Currency = Balances; + type SignedDepositPerItem = (); +} + construct_runtime!( pub enum Runtime where Block = Block, @@ -1288,6 +1295,7 @@ construct_runtime!( Uniques: pallet_uniques::{Pallet, Call, Storage, Event}, TransactionStorage: pallet_transaction_storage::{Pallet, Call, Storage, Inherent, Config, Event}, BagsList: pallet_bags_list::{Pallet, Call, Storage, Event}, + StateTrieMigration: pallet_state_trie_migration::{Pallet, Call, Storage, Event}, } ); @@ -1644,6 +1652,7 @@ impl_runtime_apis! { list_benchmark!(list, extra, pallet_scheduler, Scheduler); list_benchmark!(list, extra, pallet_session, SessionBench::); list_benchmark!(list, extra, pallet_staking, Staking); + list_benchmark!(list, extra, pallet_state_trie_migration, StateTrieMigration); list_benchmark!(list, extra, frame_system, SystemBench::); list_benchmark!(list, extra, pallet_timestamp, Timestamp); list_benchmark!(list, extra, pallet_tips, Tips); @@ -1718,6 +1727,7 @@ impl_runtime_apis! 
{ add_benchmark!(params, batches, pallet_scheduler, Scheduler); add_benchmark!(params, batches, pallet_session, SessionBench::); add_benchmark!(params, batches, pallet_staking, Staking); + add_benchmark!(params, batches, pallet_state_trie_migration, StateTrieMigration); add_benchmark!(params, batches, frame_system, SystemBench::); add_benchmark!(params, batches, pallet_timestamp, Timestamp); add_benchmark!(params, batches, pallet_tips, Tips); diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml new file mode 100644 index 0000000000000..3fedac6a31cdd --- /dev/null +++ b/frame/state-trie-migration/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "pallet-state-trie-migration" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Unlicense" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet migration of trie" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +log = { version = "0.4.14", default-features = false } + +sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } + +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } + +[dev-dependencies] 
+pallet-balances = { path = "../balances", version = "4.0.0-dev" } +sp-tracing = { path = "../../primitives/tracing", version = "4.0.0-dev" } + +[features] +default = ["std"] +std = [ + "log/std", + "scale-info/std", + "codec/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std" +] + +runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs new file mode 100644 index 0000000000000..2cbb607198650 --- /dev/null +++ b/frame/state-trie-migration/src/lib.rs @@ -0,0 +1,938 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Pallet State Trie Migration +//! +//! Reads and writes all keys and values in the entire state in a systematic way. This is useful for +//! upgrading a chain to `StorageVersion::V2`, where all keys need to be touched. +//! +//! ## Migration Types +//! +//! This pallet provides 3 ways to do this, each of which is suited for a particular use-case, and +//! can be enabled independently. +//! +//! ### Auto migration +//! +//! This system will try and migrate all keys by continuously using `on_initialize`. It is only +//! 
sensible for a relay chain or a solo chain, where going slightly over weight is not a problem.
+//! It can be configured so that the migration takes at most `n` items and tries to not go over `x`
+//! bytes, but the latter is not guaranteed.
+//!
+//! For example, if a chain contains keys of 1 byte size, the `on_initialize` could read up to `x -
+//! 1` bytes from `n` different keys, while the next key is suddenly `:code:`, and there is no way
+//! to bail out of this.
+//!
+//! ### Unsigned migration
+//!
+//! This system will use the offchain worker threads to correct the downside of the previous item:
+//! knowing exactly the byte size of migration in each block. Offchain worker threads will first
+//! find the maximum number of keys that can be migrated whilst staying below a certain byte size
+//! limit offchain, and then submit that back to the chain as an unsigned transaction that can only
+//! be included by validators.
+//!
+//! This approach is safer, and ensures that the migration reads do not take more than a certain
+//! amount, yet they do impose some work on the validators/collators.
+//!
+//! ### Signed migration
+//!
+//! As a backup, the migration process can be set in motion via signed transactions that basically
+//! say in advance how many items and how many bytes they will consume, and pay for it as well. This
+//! can be a good safe alternative, if the former two systems are not desirable.
+//!
+//! The (minor) caveat of this approach is that we cannot know in advance how many bytes reading a
+//! certain number of keys will incur. To overcome this, the runtime needs to configure this pallet
+//! with a `SignedDepositPerItem`. This is the per-item deposit that the origin of the signed
+//! migration transactions need to have in their account (on top of the normal fee) and if the size
+//! witness data that they claim is incorrect, this deposit is slashed.
+//!
+//! ---
+//!
+//! 
Initially, this pallet does not contain any auto/unsigned migrations. They must be manually +//! enabled by the `ControlOrigin`. Note that these two migration types cannot co-exist And only one +//! can be enable at each point in time. + +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +const LOG_TARGET: &'static str = "runtime::state-trie-migration"; + +#[macro_export] +macro_rules! log { + ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { + log::$level!( + target: crate::LOG_TARGET, + concat!("[{:?}] 🤖 ", $patter), frame_system::Pallet::::block_number() $(, $values)* + ) + }; +} + +#[frame_support::pallet] +pub mod pallet { + use frame_benchmarking::Zero; + use frame_support::{ + dispatch::TransactionPriority, + ensure, + pallet_prelude::*, + traits::{Currency, Get}, + unsigned::ValidateUnsigned, + }; + use frame_system::{ + ensure_none, ensure_signed, + offchain::{SendTransactionTypes, SubmitTransaction}, + pallet_prelude::*, + }; + use sp_core::storage::well_known_keys::CHILD_STORAGE_KEY_PREFIX; + use sp_runtime::traits::{Bounded, Saturating}; + use sp_std::prelude::*; + + pub(crate) type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + + pub trait WeightInfo { + fn process_top_key(x: u32) -> Weight; + } + impl WeightInfo for () { + fn process_top_key(x: u32) -> Weight { + 1000000 + } + } + + /// A migration task stored in state. + /// + /// It tracks the last top and child keys read. + #[derive(Clone, Encode, Decode, scale_info::TypeInfo)] + #[codec(mel_bound(T: Config))] + #[scale_info(skip_type_params(T))] + #[cfg_attr(test, derive(PartialEq, Eq))] + pub struct MigrationTask { + /// The top key that we currently have to iterate. + /// + /// If it does not exist, it means that the migration is done and no further keys exist. + pub(crate) current_top: Option>, + /// The last child key that we have processed. + /// + /// This is a child key under the current `self.last_top`. 
+ /// + /// If this is set, no further top keys are processed until the child key migration is + /// complete. + pub(crate) current_child: Option>, + + /// dynamic counter for the number of items that we have processed in this execution from + /// the top trie. + /// + /// It is not written to storage. + #[codec(skip)] + pub(crate) dyn_top_items: u32, + /// dynamic counter for the number of items that we have processed in this execution from + /// any child trie. + /// + /// It is not written to storage. + #[codec(skip)] + pub(crate) dyn_child_items: u32, + + /// dynamic counter for for the byte size of items that we have processed in this + /// execution. + /// + /// It is not written to storage. + #[codec(skip)] + pub(crate) dyn_size: u32, + + #[codec(skip)] + _ph: sp_std::marker::PhantomData, + } + + #[cfg(feature = "std")] + impl sp_std::fmt::Debug for MigrationTask { + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + f.debug_struct("MigrationTask") + .field( + "top", + &self.current_top.as_ref().map(|d| sp_core::hexdisplay::HexDisplay::from(d)), + ) + .field( + "child", + &self.current_child.as_ref().map(|d| sp_core::hexdisplay::HexDisplay::from(d)), + ) + .field("dyn_top_items", &self.dyn_top_items) + .field("dyn_child_items", &self.dyn_child_items) + .field("dyn_size", &self.dyn_size) + .finish() + } + } + + impl Default for MigrationTask { + fn default() -> Self { + Self { + current_top: Some(Default::default()), + current_child: Default::default(), + dyn_child_items: Default::default(), + dyn_top_items: Default::default(), + dyn_size: Default::default(), + _ph: Default::default(), + } + } + } + + impl MigrationTask { + /// get the total number of keys affected by the current task. + pub(crate) fn dyn_total_items(&self) -> u32 { + self.dyn_child_items.saturating_add(self.dyn_top_items) + } + + /// Migrate keys until either of the given limits are exhausted, or if no more top keys + /// exist. 
+ /// + /// Note that this returns after the **first** migration tick that causes exhaustion. In + /// other words, this should not be used in any environment where resources are strictly + /// bounded (e.g. a parachain), but it is acceptable otherwise (relay chain, offchain + /// workers). + pub(crate) fn migrate_until_exhaustion(&mut self, limits: MigrationLimits) { + loop { + self.migrate_tick(); + if self.exhausted(limits) { + break + } + } + } + + /// Check if there's any work left, or if we have exhausted the limits already. + fn exhausted(&self, limits: MigrationLimits) -> bool { + self.current_top.is_none() || + self.dyn_total_items() >= limits.item || + self.dyn_size >= limits.size + } + + /// Migrate AT MOST ONE KEY. This can be either a top or a child key. + /// + /// The only exception to this is that when the last key of the child tree is migrated, then + /// the top tree under which the child tree lives is also migrated. + /// + /// This function is the core of this entire pallet. + fn migrate_tick(&mut self) { + match (self.current_top.as_ref(), self.current_child.as_ref()) { + (Some(_), Some(_)) => { + // we're in the middle of doing work on a child tree. + self.migrate_child(); + if self.current_child.is_none() { + // this is the end of this child trie. process the top trie as well. + self.migrate_top() + } + }, + (Some(ref top_key), None) => { + if top_key.starts_with(CHILD_STORAGE_KEY_PREFIX) { + // no child migration at hand, but one will begin here. + self.current_child = Some(vec![]); + self.migrate_child(); + if self.current_child.is_none() { + // this is the end of this child trie. process the top trie as well. 
+ self.migrate_top() + } + } else { + self.migrate_top(); + } + }, + (None, Some(_)) => { + // TODO: test edge case: last top key has child + log!(error, "LOGIC ERROR: unreachable code."); + Pallet::::halt() + }, + (None, None) => { + // nada + }, + } + } + + /// Migrate the current child key, setting it to its new value, if one exists. + /// + /// It updates the dynamic counters. + fn migrate_child(&mut self) { + let current_child = + self.current_child.clone().expect("value checked to be `Some`; qed"); + let current_top = self.current_top.clone().expect("value checked to be `Some`; qed"); + + let child_key = Pallet::::child_io_key(¤t_child); + if let Some(data) = sp_io::default_child_storage::get(child_key, ¤t_top) { + self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); + sp_io::default_child_storage::set(child_key, ¤t_top, &data) + } + self.dyn_child_items.saturating_inc(); + let next_key = sp_io::default_child_storage::next_key(child_key, ¤t_top); + self.current_child = next_key; + log!( + trace, + "migrating child key {:?} from top key {:?}, next task: {:?}", + sp_core::hexdisplay::HexDisplay::from(¤t_child), + sp_core::hexdisplay::HexDisplay::from(¤t_top), + self + ); + } + + /// Migrate the current top key, setting it to its new value, if one exists. + /// + /// It updates the dynamic counters. + fn migrate_top(&mut self) { + let current_top = self.current_top.clone().expect("value checked to be `Some`; qed"); + if let Some(data) = sp_io::storage::get(¤t_top) { + self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); + sp_io::storage::set(¤t_top, &data); + } + self.dyn_top_items.saturating_inc(); + let next_key = sp_io::storage::next_key(¤t_top); + self.current_top = next_key; + log!( + trace, + "migrated top key {:?}, next task: {:?}", + sp_core::hexdisplay::HexDisplay::from(¤t_top), + self + ); + } + } + + /// The limits of a migration. 
+ #[derive(Clone, Copy, Encode, Decode, scale_info::TypeInfo, Default, Debug, PartialEq, Eq)] + pub struct MigrationLimits { + /// The byte size limit. + pub(crate) size: u32, + /// The number of keys limit. + pub(crate) item: u32, + } + + /// How a migration was computed. + #[derive(Clone, Copy, Encode, Decode, scale_info::TypeInfo, Debug, PartialEq, Eq)] + pub enum MigrationCompute { + /// A signed origin triggered the migration. + Signed, + /// An unsigned origin triggered the migration. + Unsigned, + /// An automatic task triggered the migration. + Auto, + } + + /// Inner events of this pallet. + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Given number of `(top, child)` keys were migrated respectively, with the given + /// `compute`. + Migrated(u32, u32, MigrationCompute), + } + + /// The outer Pallet struct. + #[pallet::pallet] + #[pallet::generate_store(pub(crate) trait Store)] + pub struct Pallet(_); + + /// Configurations of this pallet. + #[pallet::config] + pub trait Config: frame_system::Config + SendTransactionTypes> { + /// Origin that can control the configurations of this pallet. + type ControlOrigin: frame_support::traits::EnsureOrigin; + + /// The overarching event type. + type Event: From> + IsType<::Event>; + + /// The currency provider type. + type Currency: Currency; + + /// The amount of deposit collected per item in advance, for signed migrations. + /// + /// This should reflect the average storage value size in the worse case. + type SignedDepositPerItem: Get>; + + /// The weight information of this pallet. + type WeightInfo: WeightInfo; + + /// The priority used for unsigned transactions. + type Priority: Get; + } + + /// Migration progress. + /// + /// This stores the snapshot of the last migrated keys. It can be set into motion and move + /// forward by any of the means provided by this pallet. 
+ #[pallet::storage] + #[pallet::getter(fn migration_process)] + pub type MigrationProcess = StorageValue<_, MigrationTask, ValueQuery>; + + /// The limits that are imposed on automatic migrations. + /// + /// If set to None, then no automatic migration happens. + #[pallet::storage] + #[pallet::getter(fn auto_limits)] + pub type AutoLimits = StorageValue<_, Option, ValueQuery>; + + /// The size limits imposed on unsigned migrations. + /// + /// This should: + /// 1. be large enough to accommodate things like `:code:` + /// 2. small enough to never brick a parachain due to PoV limits. + /// + /// if set to `None`, then no unsigned migration happens. + #[pallet::storage] + #[pallet::getter(fn unsigned_size_limit)] + pub type UnsignedSizeLimit = StorageValue<_, Option, ValueQuery>; + + #[pallet::call] + impl Pallet { + /// control the automatic migration. + /// + /// The dispatch origin of this call must be [`Config::ControlOrigin`]. + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] + pub fn control_auto_migration( + origin: OriginFor, + maybe_config: Option, + ) -> DispatchResultWithPostInfo { + T::ControlOrigin::ensure_origin(origin)?; + ensure!( + maybe_config.is_some() ^ Self::unsigned_size_limit().is_some(), + "unsigned and auto migration cannot co-exist" + ); + AutoLimits::::put(maybe_config); + Ok(().into()) + } + + /// control the unsigned migration. + /// + /// The dispatch origin of this call must be [`Config::ControlOrigin`]. + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] + pub fn control_unsigned_migration( + origin: OriginFor, + maybe_size_limit: Option, + ) -> DispatchResultWithPostInfo { + T::ControlOrigin::ensure_origin(origin)?; + ensure!( + maybe_size_limit.is_some() ^ Self::auto_limits().is_some(), + "unsigned and auto migration cannot co-exist" + ); + UnsignedSizeLimit::::put(maybe_size_limit); + Ok(().into()) + } + + /// The unsigned call that can be submitted by offchain workers. 
+ /// + /// This can only be valid if it is generated from the local node, which means only + /// validators can generate this call. + /// + /// It is guaranteed that migrating `item_limit` will not cause the total read bytes to + /// exceed [`UnsignedSizeLimit`]. + /// + /// The `size_limit` in the call arguments is for weighing. THe `_task` argument in the call + /// is for validation and ensuring that the migration process has not ticked forward since + /// the call was generated. + #[pallet::weight(Pallet::::dynamic_weight(*item_limit, *witness_size_limit))] + pub fn continue_migrate_unsigned( + origin: OriginFor, + item_limit: u32, + witness_size_limit: u32, + _witness_task: MigrationTask, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + let unsigned_size_limit = + Self::unsigned_size_limit().ok_or("unsigned limit not set, tx not allowed.")?; + ensure!(witness_size_limit == unsigned_size_limit, "wrong size limit witness data"); + + let mut task = Self::migration_process(); + // pre-dispatch and validate-unsigned already assure this. + debug_assert_eq!(task, _witness_task); + + let limits = MigrationLimits { size: unsigned_size_limit, item: item_limit }; + task.migrate_until_exhaustion(limits); + + // we panic if the validator submitted a bad transaction, making it financially bad for + // them to cheat. We could relax this. Also, if a bug causes validators to mistakenly + // produce bad transactions, they can avoid it by disabling offchain workers. + assert!(task.dyn_size < unsigned_size_limit); + + Self::deposit_event(Event::::Migrated( + task.dyn_top_items, + task.dyn_child_items, + MigrationCompute::Unsigned, + )); + + Ok(().into()) + } + + /// Migrate AT MOST `item_limit` keys by reading and writing them. + /// + /// The dispatch origin of this call can be any signed account. + /// + /// This transaction has NO MONETARY INCENTIVES. calling it will only incur transaction fees + /// on the caller, with no rewards paid out. 
+ /// + /// The sum of the byte length of all the data read must be provided for up-front + /// fee-payment and weighing. + #[pallet::weight(Pallet::::dynamic_weight(*item_limit, *size_limit))] + pub fn continue_migrate( + origin: OriginFor, + item_limit: u32, + size_limit: u32, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + // ensure they can pay more than the fee. + let deposit = T::SignedDepositPerItem::get().saturating_mul(item_limit.into()); + ensure!(T::Currency::can_slash(&who, deposit), "not enough funds"); + + let mut task = Self::migration_process(); + task.migrate_until_exhaustion(MigrationLimits { size: size_limit, item: item_limit }); + + // ensure that the migration witness data was correct. + if item_limit != task.dyn_total_items() || size_limit != task.dyn_size { + // let the imbalance burn. + let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); + // defensive. + debug_assert!(_remainder.is_zero()); + return Err("Wrong witness data".into()) + } + + Self::deposit_event(Event::::Migrated( + task.dyn_top_items, + task.dyn_child_items, + MigrationCompute::Signed, + )); + MigrationProcess::::put(task); + Ok(().into()) + } + + /// Migrate the list of top keys by iterating each of them one by one. + /// + /// This does not affect the global migration process tracker ([`MigrationProcess`]), and + /// should only be used in case any keys are leftover due to a bug. + #[pallet::weight(Pallet::::dynamic_weight(keys.len() as u32, *total_size))] + pub fn migrate_custom_top( + origin: OriginFor, + keys: Vec>, + total_size: u32, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + // ensure they can pay more than the fee. 
+ let deposit = T::SignedDepositPerItem::get().saturating_mul((keys.len() as u32).into()); + ensure!(T::Currency::can_slash(&who, deposit), "not enough funds"); + + let mut dyn_size = 0u32; + for key in &keys { + if let Some(data) = sp_io::storage::get(&key) { + dyn_size = dyn_size.saturating_add(data.len() as u32); + sp_io::storage::set(key, &data); + } + } + + if dyn_size != total_size { + let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); + debug_assert!(_remainder.is_zero()); + return Err("Wrong witness data".into()) + } + + Self::deposit_event(Event::::Migrated( + keys.len() as u32, + 0, + MigrationCompute::Signed, + )); + Ok(().into()) + } + + /// Migrate the list of child keys by iterating each of them one by one. + /// + /// All of the given child keys must be present under one `top_key`. + /// + /// This does not affect the global migration process tracker ([`MigrationProcess`]), and + /// should only be used in case any keys are leftover due to a bug. + #[pallet::weight(Pallet::::dynamic_weight(child_keys.len() as u32, *total_size))] + pub fn migrate_custom_child( + origin: OriginFor, + top_key: Vec, + child_keys: Vec>, + total_size: u32, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + // ensure they can pay more than the fee. 
+ let deposit = + T::SignedDepositPerItem::get().saturating_mul((child_keys.len() as u32).into()); + ensure!(T::Currency::can_slash(&who, deposit), "not enough funds"); + + let mut dyn_size = 0u32; + for child_key in &child_keys { + if let Some(data) = + sp_io::default_child_storage::get(Self::child_io_key(child_key), &top_key) + { + dyn_size = dyn_size.saturating_add(data.len() as u32); + sp_io::default_child_storage::set( + Self::child_io_key(child_key), + &top_key, + &data, + ); + } + } + + if dyn_size != total_size { + let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); + debug_assert!(_remainder.is_zero()); + return Err("Wrong witness data".into()) + } + + Self::deposit_event(Event::::Migrated( + 0, + child_keys.len() as u32, + MigrationCompute::Signed, + )); + Ok(().into()) + } + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(_: BlockNumberFor) -> Weight { + if let Some(limits) = Self::auto_limits() { + let mut task = Self::migration_process(); + task.migrate_until_exhaustion(limits); + let weight = Self::dynamic_weight(task.dyn_total_items(), task.dyn_size); + + log!( + info, + "migrated {} top keys, {} child keys, and a total of {} bytes.", + task.dyn_top_items, + task.dyn_child_items, + task.dyn_size, + ); + Self::deposit_event(Event::::Migrated( + task.dyn_top_items as u32, + task.dyn_child_items as u32, + MigrationCompute::Auto, + )); + MigrationProcess::::put(task); + + weight + } else { + T::DbWeight::get().reads(1) + } + } + + fn offchain_worker(_: BlockNumberFor) { + if let Some(unsigned_size_limit) = Self::unsigned_size_limit() { + let mut task = Self::migration_process(); + let limits = + MigrationLimits { size: unsigned_size_limit, item: Bounded::max_value() }; + task.migrate_until_exhaustion(limits); + + // the last item cause us to go beyond the size limit, so we subtract one. we are + // making this assumption based on the impl of `migrate_until_exhaustion`. 
+ let safe_items_to_read = task.dyn_total_items().saturating_sub(1); + + let original_task = Self::migration_process(); + let call = Call::continue_migrate_unsigned { + item_limit: safe_items_to_read, + // note that this must be simply the limit, not the actual bytes read. + witness_size_limit: unsigned_size_limit, + witness_task: original_task, + }; + match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + Ok(_) => { + log!( + debug, + "submitted a call to migrate {} items of {} bytes.", + safe_items_to_read, + task.dyn_size + ) + }, + Err(why) => { + log!(warn, "failed to submit a call to the pool {:?}", why) + }, + } + } + } + } + + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + if let Call::continue_migrate_unsigned { witness_task, .. } = call { + match source { + TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, + _ => return InvalidTransaction::Call.into(), + } + + let onchain_task = Self::migration_process(); + if &onchain_task != witness_task { + return Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)) + } + + ValidTransaction::with_tag_prefix("StorageVersionMigration") + .priority(T::Priority::get()) + // deduplicate based on task data. + .and_provides(witness_task) + .longevity(5) + .propagate(false) + .build() + } else { + InvalidTransaction::Call.into() + } + } + + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + if let Call::continue_migrate_unsigned { witness_task, .. } = call { + let onchain_task = Self::migration_process(); + if &onchain_task != witness_task { + return Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)) + } + Ok(()) + } else { + Err(InvalidTransaction::Call.into()) + } + } + } + + impl Pallet { + /// The real weight of a migration of the given number of `items` with total `size`. 
+ fn dynamic_weight(items: u32, size: u32) -> frame_support::pallet_prelude::Weight { + let items = items as Weight; + items + .saturating_mul(::DbWeight::get().reads_writes(1, 1)) + // we assume that the read/write per-byte weight is the same for child and top tree. + .saturating_add(T::WeightInfo::process_top_key(size)) + } + + /// Put a stop to all ongoing migrations. + fn halt() { + UnsignedSizeLimit::::kill(); + AutoLimits::::kill(); + } + + fn child_io_key(storage_key: &Vec) -> &[u8] { + use sp_core::storage::{ChildType, PrefixedStorageKey}; + match ChildType::from_prefixed_key(PrefixedStorageKey::new_ref(storage_key)) { + Some((ChildType::ParentKeyId, storage_key)) => storage_key, + None => &[], // Ignore TODO + } + } + } +} + +#[cfg(feature = "runtime-benchmarks")] +mod benchmarks { + use super::*; + use sp_std::prelude::*; + + const KEY: &'static [u8] = b"key"; + + frame_benchmarking::benchmarks! { + process_top_key { + let x in 1 .. (4 * 1024 * 1024); + sp_io::storage::set(KEY, &vec![1u8; x as usize]); + }: { + let data = sp_io::storage::get(KEY).unwrap(); + sp_io::storage::set(KEY, &vec![1u8; x as usize]); + let _next = sp_io::storage::next_key(KEY); + assert_eq!(data.len(), x as usize); + } + } +} + +#[cfg(test)] +mod mock { + use super::*; + use crate as pallet_state_trie_migration; + use frame_support::{parameter_types, traits::Hooks}; + use frame_system::EnsureRoot; + use sp_core::H256; + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + StateVersion, + }; + + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; + type Block = frame_system::mocking::MockBlock; + + // Configure a mock runtime to test the pallet. 
+ frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Config, Storage, Event}, + StateTrieMigration: pallet_state_trie_migration::{Pallet, Call, Storage, Event}, + } + ); + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; + } + + impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type Origin = Origin; + type Call = Call; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type DbWeight = (); + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); + } + + parameter_types! 
{ + pub const ExistentialDeposit: u64 = 1; + } + + impl pallet_balances::Config for Test { + type Balance = u64; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type WeightInfo = (); + } + + impl pallet_state_trie_migration::Config for Test { + type Event = Event; + type ControlOrigin = EnsureRoot; + type Currency = Balances; + type SignedDepositPerItem = (); + type WeightInfo = (); + type Priority = (); + } + + impl frame_system::offchain::SendTransactionTypes for Test + where + Call: From, + { + type OverarchingCall = Call; + type Extrinsic = Extrinsic; + } + + pub type Extrinsic = sp_runtime::testing::TestXt; + + pub fn new_test_ext() -> sp_io::TestExternalities { + use sp_core::storage::ChildInfo; + + let t = frame_system::GenesisConfig::default(); + let mut storage = sp_core::storage::Storage { + top: vec![ + (b"key1".to_vec(), vec![1u8; 10]), // 6b657931 + (b"key2".to_vec(), vec![2u8; 20]), // 6b657932 + (b"key3".to_vec(), vec![3u8; 30]), // 6b657934 + (b"key4".to_vec(), vec![4u8; 40]), // 6b657934 + (sp_core::storage::well_known_keys::CODE.to_vec(), vec![1u8; 100]), + ] + .into_iter() + .collect(), + children_default: vec![ + ( + b":child_storage:chk1".to_vec(), + sp_core::storage::StorageChild { + data: vec![ + (b"key1".to_vec(), vec![1u8; 10]), + (b"key2".to_vec(), vec![2u8; 20]), + ] + .into_iter() + .collect(), + child_info: ChildInfo::new_default(b"chk1"), + }, + ), + ( + b":child_storage:chk2".to_vec(), + sp_core::storage::StorageChild { + data: vec![ + (b"key1".to_vec(), vec![1u8; 10]), + (b"key2".to_vec(), vec![2u8; 20]), + ] + .into_iter() + .collect(), + child_info: ChildInfo::new_default(b"chk2"), + }, + ), + ] + .into_iter() + .collect(), + }; + t.assimilate_storage::(&mut storage).unwrap(); + sp_tracing::try_init_simple(); + + (storage, StateVersion::V0).into() + } + + pub fn 
run_to_block(n: u64) { + while System::block_number() < n { + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + StateTrieMigration::on_initialize(System::block_number()); + + System::on_finalize(System::block_number()); + } + } +} + +#[cfg(test)] +mod test { + use super::{mock::*, *}; + + #[test] + fn auto_migrate_works() { + new_test_ext().execute_with(|| { + assert_eq!(AutoLimits::::get(), None); + assert_eq!(MigrationProcess::::get(), Default::default()); + + // nothing happens if we don't set the limits. + run_to_block(50); + assert_eq!(MigrationProcess::::get(), Default::default()); + + // this should allow 1 item per block to be migrated. + AutoLimits::::put(Some(MigrationLimits { item: 1, size: 150 })); + + run_to_block(80); + }) + } + + #[test] + fn unsigned_migration_works() { + todo!(); + } + + #[test] + fn manual_migrate_works() { + todo!("test manually signed migration"); + } + + #[test] + fn custom_migrate_works() { + todo!("test custom keys to be migrated via signed") + } +} From d35f273b7d67b1b85a9e72973cab13c5c156c1d3 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 21 Oct 2021 11:00:15 +0200 Subject: [PATCH 110/188] Patch to delay runtime switch. --- primitives/io/src/lib.rs | 22 ---------------------- primitives/version/src/lib.rs | 7 +------ 2 files changed, 1 insertion(+), 28 deletions(-) diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 7e54b54dc73b3..acb98b9e03e66 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -195,16 +195,6 @@ pub trait Storage { self.storage_root(sp_core::StateVersion::V0) } - #[version(2)] - /// "Commit" all existing operations and compute the resulting storage root. - /// - /// The hashing algorithm is defined by the `Block`. - /// - /// Returns a `Vec` that holds the SCALE encoded hash. 
- fn root(&mut self) -> Vec { - self.storage_root(sp_core::StateVersion::V1) - } - /// "Commit" all existing operations and get the resulting storage change root. /// `parent_hash` is a SCALE encoded hash. /// @@ -393,18 +383,6 @@ pub trait DefaultChildStorage { self.child_storage_root(&child_info, sp_core::StateVersion::V0) } - /// Default child root calculation. - /// - /// "Commit" all existing operations and compute the resulting child storage root. - /// The hashing algorithm is defined by the `Block`. - /// - /// Returns a `Vec` that holds the SCALE encoded hash. - #[version(2)] - fn root(&mut self, storage_key: &[u8]) -> Vec { - let child_info = ChildInfo::new_default(storage_key); - self.child_storage_root(&child_info, sp_core::StateVersion::V1) - } - /// Child storage key iteration. /// /// Get the next key in storage after the given one in lexicographic order in child storage. diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 10edbca4eb24f..e08d3a04cffa1 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -217,12 +217,7 @@ impl RuntimeVersion { /// Returns state version to use for update. pub fn state_version(&self) -> StateVersion { - let core_api_id = sp_runtime::hashing::blake2_64(b"Core"); - if self.has_api_with(&core_api_id, |v| v >= 4) { - StateVersion::V1 - } else { - StateVersion::V0 - } + StateVersion::V0 } } From 90ceb5e41b99fd2419384e3ca4740860317e2934 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 21 Oct 2021 11:04:04 +0200 Subject: [PATCH 111/188] Revert "Patch to delay runtime switch." This reverts commit d35f273b7d67b1b85a9e72973cab13c5c156c1d3. 
--- primitives/io/src/lib.rs | 22 ++++++++++++++++++++++ primitives/version/src/lib.rs | 7 ++++++- 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index acb98b9e03e66..7e54b54dc73b3 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -195,6 +195,16 @@ pub trait Storage { self.storage_root(sp_core::StateVersion::V0) } + #[version(2)] + /// "Commit" all existing operations and compute the resulting storage root. + /// + /// The hashing algorithm is defined by the `Block`. + /// + /// Returns a `Vec` that holds the SCALE encoded hash. + fn root(&mut self) -> Vec { + self.storage_root(sp_core::StateVersion::V1) + } + /// "Commit" all existing operations and get the resulting storage change root. /// `parent_hash` is a SCALE encoded hash. /// @@ -383,6 +393,18 @@ pub trait DefaultChildStorage { self.child_storage_root(&child_info, sp_core::StateVersion::V0) } + /// Default child root calculation. + /// + /// "Commit" all existing operations and compute the resulting child storage root. + /// The hashing algorithm is defined by the `Block`. + /// + /// Returns a `Vec` that holds the SCALE encoded hash. + #[version(2)] + fn root(&mut self, storage_key: &[u8]) -> Vec { + let child_info = ChildInfo::new_default(storage_key); + self.child_storage_root(&child_info, sp_core::StateVersion::V1) + } + /// Child storage key iteration. /// /// Get the next key in storage after the given one in lexicographic order in child storage. diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index e08d3a04cffa1..10edbca4eb24f 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -217,7 +217,12 @@ impl RuntimeVersion { /// Returns state version to use for update. 
pub fn state_version(&self) -> StateVersion { - StateVersion::V0 + let core_api_id = sp_runtime::hashing::blake2_64(b"Core"); + if self.has_api_with(&core_api_id, |v| v >= 4) { + StateVersion::V1 + } else { + StateVersion::V0 + } } } From eb272e6dfb4fa1a9886ac17b7e08131775b34c84 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Thu, 21 Oct 2021 11:07:05 +0200 Subject: [PATCH 112/188] fix test --- frame/state-trie-migration/src/lib.rs | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 2cbb607198650..907c14df202b4 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -96,7 +96,9 @@ pub mod pallet { offchain::{SendTransactionTypes, SubmitTransaction}, pallet_prelude::*, }; - use sp_core::storage::well_known_keys::CHILD_STORAGE_KEY_PREFIX; + use sp_core::storage::well_known_keys::{ + CHILD_STORAGE_KEY_PREFIX, DEFAULT_CHILD_STORAGE_KEY_PREFIX, + }; use sp_runtime::traits::{Bounded, Saturating}; use sp_std::prelude::*; @@ -234,7 +236,7 @@ pub mod pallet { } }, (Some(ref top_key), None) => { - if top_key.starts_with(CHILD_STORAGE_KEY_PREFIX) { + if top_key.starts_with(DEFAULT_CHILD_STORAGE_KEY_PREFIX) { // no child migration at hand, but one will begin here. 
self.current_child = Some(vec![]); self.migrate_child(); @@ -275,9 +277,8 @@ pub mod pallet { self.current_child = next_key; log!( trace, - "migrating child key {:?} from top key {:?}, next task: {:?}", + "migrating child key {:?}, next task: {:?}", sp_core::hexdisplay::HexDisplay::from(¤t_child), - sp_core::hexdisplay::HexDisplay::from(¤t_top), self ); } @@ -719,7 +720,7 @@ pub mod pallet { use sp_core::storage::{ChildType, PrefixedStorageKey}; match ChildType::from_prefixed_key(PrefixedStorageKey::new_ref(storage_key)) { Some((ChildType::ParentKeyId, storage_key)) => storage_key, - None => &[], // Ignore TODO + None => unreachable!(), } } } @@ -843,7 +844,6 @@ mod mock { pub fn new_test_ext() -> sp_io::TestExternalities { use sp_core::storage::ChildInfo; - let t = frame_system::GenesisConfig::default(); let mut storage = sp_core::storage::Storage { top: vec![ (b"key1".to_vec(), vec![1u8; 10]), // 6b657931 @@ -856,7 +856,7 @@ mod mock { .collect(), children_default: vec![ ( - b":child_storage:chk1".to_vec(), + b":child_storage:default:chk1".to_vec(), sp_core::storage::StorageChild { data: vec![ (b"key1".to_vec(), vec![1u8; 10]), @@ -864,11 +864,11 @@ mod mock { ] .into_iter() .collect(), - child_info: ChildInfo::new_default(b"chk1"), + child_info: ChildInfo::new_default(b":child_storage:default:chk1"), }, ), ( - b":child_storage:chk2".to_vec(), + b":child_storage:default:chk2".to_vec(), sp_core::storage::StorageChild { data: vec![ (b"key1".to_vec(), vec![1u8; 10]), @@ -876,16 +876,18 @@ mod mock { ] .into_iter() .collect(), - child_info: ChildInfo::new_default(b"chk2"), + child_info: ChildInfo::new_default(b":child_storage:default:chk2"), }, ), ] .into_iter() .collect(), }; - t.assimilate_storage::(&mut storage).unwrap(); - sp_tracing::try_init_simple(); + // let t = frame_system::GenesisConfig::default(); + // t.assimilate_storage::(&mut storage).unwrap(); + + sp_tracing::try_init_simple(); (storage, StateVersion::V0).into() } From 
e41881c2a251a084763c7fecdc49bd7d3c33596f Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 21 Oct 2021 11:27:53 +0200 Subject: [PATCH 113/188] fix child migration calls. --- frame/state-trie-migration/src/lib.rs | 16 ++++++++-------- primitives/state-machine/src/testing.rs | 1 - 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 907c14df202b4..b56a70dc40a98 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -267,13 +267,13 @@ pub mod pallet { self.current_child.clone().expect("value checked to be `Some`; qed"); let current_top = self.current_top.clone().expect("value checked to be `Some`; qed"); - let child_key = Pallet::::child_io_key(¤t_child); - if let Some(data) = sp_io::default_child_storage::get(child_key, ¤t_top) { + let child_key = Pallet::::child_io_key(¤t_top); + if let Some(data) = sp_io::default_child_storage::get(child_key, ¤t_child) { self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); - sp_io::default_child_storage::set(child_key, ¤t_top, &data) + sp_io::default_child_storage::set(child_key, ¤t_child, &data) } self.dyn_child_items.saturating_inc(); - let next_key = sp_io::default_child_storage::next_key(child_key, ¤t_top); + let next_key = sp_io::default_child_storage::next_key(child_key, ¤t_child); self.current_child = next_key; log!( trace, @@ -856,7 +856,7 @@ mod mock { .collect(), children_default: vec![ ( - b":child_storage:default:chk1".to_vec(), + b"chk1".to_vec(), sp_core::storage::StorageChild { data: vec![ (b"key1".to_vec(), vec![1u8; 10]), @@ -864,11 +864,11 @@ mod mock { ] .into_iter() .collect(), - child_info: ChildInfo::new_default(b":child_storage:default:chk1"), + child_info: ChildInfo::new_default(b"chk1"), }, ), ( - b":child_storage:default:chk2".to_vec(), + b"chk2".to_vec(), sp_core::storage::StorageChild { data: vec![ (b"key1".to_vec(), vec![1u8; 10]), @@ -876,7 +876,7 @@ mod mock 
{ ] .into_iter() .collect(), - child_info: ChildInfo::new_default(b":child_storage:default:chk2"), + child_info: ChildInfo::new_default(b"chk2"), }, ), ] diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 6964eb322cd3e..89468e938ca16 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -123,7 +123,6 @@ where overlay.set_collect_extrinsics(changes_trie_config.is_some()); assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); - assert!(storage.children_default.keys().all(|key| is_child_storage_key(key))); storage.top.insert(CODE.to_vec(), code.to_vec()); From ac1a80cae62801fc6e9544dd3f55a24edee14b56 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 21 Oct 2021 12:13:33 +0200 Subject: [PATCH 114/188] useless closure --- primitives/state-machine/src/trie_backend.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index 6b36bdb153544..7ec8a24a16936 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -199,14 +199,14 @@ where { let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); - let res = || match state_version { + let res = match state_version { StateVersion::V0 => delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta), StateVersion::V1 => delta_trie_root::, _, _, _, _, _>(&mut eph, root, delta), }; - match res() { + match res { Ok(ret) => root = ret, Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), } From b06a90c8eec2b73451047ce96c534a98a71f3b98 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 21 Oct 2021 12:24:29 +0200 Subject: [PATCH 115/188] remove remaining state_hash variables. 
--- client/api/src/backend.rs | 4 ++-- client/api/src/in_mem.rs | 12 ++++++------ client/db/src/bench.rs | 12 ++++++------ client/db/src/storage_cache.rs | 16 ++++++++-------- client/light/src/backend.rs | 16 ++++++++-------- .../state-machine/src/in_memory_backend.rs | 10 +++++----- 6 files changed, 35 insertions(+), 35 deletions(-) diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 0a1f08a51dc53..c05defdd9b79a 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -171,14 +171,14 @@ pub trait BlockImportOperation { &mut self, storage: Storage, commit: bool, - state_hash: StateVersion, + state_version: StateVersion, ) -> sp_blockchain::Result; /// Inject storage data into the database replacing any existing data. fn reset_storage( &mut self, storage: Storage, - state_hash: StateVersion, + state_version: StateVersion, ) -> sp_blockchain::Result; /// Set storage changes. diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 73d639c69ba38..9da7f58d5a708 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -594,7 +594,7 @@ where &mut self, storage: Storage, commit: bool, - state_hash: StateVersion, + state_version: StateVersion, ) -> sp_blockchain::Result { check_genesis_storage(&storage)?; @@ -608,7 +608,7 @@ where let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, - state_hash, + state_version, ); if commit { @@ -663,17 +663,17 @@ where &mut self, storage: Storage, commit: bool, - state_hash: StateVersion, + state_version: StateVersion, ) -> sp_blockchain::Result { - self.apply_storage(storage, commit, state_hash) + self.apply_storage(storage, commit, state_version) } fn reset_storage( &mut self, storage: Storage, - state_hash: StateVersion, + state_version: StateVersion, ) -> sp_blockchain::Result { - self.apply_storage(storage, true, state_hash) + self.apply_storage(storage, true, state_version) } fn 
insert_aux(&mut self, ops: I) -> sp_blockchain::Result<()> diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 5e17cda304de2..cafafca9b635c 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -106,7 +106,7 @@ impl BenchmarkingState { record_proof: bool, enable_tracking: bool, ) -> Result { - let state_hash = sp_runtime::StateVersion::default(); + let state_version = sp_runtime::StateVersion::default(); let mut root = B::Hash::default(); let mut mdb = MemoryDB::>::default(); sp_state_machine::TrieDBMutV1::>::new(&mut mdb, &mut root); @@ -140,7 +140,7 @@ impl BenchmarkingState { state.state.borrow_mut().as_mut().unwrap().full_storage_root( genesis.top.iter().map(|(k, v)| (k.as_ref(), Some(v.as_ref()))), child_delta, - state_hash, + state_version, ); state.genesis = transaction.clone().drain(); state.genesis_root = root.clone(); @@ -418,7 +418,7 @@ impl StateBackend> for BenchmarkingState { fn storage_root<'a>( &self, delta: impl Iterator)>, - state_hash: StateVersion, + state_version: StateVersion, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord, @@ -426,14 +426,14 @@ impl StateBackend> for BenchmarkingState { self.state .borrow() .as_ref() - .map_or(Default::default(), |s| s.storage_root(delta, state_hash)) + .map_or(Default::default(), |s| s.storage_root(delta, state_version)) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - state_hash: StateVersion, + state_version: StateVersion, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord, @@ -441,7 +441,7 @@ impl StateBackend> for BenchmarkingState { self.state .borrow() .as_ref() - .map_or(Default::default(), |s| s.child_storage_root(child_info, delta, state_hash)) + .map_or(Default::default(), |s| s.child_storage_root(child_info, delta, state_version)) } fn pairs(&self) -> Vec<(Vec, Vec)> { diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 8e89ece2f7d6b..579703ea9db85 100644 --- 
a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -676,24 +676,24 @@ impl>, B: BlockT> StateBackend> for Cachin fn storage_root<'a>( &self, delta: impl Iterator)>, - state_hash: StateVersion, + state_version: StateVersion, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord, { - self.state.storage_root(delta, state_hash) + self.state.storage_root(delta, state_version) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - state_hash: StateVersion, + state_version: StateVersion, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord, { - self.state.child_storage_root(child_info, delta, state_hash) + self.state.child_storage_root(child_info, delta, state_version) } fn pairs(&self) -> Vec<(Vec, Vec)> { @@ -876,24 +876,24 @@ impl>, B: BlockT> StateBackend> fn storage_root<'a>( &self, delta: impl Iterator)>, - state_hash: StateVersion, + state_version: StateVersion, ) -> (B::Hash, Self::Transaction) where B::Hash: Ord, { - self.caching_state().storage_root(delta, state_hash) + self.caching_state().storage_root(delta, state_version) } fn child_storage_root<'a>( &self, child_info: &ChildInfo, delta: impl Iterator)>, - state_hash: StateVersion, + state_version: StateVersion, ) -> (B::Hash, bool, Self::Transaction) where B::Hash: Ord, { - self.caching_state().child_storage_root(child_info, delta, state_hash) + self.caching_state().child_storage_root(child_info, delta, state_version) } fn pairs(&self) -> Vec<(Vec, Vec)> { diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 6c6d78d4d1228..4e1339404db0d 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -330,7 +330,7 @@ where &mut self, input: Storage, commit: bool, - state_hash: StateVersion, + state_version: StateVersion, ) -> ClientResult { check_genesis_storage(&input)?; @@ -360,9 +360,9 @@ where storage.insert(Some(storage_child.child_info), storage_child.data); } - let storage_update = 
InMemoryBackend::from((storage, state_hash)); + let storage_update = InMemoryBackend::from((storage, state_version)); let (storage_root, _) = - storage_update.full_storage_root(std::iter::empty(), child_delta, state_hash); + storage_update.full_storage_root(std::iter::empty(), child_delta, state_version); if commit { self.storage_update = Some(storage_update); } @@ -373,7 +373,7 @@ where fn reset_storage( &mut self, _input: Storage, - _state_hash: StateVersion, + _state_version: StateVersion, ) -> ClientResult { Err(ClientError::NotAvailableOnLightClient) } @@ -532,13 +532,13 @@ where fn storage_root<'a>( &self, delta: impl Iterator)>, - state_hash: StateVersion, + state_version: StateVersion, ) -> (H::Out, Self::Transaction) where H::Out: Ord, { match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.storage_root(delta, state_hash), + GenesisOrUnavailableState::Genesis(ref state) => state.storage_root(delta, state_version), GenesisOrUnavailableState::Unavailable => Default::default(), } } @@ -547,14 +547,14 @@ where &self, child_info: &ChildInfo, delta: impl Iterator)>, - state_hash: StateVersion, + state_version: StateVersion, ) -> (H::Out, bool, Self::Transaction) where H::Out: Ord, { match *self { GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(child_info, delta, state_hash); + let (root, is_equal, _) = state.child_storage_root(child_info, delta, state_version); (root, is_equal, Default::default()) }, GenesisOrUnavailableState::Unavailable => (H::Out::default(), true, Default::default()), diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index de1e10c04afec..d163aae200bee 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -190,13 +190,13 @@ mod tests { /// Assert in memory backend with only child trie keys works as trie backend. 
#[test] fn in_memory_with_child_trie_only() { - let state_hash = sp_core::StateVersion::default(); + let state_version = sp_core::StateVersion::default(); let storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); let child_info = &child_info; let storage = storage.update( vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], - state_hash, + state_version, ); let trie_backend = storage.as_trie_backend().unwrap(); assert_eq!(trie_backend.child_storage(child_info, b"2").unwrap(), Some(b"3".to_vec())); @@ -206,17 +206,17 @@ mod tests { #[test] fn insert_multiple_times_child_data_works() { - let state_hash = sp_core::StateVersion::default(); + let state_version = sp_core::StateVersion::default(); let mut storage = new_in_mem::(); let child_info = ChildInfo::new_default(b"1"); storage.insert( vec![(Some(child_info.clone()), vec![(b"2".to_vec(), Some(b"3".to_vec()))])], - state_hash, + state_version, ); storage.insert( vec![(Some(child_info.clone()), vec![(b"1".to_vec(), Some(b"3".to_vec()))])], - state_hash, + state_version, ); assert_eq!(storage.child_storage(&child_info, &b"2"[..]), Ok(Some(b"3".to_vec()))); From 724b26f15531ef5db5fe17a910ab6ac8af4f2881 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Thu, 21 Oct 2021 14:51:06 +0200 Subject: [PATCH 116/188] Fix and add more tests --- Cargo.lock | 1 + frame/state-trie-migration/Cargo.toml | 1 + frame/state-trie-migration/src/lib.rs | 276 ++++++++++++++++++++++---- 3 files changed, 243 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a35c40e90cc14..f23a74b3dc0d0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5962,6 +5962,7 @@ dependencies = [ "log 0.4.14", "pallet-balances", "parity-scale-codec", + "parking_lot 0.11.1", "scale-info", "sp-core", "sp-io", diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index 3fedac6a31cdd..69d82bced4c35 100644 --- a/frame/state-trie-migration/Cargo.toml +++ 
b/frame/state-trie-migration/Cargo.toml @@ -28,6 +28,7 @@ frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = " [dev-dependencies] pallet-balances = { path = "../balances", version = "4.0.0-dev" } +parking_lot = "0.11.0" sp-tracing = { path = "../../primitives/tracing", version = "4.0.0-dev" } [features] diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index b56a70dc40a98..ea3a6bd53f315 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -99,7 +99,10 @@ pub mod pallet { use sp_core::storage::well_known_keys::{ CHILD_STORAGE_KEY_PREFIX, DEFAULT_CHILD_STORAGE_KEY_PREFIX, }; - use sp_runtime::traits::{Bounded, Saturating}; + use sp_runtime::{ + offchain::storage::{MutateStorageError, StorageValueRef}, + traits::{Bounded, Saturating}, + }; use sp_std::prelude::*; pub(crate) type BalanceOf = @@ -117,10 +120,9 @@ pub mod pallet { /// A migration task stored in state. /// /// It tracks the last top and child keys read. - #[derive(Clone, Encode, Decode, scale_info::TypeInfo)] + #[derive(Clone, Encode, Decode, scale_info::TypeInfo, PartialEq, Eq)] #[codec(mel_bound(T: Config))] #[scale_info(skip_type_params(T))] - #[cfg_attr(test, derive(PartialEq, Eq))] pub struct MigrationTask { /// The top key that we currently have to iterate. /// @@ -134,6 +136,9 @@ pub mod pallet { /// complete. pub(crate) current_child: Option>, + /// A marker to indicate if the previous tick was a child tree migration or not. + pub(crate) prev_tick_child: bool, + /// dynamic counter for the number of items that we have processed in this execution from /// the top trie. /// @@ -155,7 +160,12 @@ pub mod pallet { pub(crate) dyn_size: u32, #[codec(skip)] - _ph: sp_std::marker::PhantomData, + pub(crate) _ph: sp_std::marker::PhantomData, + + // TODO: I might remove these if they end up not being used. 
+ pub(crate) size: u32, + pub(crate) top_items: u32, + pub(crate) child_items: u32, } #[cfg(feature = "std")] @@ -170,9 +180,13 @@ pub mod pallet { "child", &self.current_child.as_ref().map(|d| sp_core::hexdisplay::HexDisplay::from(d)), ) + .field("prev_tick_child", &self.prev_tick_child) .field("dyn_top_items", &self.dyn_top_items) .field("dyn_child_items", &self.dyn_child_items) .field("dyn_size", &self.dyn_size) + .field("size", &self.size) + .field("top_items", &self.top_items) + .field("child_items", &self.child_items) .finish() } } @@ -185,7 +199,11 @@ pub mod pallet { dyn_child_items: Default::default(), dyn_top_items: Default::default(), dyn_size: Default::default(), + prev_tick_child: Default::default(), _ph: Default::default(), + size: Default::default(), + top_items: Default::default(), + child_items: Default::default(), } } } @@ -204,12 +222,17 @@ pub mod pallet { /// bounded (e.g. a parachain), but it is acceptable otherwise (relay chain, offchain /// workers). pub(crate) fn migrate_until_exhaustion(&mut self, limits: MigrationLimits) { + log!(debug, "running migrations until {:?}", limits); loop { self.migrate_tick(); if self.exhausted(limits) { break } } + self.size = self.size.saturating_add(self.dyn_size); + self.child_items = self.child_items.saturating_add(self.dyn_child_items); + self.top_items = self.top_items.saturating_add(self.dyn_top_items); + log!(debug, "finished with {:?}", self); } /// Check if there's any work left, or if we have exhausted the limits already. @@ -230,21 +253,25 @@ pub mod pallet { (Some(_), Some(_)) => { // we're in the middle of doing work on a child tree. self.migrate_child(); - if self.current_child.is_none() { - // this is the end of this child trie. process the top trie as well. 
- self.migrate_top() - } }, (Some(ref top_key), None) => { - if top_key.starts_with(DEFAULT_CHILD_STORAGE_KEY_PREFIX) { + if top_key.starts_with(DEFAULT_CHILD_STORAGE_KEY_PREFIX) && + !self.prev_tick_child + { // no child migration at hand, but one will begin here. - self.current_child = Some(vec![]); - self.migrate_child(); - if self.current_child.is_none() { - // this is the end of this child trie. process the top trie as well. - self.migrate_top() + let maybe_first_child_key = { + let child_top_key = Pallet::::child_io_key(top_key); + sp_io::default_child_storage::next_key(child_top_key, &vec![]) + }; + if let Some(first_child_key) = maybe_first_child_key { + self.current_child = Some(first_child_key); + self.prev_tick_child = true; + self.migrate_child(); + } else { + self.migrate_top(); } } else { + self.prev_tick_child = false; self.migrate_top(); } }, @@ -267,13 +294,13 @@ pub mod pallet { self.current_child.clone().expect("value checked to be `Some`; qed"); let current_top = self.current_top.clone().expect("value checked to be `Some`; qed"); - let child_key = Pallet::::child_io_key(¤t_top); - if let Some(data) = sp_io::default_child_storage::get(child_key, ¤t_child) { + let child_top_key = Pallet::::child_io_key(¤t_top); + if let Some(data) = sp_io::default_child_storage::get(child_top_key, ¤t_child) { self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); - sp_io::default_child_storage::set(child_key, ¤t_child, &data) + sp_io::default_child_storage::set(child_top_key, ¤t_child, &data) } self.dyn_child_items.saturating_inc(); - let next_key = sp_io::default_child_storage::next_key(child_key, ¤t_child); + let next_key = sp_io::default_child_storage::next_key(child_top_key, ¤t_child); self.current_child = next_key; log!( trace, @@ -360,6 +387,9 @@ pub mod pallet { /// The priority used for unsigned transactions. type Priority: Get; + + /// The repeat frequency of offchain workers. + type OffchainRepeat: Get; } /// Migration progress. 
@@ -464,6 +494,7 @@ pub mod pallet { task.dyn_child_items, MigrationCompute::Unsigned, )); + MigrationProcess::::put(task); Ok(().into()) } @@ -625,7 +656,12 @@ pub mod pallet { } } - fn offchain_worker(_: BlockNumberFor) { + fn offchain_worker(now: BlockNumberFor) { + if Self::ensure_offchain_repeat_frequency(now).is_err() { + return + } + + log!(debug, "started offchain worker thread."); if let Some(unsigned_size_limit) = Self::unsigned_size_limit() { let mut task = Self::migration_process(); let limits = @@ -646,10 +682,17 @@ pub mod pallet { match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { Ok(_) => { log!( - debug, + info, "submitted a call to migrate {} items of {} bytes.", safe_items_to_read, - task.dyn_size + { + let mut t = Self::migration_process(); + t.migrate_until_exhaustion(MigrationLimits { + item: safe_items_to_read, + size: Bounded::max_value(), + }); + t.dyn_size + } ) }, Err(why) => { @@ -701,6 +744,8 @@ pub mod pallet { } impl Pallet { + const OFFCHAIN_LAST_BLOCK: &'static [u8] = b"parity/state-migration/last-block"; + /// The real weight of a migration of the given number of `items` with total `size`. fn dynamic_weight(items: u32, size: u32) -> frame_support::pallet_prelude::Weight { let items = items as Weight; @@ -723,6 +768,48 @@ pub mod pallet { None => unreachable!(), } } + + /// Checks if an execution of the offchain worker is permitted at the given block number, or + /// not. + /// + /// This makes sure that + /// 1. we don't run on previous blocks in case of a re-org + /// 2. we don't run twice within a window of length `T::OffchainRepeat`. + /// + /// Returns `Ok(())` if offchain worker limit is respected, `Err(reason)` otherwise. If + /// `Ok()` is returned, `now` is written in storage and will be used in further calls as the + /// baseline. 
+ pub fn ensure_offchain_repeat_frequency(now: T::BlockNumber) -> Result<(), &'static str> { + let threshold = T::OffchainRepeat::get(); + let last_block = StorageValueRef::persistent(&Self::OFFCHAIN_LAST_BLOCK); + + let mutate_stat = last_block.mutate::<_, &'static str, _>( + |maybe_head: Result, _>| { + match maybe_head { + Ok(Some(head)) if now < head => Err("fork."), + Ok(Some(head)) if now >= head && now <= head + threshold => + Err("recently executed."), + Ok(Some(head)) if now > head + threshold => { + // we can run again now. Write the new head. + Ok(now) + }, + _ => { + // value doesn't exists. Probably this node just booted up. Write, and + // okay. + Ok(now) + }, + } + }, + ); + + match mutate_stat { + Ok(_) => Ok(()), + Err(MutateStorageError::ConcurrentModification(_)) => + Err("failed to write to offchain db (ConcurrentModification)."), + Err(MutateStorageError::ValueFunctionFailed(_)) => + Err("failed to write to offchain db (ValueFunctionFailed)."), + } + } } } @@ -748,14 +835,21 @@ mod benchmarks { #[cfg(test)] mod mock { + use parking_lot::RwLock; + use std::sync::Arc; + use super::*; use crate as pallet_state_trie_migration; use frame_support::{parameter_types, traits::Hooks}; use frame_system::EnsureRoot; use sp_core::H256; use sp_runtime::{ + offchain::{ + testing::{PoolState, TestOffchainExt, TestTransactionPoolExt}, + OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, + }, testing::Header, - traits::{BlakeTwo256, IdentityLookup}, + traits::{BlakeTwo256, Dispatchable, Header as _, IdentityLookup}, StateVersion, }; @@ -808,6 +902,7 @@ mod mock { parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; + pub const OffchainRepeat: u64 = 4; } impl pallet_balances::Config for Test { @@ -829,6 +924,7 @@ mod mock { type SignedDepositPerItem = (); type WeightInfo = (); type Priority = (); + type OffchainRepeat = OffchainRepeat; } impl frame_system::offchain::SendTransactionTypes for Test @@ -841,10 +937,10 @@ mod mock { pub type Extrinsic = sp_runtime::testing::TestXt; - pub fn new_test_ext() -> sp_io::TestExternalities { + pub fn new_test_ext(version: StateVersion) -> sp_io::TestExternalities { use sp_core::storage::ChildInfo; - let mut storage = sp_core::storage::Storage { + let storage = sp_core::storage::Storage { top: vec![ (b"key1".to_vec(), vec![1u8; 10]), // 6b657931 (b"key2".to_vec(), vec![2u8; 20]), // 6b657932 @@ -884,48 +980,158 @@ mod mock { .collect(), }; - // let t = frame_system::GenesisConfig::default(); - // t.assimilate_storage::(&mut storage).unwrap(); - sp_tracing::try_init_simple(); - (storage, StateVersion::V0).into() + (storage, version).into() + } + + pub fn new_offchain_ext( + version: StateVersion, + ) -> (sp_io::TestExternalities, Arc>) { + let mut ext = new_test_ext(version); + let (offchain, _offchain_state) = TestOffchainExt::new(); + let (pool, pool_state) = TestTransactionPoolExt::new(); + + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + + (ext, pool_state) } - pub fn run_to_block(n: u64) { + pub fn run_to_block(n: u64) -> H256 { + let mut root = Default::default(); while System::block_number() < n { System::set_block_number(System::block_number() + 1); System::on_initialize(System::block_number()); + StateTrieMigration::on_initialize(System::block_number()); + root = System::finalize().state_root().clone(); System::on_finalize(System::block_number()); } + root + } + + pub fn run_to_block_and_drain_pool(n: u64, pool: Arc>) -> H256 { + let mut root = 
Default::default(); + while System::block_number() < n { + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + + StateTrieMigration::on_initialize(System::block_number()); + + // drain previous transactions + pool.read() + .transactions + .clone() + .into_iter() + .map(|uxt| ::decode(&mut &*uxt).unwrap()) + .for_each(|xt| { + // dispatch them all with no origin. + xt.call.dispatch(frame_system::RawOrigin::None.into()).unwrap(); + }); + pool.try_write().unwrap().transactions.clear(); + + StateTrieMigration::offchain_worker(System::block_number()); + + root = System::finalize().state_root().clone(); + System::on_finalize(System::block_number()); + } + root } } #[cfg(test)] mod test { use super::{mock::*, *}; + use sp_runtime::StateVersion; + use std::sync::Arc; #[test] - fn auto_migrate_works() { - new_test_ext().execute_with(|| { + fn auto_migrate_works_single_item_per_block() { + let mut ext = new_test_ext(StateVersion::V0); + let root_upgraded = ext.execute_with(|| { assert_eq!(AutoLimits::::get(), None); assert_eq!(MigrationProcess::::get(), Default::default()); // nothing happens if we don't set the limits. - run_to_block(50); + let _ = run_to_block(50); assert_eq!(MigrationProcess::::get(), Default::default()); // this should allow 1 item per block to be migrated. AutoLimits::::put(Some(MigrationLimits { item: 1, size: 150 })); - run_to_block(80); - }) + let root = run_to_block(70); + + // eventually everything is over. + assert!(matches!( + StateTrieMigration::migration_process(), + MigrationTask { current_child: None, current_top: None, .. 
} + )); + root + }); + + let mut ext2 = new_test_ext(StateVersion::V1); + let root = ext2.execute_with(|| { + // update ex2 to contain the new items + let _ = run_to_block(50); + AutoLimits::::put(Some(MigrationLimits { item: 1, size: 150 })); + run_to_block(70) + }); + assert_eq!(root, root_upgraded); + } + + #[test] + fn auto_migrate_works_multi_item_per_block() { + let mut ext = new_test_ext(StateVersion::V0); + let root_upgraded = ext.execute_with(|| { + assert_eq!(AutoLimits::::get(), None); + assert_eq!(MigrationProcess::::get(), Default::default()); + + // nothing happens if we don't set the limits. + run_to_block(50); + assert_eq!(MigrationProcess::::get(), Default::default()); + + // this should allow 1 item per block to be migrated. + AutoLimits::::put(Some(MigrationLimits { item: 5, size: 150 })); + + let root = run_to_block(70); + + // eventually everything is over. + assert!(matches!( + StateTrieMigration::migration_process(), + MigrationTask { current_child: None, current_top: None, .. } + )); + + root + }); + + let mut ext2 = new_test_ext(StateVersion::V1); + let root = ext2.execute_with(|| { + // update ex2 to contain the new items + run_to_block(50); + AutoLimits::::put(Some(MigrationLimits { item: 5, size: 150 })); + run_to_block(70) + }); + assert_eq!(root, root_upgraded); } #[test] fn unsigned_migration_works() { - todo!(); + let (mut ext, pool) = new_offchain_ext(StateVersion::V0); + ext.execute_with(|| { + assert_eq!(UnsignedSizeLimit::::get(), None); + assert_eq!(MigrationProcess::::get(), Default::default()); + + // nothing happens if we don't set the limits. + run_to_block_and_drain_pool(50, Arc::clone(&pool)); + assert_eq!(MigrationProcess::::get(), Default::default()); + + // allow 50 bytes per run. 
+ UnsignedSizeLimit::::put(Some(50)); + + run_to_block_and_drain_pool(70, Arc::clone(&pool)); + }); } #[test] From 84e3ab9f469a00218817ead19457f9091729dc43 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 21 Oct 2021 15:19:17 +0200 Subject: [PATCH 117/188] Remove outdated comment --- primitives/trie/src/node_header.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index 839fffb87058f..10e29f6af974c 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -146,7 +146,7 @@ pub(crate) fn size_and_prefix_iterator( first_byte.chain(sp_std::iter::from_fn(next_bytes)) } -/// Encodes size and prefix to a stream output (prefix on 2 first bit only). +/// Encodes size and prefix to a stream output. fn encode_size_and_prefix(size: usize, prefix: u8, prefix_mask: usize, out: &mut W) where W: Output + ?Sized, From 213d64899fec3abc0b9e72434a40cf0058668ea5 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 21 Oct 2021 15:21:52 +0200 Subject: [PATCH 118/188] useless inner hash --- primitives/trie/src/node_codec.rs | 32 +++++++++++++------------------ 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 6ebf2f8f304af..53a5de270a79a 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -80,8 +80,19 @@ impl<'a> Input for ByteSliceInput<'a> { #[derive(Default, Clone)] pub struct NodeCodec(PhantomData); -impl NodeCodec { - fn decode_plan_inner_hashed(data: &[u8]) -> Result { +impl NodeCodecT for NodeCodec +where + H: Hasher, +{ + const ESCAPE_HEADER: Option = Some(trie_constants::ESCAPE_COMPACT_HEADER); + type Error = Error; + type HashOut = H::Out; + + fn hashed_null_node() -> ::Out { + H::hash(::empty_node()) + } + + fn decode_plan(data: &[u8]) -> Result { let mut input = ByteSliceInput::new(data); let header = NodeHeader::decode(&mut input)?; 
@@ -165,23 +176,6 @@ impl NodeCodec { }, } } -} - -impl NodeCodecT for NodeCodec -where - H: Hasher, -{ - const ESCAPE_HEADER: Option = Some(trie_constants::ESCAPE_COMPACT_HEADER); - type Error = Error; - type HashOut = H::Out; - - fn hashed_null_node() -> ::Out { - H::hash(::empty_node()) - } - - fn decode_plan(data: &[u8]) -> Result { - Self::decode_plan_inner_hashed(data) - } fn is_empty_node(data: &[u8]) -> bool { data == ::empty_node() From 24309cda0bb43c89b13ebd2d88e4d0f10baedca9 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 21 Oct 2021 19:08:16 +0200 Subject: [PATCH 119/188] fmt --- client/light/src/backend.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs index 4e1339404db0d..1f17a726ea2f9 100644 --- a/client/light/src/backend.rs +++ b/client/light/src/backend.rs @@ -538,7 +538,8 @@ where H::Out: Ord, { match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.storage_root(delta, state_version), + GenesisOrUnavailableState::Genesis(ref state) => + state.storage_root(delta, state_version), GenesisOrUnavailableState::Unavailable => Default::default(), } } @@ -554,7 +555,8 @@ where { match *self { GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(child_info, delta, state_version); + let (root, is_equal, _) = + state.child_storage_root(child_info, delta, state_version); (root, is_equal, Default::default()) }, GenesisOrUnavailableState::Unavailable => (H::Out::default(), true, Default::default()), From 6133b5a324e8305093230c3ece51d1f71bb0f4a4 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Fri, 22 Oct 2021 10:29:02 +0200 Subject: [PATCH 120/188] remote tests --- Cargo.lock | 2 + frame/state-trie-migration/Cargo.toml | 3 ++ frame/state-trie-migration/src/lib.rs | 51 ++++++++++++++++++--- primitives/storage/src/lib.rs | 7 +-- utils/frame/remote-externalities/src/lib.rs | 6 +++ 5 files changed, 58 insertions(+), 11 
deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f23a74b3dc0d0..1eb9b28a01726 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5963,12 +5963,14 @@ dependencies = [ "pallet-balances", "parity-scale-codec", "parking_lot 0.11.1", + "remote-externalities", "scale-info", "sp-core", "sp-io", "sp-runtime", "sp-std", "sp-tracing", + "tokio", ] [[package]] diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index 69d82bced4c35..b0b3584e35048 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -30,6 +30,8 @@ frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = " pallet-balances = { path = "../balances", version = "4.0.0-dev" } parking_lot = "0.11.0" sp-tracing = { path = "../../primitives/tracing", version = "4.0.0-dev" } +remote-externalities = { path = "../../utils/frame/remote-externalities", version = "0.10.0-dev" } +tokio = { version = "1.10", features = ["macros"] } [features] default = ["std"] @@ -48,3 +50,4 @@ std = [ runtime-benchmarks = ["frame-benchmarking"] try-runtime = ["frame-support/try-runtime"] +remote-tests = ["std"] diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index ea3a6bd53f315..f795a1795cdfc 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -209,6 +209,12 @@ pub mod pallet { } impl MigrationTask { + /// Return true if the task is finished. + #[cfg(test)] + pub(crate) fn finished(&self) -> bool { + self.current_top.is_none() && self.current_child.is_none() + } + /// get the total number of keys affected by the current task. pub(crate) fn dyn_total_items(&self) -> u32 { self.dyn_child_items.saturating_add(self.dyn_top_items) @@ -870,7 +876,7 @@ mod mock { ); parameter_types! 
{ - pub const BlockHashCount: u64 = 250; + pub const BlockHashCount: u32 = 250; pub const SS58Prefix: u8 = 42; } @@ -881,12 +887,12 @@ mod mock { type Origin = Origin; type Call = Call; type Index = u64; - type BlockNumber = u64; + type BlockNumber = u32; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; - type Header = Header; + type Header = sp_runtime::generic::Header; type Event = Event; type BlockHashCount = BlockHashCount; type DbWeight = (); @@ -902,7 +908,7 @@ mod mock { parameter_types! { pub const ExistentialDeposit: u64 = 1; - pub const OffchainRepeat: u64 = 4; + pub const OffchainRepeat: u32 = 4; } impl pallet_balances::Config for Test { @@ -998,7 +1004,7 @@ mod mock { (ext, pool_state) } - pub fn run_to_block(n: u64) -> H256 { + pub fn run_to_block(n: u32) -> H256 { let mut root = Default::default(); while System::block_number() < n { System::set_block_number(System::block_number() + 1); @@ -1012,7 +1018,7 @@ mod mock { root } - pub fn run_to_block_and_drain_pool(n: u64, pool: Arc>) -> H256 { + pub fn run_to_block_and_drain_pool(n: u32, pool: Arc>) -> H256 { let mut root = Default::default(); while System::block_number() < n { System::set_block_number(System::block_number() + 1); @@ -1144,3 +1150,36 @@ mod test { todo!("test custom keys to be migrated via signed") } } + +#[cfg(all(test, feature = "remote-tests"))] +mod remote_tests { + use super::{mock::*, *}; + use remote_externalities::Mode; + use sp_runtime::StateVersion; + + // we only use the hash type from this (I hope). + type Block = sp_runtime::testing::Block; + + #[tokio::test] + async fn on_initialize_migration() { + sp_tracing::try_init_simple(); + let mut ext = remote_externalities::Builder::::new() + .mode(Mode::Online(std::env!("WS_API").to_owned().into())) + .build() + .await + .unwrap(); + + ext.execute_with(|| { + // requires the block number type in our tests to be same as with mainnet, u32. 
+ let mut now = frame_system::Pallet::::block_number(); + AutoLimits::::put(Some(MigrationLimits { item: 1000, size: 4 * 1024 * 1024 })); + loop { + run_to_block(now + 1); + if StateTrieMigration::migration_process().finished() { + break + } + now += 1; + } + }) + } +} diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 68e7fe6cf8397..a6871c93f6e3e 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -119,8 +119,7 @@ impl DerefMut for PrefixedStorageKey { } impl PrefixedStorageKey { - /// Create a prefixed storage key from its byte array - /// representation. + /// Create a prefixed storage key from its byte array representation. pub fn new(inner: Vec) -> Self { PrefixedStorageKey(inner) } @@ -130,9 +129,7 @@ impl PrefixedStorageKey { PrefixedStorageKey::ref_cast(inner) } - /// Get inner key, this should - /// only be needed when writing - /// into parent trie to avoid an + /// Get inner key, this should only be needed when writing into parent trie to avoid an /// allocation. pub fn into_inner(self) -> Vec { self.0 diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 733ec7c3200ad..3d1033b4db910 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -139,6 +139,12 @@ impl Default for OnlineConfig { } } +impl From for OnlineConfig { + fn from(s: String) -> Self { + Self { transport: s.into(), ..Default::default() } + } +} + /// Configuration of the state snapshot. 
#[derive(Clone)] pub struct SnapshotConfig { From 4eb1d42002742501d149b66017a5bb84fd74ca4b Mon Sep 17 00:00:00 2001 From: kianenigma Date: Tue, 26 Oct 2021 10:13:19 +0200 Subject: [PATCH 121/188] finally ksm works --- Cargo.lock | 1 + frame/state-trie-migration/src/lib.rs | 30 +- primitives/state-machine/src/testing.rs | 11 +- primitives/storage/src/lib.rs | 14 +- utils/frame/remote-externalities/Cargo.toml | 1 + utils/frame/remote-externalities/src/lib.rs | 283 +++++++++++++----- .../cli/src/commands/follow_chain.rs | 6 - 7 files changed, 254 insertions(+), 92 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1eb9b28a01726..791631211dc02 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7236,6 +7236,7 @@ name = "remote-externalities" version = "0.10.0-dev" dependencies = [ "env_logger 0.9.0", + "frame-support", "jsonrpsee-proc-macros", "jsonrpsee-ws-client", "log 0.4.14", diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index f795a1795cdfc..d786841de6a6b 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -96,9 +96,7 @@ pub mod pallet { offchain::{SendTransactionTypes, SubmitTransaction}, pallet_prelude::*, }; - use sp_core::storage::well_known_keys::{ - CHILD_STORAGE_KEY_PREFIX, DEFAULT_CHILD_STORAGE_KEY_PREFIX, - }; + use sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX; use sp_runtime::{ offchain::storage::{MutateStorageError, StorageValueRef}, traits::{Bounded, Saturating}, @@ -159,13 +157,13 @@ pub mod pallet { #[codec(skip)] pub(crate) dyn_size: u32, - #[codec(skip)] - pub(crate) _ph: sp_std::marker::PhantomData, - // TODO: I might remove these if they end up not being used. 
pub(crate) size: u32, pub(crate) top_items: u32, pub(crate) child_items: u32, + + #[codec(skip)] + pub(crate) _ph: sp_std::marker::PhantomData, } #[cfg(feature = "std")] @@ -854,7 +852,6 @@ mod mock { testing::{PoolState, TestOffchainExt, TestTransactionPoolExt}, OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, }, - testing::Header, traits::{BlakeTwo256, Dispatchable, Header as _, IdentityLookup}, StateVersion, }; @@ -1154,8 +1151,7 @@ mod test { #[cfg(all(test, feature = "remote-tests"))] mod remote_tests { use super::{mock::*, *}; - use remote_externalities::Mode; - use sp_runtime::StateVersion; + use remote_externalities::{Mode, OnlineConfig}; // we only use the hash type from this (I hope). type Block = sp_runtime::testing::Block; @@ -1164,7 +1160,12 @@ mod remote_tests { async fn on_initialize_migration() { sp_tracing::try_init_simple(); let mut ext = remote_externalities::Builder::::new() - .mode(Mode::Online(std::env!("WS_API").to_owned().into())) + .mode(Mode::Online(OnlineConfig { + transport: std::env!("WS_API").to_owned().into(), + scrape_children: true, + ..Default::default() + })) + .state_version(sp_core::StateVersion::V0) .build() .await .unwrap(); @@ -1172,14 +1173,23 @@ mod remote_tests { ext.execute_with(|| { // requires the block number type in our tests to be same as with mainnet, u32. 
let mut now = frame_system::Pallet::::block_number(); + let mut duration = 0; AutoLimits::::put(Some(MigrationLimits { item: 1000, size: 4 * 1024 * 1024 })); loop { run_to_block(now + 1); if StateTrieMigration::migration_process().finished() { break } + duration += 1; now += 1; } + + log::info!( + target: LOG_TARGET, + "finished migration in {} block, final state of the task: {:?}", + duration, + StateTrieMigration::migration_process() + ); }) } } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 89468e938ca16..bdbbe174e4e8b 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -160,11 +160,20 @@ where self.offchain_db.clone() } - /// Insert key/value into backend + /// Insert key/value into backend. + /// + /// This only supports inserting `top` keys. pub fn insert(&mut self, k: StorageKey, v: StorageValue) { self.backend.insert(vec![(None, vec![(k, Some(v))])], self.state_version); } + /// Insert key/value into backend. + /// + /// This only supports inserting `top` keys. + pub fn insert_child(&mut self, c: sp_core::storage::ChildInfo, k: StorageKey, v: StorageValue) { + self.backend.insert(vec![(Some(c), vec![(k, Some(v))])], self.state_version); + } + /// Registers the given extension for this instance. pub fn register_extension(&mut self, ext: E) { self.extensions.register(ext); diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index a6871c93f6e3e..51b0db49e7017 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -168,10 +168,8 @@ pub struct StorageChild { pub struct Storage { /// Top trie storage data. pub top: StorageMap, - /// Children trie storage data. - /// The key does not including prefix, for the `default` - /// trie kind, so this is exclusively for the `ChildType::ParentKeyId` - /// tries. + /// Children trie storage data. 
The key does not including prefix, for the `default` trie kind, + /// so this is exclusively for the `ChildType::ParentKeyId` tries. pub children_default: std::collections::HashMap, StorageChild>, } @@ -219,6 +217,10 @@ pub mod well_known_keys { key.starts_with(CHILD_STORAGE_KEY_PREFIX) } + pub fn is_default_child_storage_key(key: &[u8]) -> bool { + key.starts_with(DEFAULT_CHILD_STORAGE_KEY_PREFIX) + } + /// Returns if the given `key` starts with [`CHILD_STORAGE_KEY_PREFIX`] or collides with it. pub fn starts_with_child_storage_key(key: &[u8]) -> bool { if key.len() > CHILD_STORAGE_KEY_PREFIX.len() { @@ -234,7 +236,7 @@ pub const DEFAULT_MAX_INLINE_VALUE: u32 = 33; /// Information related to a child state. #[derive(Debug, Clone)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode))] pub enum ChildInfo { /// This is the one used by default. ParentKeyId(ChildTrieParentKeyId), @@ -382,7 +384,7 @@ impl ChildType { /// Those unique id also required to be long enough to avoid any /// unique id to be prefixed by an other unique id. #[derive(Debug, Clone)] -#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord, Encode, Decode))] pub struct ChildTrieParentKeyId { /// Data is the storage key without prefix. 
data: Vec, diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index 2b35402f8f63f..7bee9f431004b 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -32,6 +32,7 @@ sp-version = { version = "4.0.0-dev", path = "../../../primitives/version" } [dev-dependencies] tokio = { version = "1.10", features = ["macros", "rt-multi-thread"] } pallet-elections-phragmen = { path = "../../../frame/elections-phragmen", version = "5.0.0-dev" } +frame-support = { path = "../../../frame/support", version = "4.0.0-dev" } [features] remote-test = [] diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 3d1033b4db910..473ccd60289bc 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -26,10 +26,13 @@ use log::*; use sp_core::{ hashing::twox_128, hexdisplay::HexDisplay, - storage::{StorageData, StorageKey}, + storage::{ + well_known_keys::{is_default_child_storage_key, DEFAULT_CHILD_STORAGE_KEY_PREFIX}, + ChildInfo, PrefixedStorageKey, StorageData, StorageKey, + }, }; pub use sp_io::TestExternalities; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::{traits::Block as BlockT, StateVersion}; use std::{ fs, path::{Path, PathBuf}, @@ -38,6 +41,8 @@ use std::{ pub mod rpc_api; type KeyPair = (StorageKey, StorageData); +type TopKeyPairs = Vec; +type ChildKeyPairs = Vec<(ChildInfo, Vec)>; const LOG_TARGET: &str = "remote-ext"; const DEFAULT_TARGET: &str = "wss://rpc.polkadot.io"; @@ -45,6 +50,19 @@ const BATCH_SIZE: usize = 1000; jsonrpsee_proc_macros::rpc_client_api! 
{ RpcApi { + #[rpc(method = "childstate_getKeys", positional_params)] + fn child_get_keys( + child_key: PrefixedStorageKey, + prefix: StorageKey, + hash: Option, + ) -> Vec; + #[rpc(method = "childstate_getStorage", positional_params)] + fn child_get_storage( + child_key: PrefixedStorageKey, + prefix: StorageKey, + hash: Option, + ) -> StorageData; + #[rpc(method = "state_getStorage", positional_params)] fn get_storage(prefix: StorageKey, hash: Option) -> StorageData; #[rpc(method = "state_getKeysPaged", positional_params)] @@ -54,6 +72,7 @@ jsonrpsee_proc_macros::rpc_client_api! { start_key: Option, hash: Option, ) -> Vec; + #[rpc(method = "chain_getFinalizedHead", positional_params)] fn finalized_head() -> B::Hash; } @@ -114,6 +133,8 @@ pub struct OnlineConfig { pub state_snapshot: Option, /// The pallets to scrape. If empty, entire chain state will be scraped. pub pallets: Vec, + /// Lookout for child-keys, and scrape them as well if set to true. + pub scrape_children: bool, /// Transport config. pub transport: Transport, } @@ -132,6 +153,7 @@ impl Default for OnlineConfig { fn default() -> Self { Self { transport: Transport { uri: DEFAULT_TARGET.to_owned(), client: None }, + scrape_children: false, at: None, state_snapshot: None, pallets: vec![], @@ -178,6 +200,8 @@ pub struct Builder { hashed_blacklist: Vec>, /// connectivity mode, online or offline. mode: Mode, + /// The state version being used. + state_version: StateVersion, } // NOTE: ideally we would use `DefaultNoBound` here, but not worth bringing in frame-support for @@ -190,6 +214,7 @@ impl Default for Builder { hashed_prefixes: Default::default(), hashed_keys: Default::default(), hashed_blacklist: Default::default(), + state_version: StateVersion::V1, } } } @@ -222,7 +247,7 @@ impl Builder { RpcApi::::get_storage(self.as_online().rpc_client(), key, maybe_at) .await .map_err(|e| { - error!("Error = {:?}", e); + error!(target: LOG_TARGET, "Error = {:?}", e); "rpc get_storage failed." 
}) } @@ -230,7 +255,7 @@ impl Builder { async fn rpc_get_head(&self) -> Result { trace!(target: LOG_TARGET, "rpc: finalized_head"); RpcApi::::finalized_head(self.as_online().rpc_client()).await.map_err(|e| { - error!("Error = {:?}", e); + error!(target: LOG_TARGET, "Error = {:?}", e); "rpc finalized_head failed." }) } @@ -258,6 +283,7 @@ impl Builder { "rpc get_keys failed" })?; let page_len = page.len(); + all_keys.extend(page); if page_len < PAGE as usize { @@ -314,7 +340,7 @@ impl Builder { log::error!( target: LOG_TARGET, "failed to execute batch: {:?}. Error: {:?}", - chunk_keys, + chunk_keys.iter().map(|k| HexDisplay::from(k)), e ); "batch failed." @@ -346,22 +372,116 @@ impl Builder { // Internal methods impl Builder { - /// Save the given data as state snapshot. - fn save_state_snapshot(&self, data: &[KeyPair], path: &Path) -> Result<(), &'static str> { + /// Save the given data to the top keys snapshot. + fn save_top_snapshot(&self, data: &[KeyPair], path: &PathBuf) -> Result<(), &'static str> { + let mut path = path.clone(); + path.set_extension("top"); debug!(target: LOG_TARGET, "writing to state snapshot file {:?}", path); fs::write(path, data.encode()).map_err(|_| "fs::write failed.")?; Ok(()) } - /// initialize `Self` from state snapshot. Panics if the file does not exist. - fn load_state_snapshot(&self, path: &Path) -> Result, &'static str> { - info!(target: LOG_TARGET, "scraping key-pairs from state snapshot {:?}", path); + /// Save the given data to the child keys snapshot. 
+ fn save_child_snapshot( + &self, + data: &ChildKeyPairs, + path: &PathBuf, + ) -> Result<(), &'static str> { + debug!(target: LOG_TARGET, "writing to state snapshot file {:?}", path); + let mut path = path.clone(); + path.set_extension("child"); + fs::write(path, data.encode()).map_err(|_| "fs::write failed.")?; + Ok(()) + } + + fn load_top_snapshot(&self, path: &PathBuf) -> Result { + let mut path = path.clone(); + path.set_extension("top"); + info!(target: LOG_TARGET, "loading top key-pairs from snapshot {:?}", path); + let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; + Decode::decode(&mut &*bytes).map_err(|_| "decode failed") + } + + fn load_child_snapshot(&self, path: &PathBuf) -> Result { + let mut path = path.clone(); + path.set_extension("child"); + info!(target: LOG_TARGET, "loading child key-pairs from snapshot {:?}", path); let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; Decode::decode(&mut &*bytes).map_err(|_| "decode failed") } - /// Build `Self` from a network node denoted by `uri`. - async fn load_remote(&self) -> Result, &'static str> { + /// Load all of the child keys from the remote config, given the already scraped list of top key + /// pairs. + async fn load_child_keys_remote( + &self, + top_kp: &[KeyPair], + ) -> Result)>, &'static str> { + let child_bearing_top_keys = top_kp + .iter() + .filter_map( + |(k, _)| { + if is_default_child_storage_key(k.as_ref()) { + Some(k) + } else { + None + } + }, + ) + .collect::>(); + + info!( + target: LOG_TARGET, + "👩‍👦 scraping child-tree data from {} top keys", + child_bearing_top_keys.len() + ); + + let mut child_kp = vec![]; + for prefixed_top_key in child_bearing_top_keys { + let child_keys = RpcApi::::child_get_keys( + self.as_online().rpc_client(), + PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), + StorageKey(vec![]), + self.as_online().at, + ) + .await + .map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc child_get_keys failed." 
+ })?; + + debug!( + target: LOG_TARGET, + "scraped {} child-keys of the child-bearing top key: {:?}", + child_keys.len(), + HexDisplay::from(prefixed_top_key) + ); + + let mut child_kv = vec![]; + for child_key in child_keys { + let child_value = RpcApi::::child_get_storage( + self.as_online().rpc_client(), + PrefixedStorageKey::new(prefixed_top_key.as_ref().to_vec()), + child_key.clone(), + self.as_online().at, + ) + .await + .map_err(|e| { + error!(target: LOG_TARGET, "Error = {:?}", e); + "rpc child_get_storage failed." + })?; + child_kv.push((child_key, child_value)); + } + + // super tricky. + let un_prefixed = &prefixed_top_key.0[DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..]; + child_kp.push((ChildInfo::new_default(&un_prefixed), child_kv)); + } + + Ok(child_kp) + } + + /// Load all the `top` keys from the remote config. + async fn load_top_keys_remote(&self) -> Result, &'static str> { let config = self.as_online(); let at = self .as_online() @@ -432,16 +552,19 @@ impl Builder { Ok(()) } - pub(crate) async fn pre_build(mut self) -> Result, &'static str> { - let mut base_kv = match self.mode.clone() { - Mode::Offline(config) => self.load_state_snapshot(&config.state_snapshot.path)?, + pub(crate) async fn pre_build( + mut self, + ) -> Result<(Vec, Vec<(ChildInfo, Vec)>), &'static str> { + let mode = self.mode.clone(); + let mut top_kp = match mode { + Mode::Offline(config) => self.load_top_snapshot(&config.state_snapshot.path)?, Mode::Online(config) => { self.init_remote_client().await?; - let kp = self.load_remote().await?; + let top_kp = self.load_top_keys_remote().await?; if let Some(c) = config.state_snapshot { - self.save_state_snapshot(&kp, &c.path)?; + self.save_top_snapshot(&top_kp, &c.path)?; } - kp + top_kp }, }; @@ -452,7 +575,7 @@ impl Builder { "extending externalities with {} manually injected key-values", self.hashed_key_values.len() ); - base_kv.extend(self.hashed_key_values.clone()); + top_kp.extend(self.hashed_key_values.clone()); } // exclude 
manual key values. @@ -462,10 +585,22 @@ impl Builder { "excluding externalities from {} keys", self.hashed_blacklist.len() ); - base_kv.retain(|(k, _)| !self.hashed_blacklist.contains(&k.0)) + top_kp.retain(|(k, _)| !self.hashed_blacklist.contains(&k.0)) } - Ok(base_kv) + let child_kp = match self.mode { + Mode::Online(ref config) if config.scrape_children => { + let child_kp = self.load_child_keys_remote(&top_kp).await?; + if let Some(c) = &config.state_snapshot { + self.save_child_snapshot(&child_kp, &c.path)?; + } + child_kp + }, + Mode::Offline(ref config) => self.load_child_snapshot(&config.state_snapshot.path)?, + _ => Default::default(), + }; + + Ok((top_kp, child_kp)) } } @@ -512,6 +647,12 @@ impl Builder { self } + /// The state version to use. + pub fn state_version(mut self, version: StateVersion) -> Self { + self.state_version = version; + self + } + /// overwrite the `at` value, if `mode` is set to [`Mode::Online`]. /// /// noop if `mode` is [`Mode::Offline`] @@ -525,16 +666,42 @@ impl Builder { /// Build the test externalities. 
pub async fn build(self) -> Result { - let kv = self.pre_build().await?; - let mut ext = TestExternalities::new_empty(); - - info!(target: LOG_TARGET, "injecting a total of {} keys", kv.len()); - for (k, v) in kv { - let (k, v) = (k.0, v.0); - // Insert the key,value pair into the test trie backend - ext.insert(k, v); + let state_version = self.state_version.clone(); + let (top_kv, child_kv) = self.pre_build().await?; + let mut ext = TestExternalities::new_with_code_and_state( + Default::default(), + Default::default(), + state_version, + ); + + info!(target: LOG_TARGET, "injecting a total of {} top keys", top_kv.len()); + for (k, v) in top_kv { + // skip writing the child root data if + if is_default_child_storage_key(k.as_ref()) { + continue + } + ext.insert(k.0, v.0); } + info!( + target: LOG_TARGET, + "injecting a total of {} child keys", + child_kv.iter().map(|(_, kv)| kv).flatten().count() + ); + + for (info, key_values) in child_kv { + for (k, v) in key_values { + ext.insert_child(info.clone(), k.0, v.0); + } + } + + ext.commit_all().unwrap(); + info!( + target: LOG_TARGET, + "initialized state externalities with storage root {:?}", + ext.as_backend().root() + ); + Ok(ext) } } @@ -628,11 +795,7 @@ mod remote_tests { init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { - pallets: vec![ - "Proxy".to_owned(), - "Multisig".to_owned(), - "PhragmenElection".to_owned(), - ], + pallets: vec!["Proxy".to_owned(), "Multisig".to_owned()], ..Default::default() })) .build() @@ -641,41 +804,6 @@ mod remote_tests { .execute_with(|| {}); } - #[tokio::test] - async fn sanity_check_decoding() { - use pallet_elections_phragmen::SeatHolder; - use sp_core::crypto::Ss58Codec; - type AccountId = sp_runtime::AccountId32; - type Balance = u128; - frame_support::generate_storage_alias!( - PhragmenElection, - Members => - Value>> - ); - - init_logger(); - Builder::::new() - .mode(Mode::Online(OnlineConfig { - pallets: vec!["PhragmenElection".to_owned()], - 
..Default::default() - })) - .build() - .await - .expect(REMOTE_INACCESSIBLE) - .execute_with(|| { - // Gav's polkadot account. 99% this will be in the council. - let gav_polkadot = - AccountId::from_ss58check("13RDY9nrJpyTDBSUdBw12dGwhk19sGwsrVZ2bxkzYHBSagP2") - .unwrap(); - let members = Members::get().unwrap(); - assert!(members - .iter() - .map(|s| s.who.clone()) - .find(|a| a == &gav_polkadot) - .is_some()); - }); - } - #[tokio::test] async fn can_create_state_snapshot() { init_logger(); @@ -713,4 +841,21 @@ mod remote_tests { .expect(REMOTE_INACCESSIBLE) .execute_with(|| {}); } + + #[tokio::test] + async fn can_build_child_tree() { + init_logger(); + Builder::::new() + .mode(Mode::Online(OnlineConfig { + transport: "wss://kusama-rpc.polkadot.io".to_owned().into(), + pallets: vec!["Crowdloan".to_owned()], + ..Default::default() + })) + // get all the child tries. + .inject_hashed_prefix(DEFAULT_CHILD_STORAGE_KEY_PREFIX) + .build() + .await + .expect(REMOTE_INACCESSIBLE) + .execute_with(|| {}); + } } diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index 69d0eb498cc6d..a78e3d56e175a 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -116,12 +116,6 @@ where .inject_hashed_key_value(&[(code_key.clone(), code.clone())]) .build() .await?; - log::info!( - target: LOG_TARGET, - "initialized state externalities at {:?}, storage root {:?}", - number, - new_ext.as_backend().root() - ); let (expected_spec_name, expected_spec_version, spec_state_version) = local_spec::(&new_ext, &executor); From 2a9861026232b9ad0635ccf933efe17d05eeb298 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Tue, 26 Oct 2021 10:53:20 +0200 Subject: [PATCH 122/188] batches are broken --- utils/frame/remote-externalities/src/lib.rs | 90 +++++++++++++++++---- 1 file changed, 75 insertions(+), 15 deletions(-) diff --git 
a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 473ccd60289bc..471e46959a1be 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -46,7 +46,8 @@ type ChildKeyPairs = Vec<(ChildInfo, Vec)>; const LOG_TARGET: &str = "remote-ext"; const DEFAULT_TARGET: &str = "wss://rpc.polkadot.io"; -const BATCH_SIZE: usize = 1000; +const BATCH_SIZE: usize = 512; +const PAGE: u32 = BATCH_SIZE as u32; jsonrpsee_proc_macros::rpc_client_api! { RpcApi { @@ -266,7 +267,6 @@ impl Builder { prefix: StorageKey, at: B::Hash, ) -> Result, &'static str> { - const PAGE: u32 = 512; let mut last_key: Option = None; let mut all_keys: Vec = vec![]; let keys = loop { @@ -318,7 +318,7 @@ impl Builder { use serde_json::to_value; let keys = self.get_keys_paged(prefix, at).await?; let keys_count = keys.len(); - debug!(target: LOG_TARGET, "Querying a total of {} keys", keys.len()); + debug!(target: LOG_TARGET, "Querying a total of {} top keys", keys.len()); let mut key_values: Vec = vec![]; let client = self.as_online().rpc_client(); @@ -336,16 +336,40 @@ impl Builder { ) }) .collect::>(); - let values = client.batch_request::>(batch).await.map_err(|e| { - log::error!( - target: LOG_TARGET, - "failed to execute batch: {:?}. Error: {:?}", - chunk_keys.iter().map(|k| HexDisplay::from(k)), - e - ); - "batch failed." - })?; + + // TODO: Niklas: this code works, but the below batch does not work ONLY on kusama.. + // weird stuff. + // try it out with: test -- --release -p remote-externalities --features remote-test + // can_build_one_small_pallet, it should work without any requirements. + let mut values = vec![]; + for (method, params) in batch { + let value = client + .request::>(method, params.clone()) + .await + .map_err(|e| { + log::error!( + target: LOG_TARGET, + "failed to decode key: {:?}. 
Error: {:?}", + params, + e + ); + }) + .unwrap_or_default(); + values.push(value); + } + + // let values = client.batch_request::>(batch).await.map_err(|e| { + // log::error!( + // target: LOG_TARGET, + // "failed to execute batch: {:?}. Error: {:?}", + // chunk_keys.iter().map(|k| HexDisplay::from(k)).collect::>(), + // e + // ); + // "batch failed." + // })?; + assert_eq!(chunk_keys.len(), values.len()); + for (idx, key) in chunk_keys.into_iter().enumerate() { let maybe_value = values[idx].clone(); let value = maybe_value.unwrap_or_else(|| { @@ -777,7 +801,7 @@ mod remote_tests { const REMOTE_INACCESSIBLE: &'static str = "Can't reach the remote node. Is it running?"; #[tokio::test] - async fn can_build_one_pallet() { + async fn can_build_one_big_pallet() { init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { @@ -790,11 +814,49 @@ mod remote_tests { .execute_with(|| {}); } + #[tokio::test] + async fn can_build_one_small_pallet() { + init_logger(); + Builder::::new() + .mode(Mode::Online(OnlineConfig { + transport: "wss://kusama-rpc.polkadot.io".to_owned().into(), + pallets: vec!["Council".to_owned()], + ..Default::default() + })) + .build() + .await + .expect(REMOTE_INACCESSIBLE) + .execute_with(|| {}); + + Builder::::new() + .mode(Mode::Online(OnlineConfig { + transport: "wss://rpc.polkadot.io".to_owned().into(), + pallets: vec!["Council".to_owned()], + ..Default::default() + })) + .build() + .await + .expect(REMOTE_INACCESSIBLE) + .execute_with(|| {}); + } + #[tokio::test] async fn can_build_few_pallet() { init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { + transport: "wss://kusama-rpc.polkadot.io".to_owned().into(), + pallets: vec!["Proxy".to_owned(), "Multisig".to_owned()], + ..Default::default() + })) + .build() + .await + .expect(REMOTE_INACCESSIBLE) + .execute_with(|| {}); + + Builder::::new() + .mode(Mode::Online(OnlineConfig { + transport: "wss://rpc.polkadot.io".to_owned().into(), pallets: vec!["Proxy".to_owned(), 
"Multisig".to_owned()], ..Default::default() })) @@ -851,8 +913,6 @@ mod remote_tests { pallets: vec!["Crowdloan".to_owned()], ..Default::default() })) - // get all the child tries. - .inject_hashed_prefix(DEFAULT_CHILD_STORAGE_KEY_PREFIX) .build() .await .expect(REMOTE_INACCESSIBLE) From c95cd1e7f20c303069705957e307ee62820980d8 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 27 Oct 2021 11:29:46 +0200 Subject: [PATCH 123/188] clean the benchmarks --- bin/node/runtime/src/lib.rs | 4 ++ frame/state-trie-migration/src/lib.rs | 51 +++++++++++++++------ utils/frame/remote-externalities/src/lib.rs | 45 ++++++------------ utils/frame/try-runtime/cli/src/lib.rs | 1 + 4 files changed, 55 insertions(+), 46 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 6c52a8e934388..4c4c2ad6bd7f0 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1241,11 +1241,15 @@ impl pallet_transaction_storage::Config for Runtime { type WeightInfo = pallet_transaction_storage::weights::SubstrateWeight; } +#[cfg(feature = "runtime-benchmarks")] impl pallet_state_trie_migration::Config for Runtime { type Event = Event; type ControlOrigin = EnsureRoot; type Currency = Balances; type SignedDepositPerItem = (); + type Priority = (); + type OffchainRepeat = (); + type WeightInfo = (); } construct_runtime!( diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index d786841de6a6b..c2c231d9db52e 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -96,7 +96,9 @@ pub mod pallet { offchain::{SendTransactionTypes, SubmitTransaction}, pallet_prelude::*, }; - use sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX; + use sp_core::{ + hexdisplay::HexDisplay, storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX, + }; use sp_runtime::{ offchain::storage::{MutateStorageError, StorageValueRef}, traits::{Bounded, Saturating}, @@ -109,8 +111,9 
@@ pub mod pallet { pub trait WeightInfo { fn process_top_key(x: u32) -> Weight; } + impl WeightInfo for () { - fn process_top_key(x: u32) -> Weight { + fn process_top_key(_: u32) -> Weight { 1000000 } } @@ -157,16 +160,24 @@ pub mod pallet { #[codec(skip)] pub(crate) dyn_size: u32, - // TODO: I might remove these if they end up not being used. + /// The total size of the migration, over all executions. + /// + /// This only kept around for bookkeeping and debugging. pub(crate) size: u32, + /// The total count of top keys in the migration, over all executions. + /// + /// This only kept around for bookkeeping and debugging. pub(crate) top_items: u32, + /// The total count of child keys in the migration, over all executions. + /// + /// This only kept around for bookkeeping and debugging. pub(crate) child_items: u32, #[codec(skip)] pub(crate) _ph: sp_std::marker::PhantomData, } - #[cfg(feature = "std")] + #[cfg(any(feature = "std", feature = "runtime-benchmarks"))] impl sp_std::fmt::Debug for MigrationTask { fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { f.debug_struct("MigrationTask") @@ -748,6 +759,7 @@ pub mod pallet { } impl Pallet { + /// The path used to identify the offchain worker persistent storage. const OFFCHAIN_LAST_BLOCK: &'static [u8] = b"parity/state-migration/last-block"; /// The real weight of a migration of the given number of `items` with total `size`. @@ -765,11 +777,20 @@ pub mod pallet { AutoLimits::::kill(); } - fn child_io_key(storage_key: &Vec) -> &[u8] { + /// Convert a child root key, aka. "Child-bearing top key" into the proper format. 
+ fn child_io_key(root: &Vec) -> &[u8] { use sp_core::storage::{ChildType, PrefixedStorageKey}; - match ChildType::from_prefixed_key(PrefixedStorageKey::new_ref(storage_key)) { - Some((ChildType::ParentKeyId, storage_key)) => storage_key, - None => unreachable!(), + match ChildType::from_prefixed_key(PrefixedStorageKey::new_ref(root)) { + Some((ChildType::ParentKeyId, root)) => root, + None => { + log!( + warn, + "some data seems to be stored under key {:?}, which is a non-default \ + child-trie. This is a logical error and shall not happen.", + HexDisplay::from(root), + ); + Default::default() + }, } } @@ -820,19 +841,22 @@ pub mod pallet { #[cfg(feature = "runtime-benchmarks")] mod benchmarks { use super::*; - use sp_std::prelude::*; + // The size of the key seemingly makes no difference in the read/write time, so we make it + // constant. const KEY: &'static [u8] = b"key"; frame_benchmarking::benchmarks! { process_top_key { - let x in 1 .. (4 * 1024 * 1024); - sp_io::storage::set(KEY, &vec![1u8; x as usize]); + let v in 1 .. 
(4 * 1024 * 1024); + + let value = sp_std::vec![1u8; v as usize]; + sp_io::storage::set(KEY, &value); }: { let data = sp_io::storage::get(KEY).unwrap(); - sp_io::storage::set(KEY, &vec![1u8; x as usize]); + sp_io::storage::set(KEY, &data); let _next = sp_io::storage::next_key(KEY); - assert_eq!(data.len(), x as usize); + assert_eq!(data, value); } } } @@ -842,7 +866,6 @@ mod mock { use parking_lot::RwLock; use std::sync::Arc; - use super::*; use crate as pallet_state_trie_migration; use frame_support::{parameter_types, traits::Hooks}; use frame_system::EnsureRoot; diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 471e46959a1be..c3322dda72ca2 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -337,36 +337,15 @@ impl Builder { }) .collect::>(); - // TODO: Niklas: this code works, but the below batch does not work ONLY on kusama.. - // weird stuff. - // try it out with: test -- --release -p remote-externalities --features remote-test - // can_build_one_small_pallet, it should work without any requirements. - let mut values = vec![]; - for (method, params) in batch { - let value = client - .request::>(method, params.clone()) - .await - .map_err(|e| { - log::error!( - target: LOG_TARGET, - "failed to decode key: {:?}. Error: {:?}", - params, - e - ); - }) - .unwrap_or_default(); - values.push(value); - } - - // let values = client.batch_request::>(batch).await.map_err(|e| { - // log::error!( - // target: LOG_TARGET, - // "failed to execute batch: {:?}. Error: {:?}", - // chunk_keys.iter().map(|k| HexDisplay::from(k)).collect::>(), - // e - // ); - // "batch failed." - // })?; + let values = client.batch_request::>(batch).await.map_err(|e| { + log::error!( + target: LOG_TARGET, + "failed to execute batch: {:?}. Error: {:?}", + chunk_keys.iter().map(|k| HexDisplay::from(k)).collect::>(), + e + ); + "batch failed." 
+ })?; assert_eq!(chunk_keys.len(), values.len()); @@ -700,7 +679,7 @@ impl Builder { info!(target: LOG_TARGET, "injecting a total of {} top keys", top_kv.len()); for (k, v) in top_kv { - // skip writing the child root data if + // skip writing the child root data. if is_default_child_storage_key(k.as_ref()) { continue } @@ -909,7 +888,9 @@ mod remote_tests { init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { - transport: "wss://kusama-rpc.polkadot.io".to_owned().into(), + // transport: "wss://kusama-rpc.polkadot.io".to_owned().into(), + transport: "ws://kianenigma-archive:9924".to_owned().into(), + // transport: "ws://localhost:9999".to_owned().into(), pallets: vec!["Crowdloan".to_owned()], ..Default::default() })) diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index bbf38d2767ec0..65eaad062b77a 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -484,6 +484,7 @@ impl State { transport: uri.to_owned().into(), state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), pallets: pallets.to_owned().unwrap_or_default(), + scrape_children: false, at, })) .inject_hashed_key( From d9d09fda4d8a2cb483aaa1329af39df6c4697418 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 27 Oct 2021 16:59:08 +0100 Subject: [PATCH 124/188] Apply suggestions from code review Co-authored-by: Guillaume Thiolliere --- frame/state-trie-migration/src/lib.rs | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index c2c231d9db52e..6db2844bc1009 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -18,7 +18,7 @@ //! # Pallet State Trie Migration //! //! Reads and writes all keys and values in the entire state in a systematic way. This is useful for -//! 
upgrading a chain to `StorageVersion::V2`, where all keys need to be touched. +//! upgrading a chain to [`sp-core::StateVersion::V1`], where all keys need to be touched. //! //! ## Migration Types //! @@ -108,6 +108,7 @@ pub mod pallet { pub(crate) type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; + /// The weight information of this pallet. pub trait WeightInfo { fn process_top_key(x: u32) -> Weight; } @@ -244,6 +245,8 @@ pub mod pallet { break } } + + // accumulate dynamic data into the storage items. self.size = self.size.saturating_add(self.dyn_size); self.child_items = self.child_items.saturating_add(self.dyn_child_items); self.top_items = self.top_items.saturating_add(self.dyn_top_items); @@ -259,9 +262,6 @@ pub mod pallet { /// Migrate AT MOST ONE KEY. This can be either a top or a child key. /// - /// The only exception to this is that when the last key of the child tree is migrated, then - /// the top tree under which the child tree lives is also migrated. - /// /// This function is the core of this entire pallet. fn migrate_tick(&mut self) { match (self.current_top.as_ref(), self.current_child.as_ref()) { @@ -275,14 +275,15 @@ pub mod pallet { { // no child migration at hand, but one will begin here. 
let maybe_first_child_key = { - let child_top_key = Pallet::::child_io_key(top_key); - sp_io::default_child_storage::next_key(child_top_key, &vec![]) + let child_root = Pallet::::child_io_key(top_key); + sp_io::default_child_storage::next_key(child_root, &vec![]) }; if let Some(first_child_key) = maybe_first_child_key { self.current_child = Some(first_child_key); self.prev_tick_child = true; self.migrate_child(); } else { + log!(warn, "{:?} is a child root but it seems to have no inner keys", top_key); self.migrate_top(); } } else { @@ -309,13 +310,13 @@ pub mod pallet { self.current_child.clone().expect("value checked to be `Some`; qed"); let current_top = self.current_top.clone().expect("value checked to be `Some`; qed"); - let child_top_key = Pallet::::child_io_key(¤t_top); - if let Some(data) = sp_io::default_child_storage::get(child_top_key, ¤t_child) { + let child_root = Pallet::::child_io_key(¤t_top); + if let Some(data) = sp_io::default_child_storage::get(child_root, ¤t_child) { self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); - sp_io::default_child_storage::set(child_top_key, ¤t_child, &data) + sp_io::default_child_storage::set(child_root, ¤t_child, &data) } self.dyn_child_items.saturating_inc(); - let next_key = sp_io::default_child_storage::next_key(child_top_key, ¤t_child); + let next_key = sp_io::default_child_storage::next_key(child_root, ¤t_child); self.current_child = next_key; log!( trace, From 3eaa235d9e85465065bebc1f036e395d5e270588 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 27 Oct 2021 17:14:15 +0100 Subject: [PATCH 125/188] Apply suggestions from code review Co-authored-by: Guillaume Thiolliere --- primitives/state-machine/src/testing.rs | 4 ++-- primitives/storage/src/lib.rs | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index bdbbe174e4e8b..5b16d0892684d 100644 --- 
a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -162,14 +162,14 @@ where /// Insert key/value into backend. /// - /// This only supports inserting `top` keys. + /// This only supports inserting keys in `top` trie. pub fn insert(&mut self, k: StorageKey, v: StorageValue) { self.backend.insert(vec![(None, vec![(k, Some(v))])], self.state_version); } /// Insert key/value into backend. /// - /// This only supports inserting `top` keys. + /// This only supports inserting keys in child tries. pub fn insert_child(&mut self, c: sp_core::storage::ChildInfo, k: StorageKey, v: StorageValue) { self.backend.insert(vec![(Some(c), vec![(k, Some(v))])], self.state_version); } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 51b0db49e7017..da1edbaf689f2 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -217,6 +217,10 @@ pub mod well_known_keys { key.starts_with(CHILD_STORAGE_KEY_PREFIX) } + /// Whether a key is a default child storage key. + /// + /// This is convenience function which basically checks if the given `key` starts + /// with `DEFAULT_CHILD_STORAGE_KEY_PREFIX` and doesn't do anything apart from that. pub fn is_default_child_storage_key(key: &[u8]) -> bool { key.starts_with(DEFAULT_CHILD_STORAGE_KEY_PREFIX) } From ef556f02673636f9f83725fe5093a683b5166f76 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 28 Oct 2021 11:03:39 +0100 Subject: [PATCH 126/188] Update frame/state-trie-migration/src/lib.rs Co-authored-by: Joshy Orndorff --- frame/state-trie-migration/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 6db2844bc1009..674163aaa7890 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -55,7 +55,7 @@ //! //! 
The (minor) caveat of this approach is that we cannot know in advance how many bytes reading a //! certain number of keys will incur. To overcome this, the runtime needs to configure this pallet -//! with a `SignedDepositPerItem`. This is be per-item deposit that the origin of the signed +//! with a `SignedDepositPerItem`. This is the per-item deposit that the origin of the signed //! migration transactions need to have in their account (on top of the normal fee) and if the size //! witness data that they claim is incorrect, this deposit is slashed. //! From 9ff5ee94a22c8619a2ae053a9dd2ea77379af110 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 28 Oct 2021 11:04:25 +0100 Subject: [PATCH 127/188] Update frame/state-trie-migration/src/lib.rs --- frame/state-trie-migration/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 674163aaa7890..3ae9cf4c23624 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -788,6 +788,7 @@ pub mod pallet { warn, "some data seems to be stored under key {:?}, which is a non-default \ child-trie. 
This is a logical error and shall not happen.", + Self::halt(); HexDisplay::from(root), ); Default::default() From a0e5c5891e2864b30e9d0d86776c78040567ab1c Mon Sep 17 00:00:00 2001 From: kianenigma Date: Sat, 30 Oct 2021 17:08:02 +0200 Subject: [PATCH 128/188] brand new version --- bin/node/runtime/src/lib.rs | 14 +- frame/state-trie-migration/src/lib.rs | 662 +++++++++++++------- utils/frame/remote-externalities/src/lib.rs | 10 +- 3 files changed, 465 insertions(+), 221 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 4c4c2ad6bd7f0..3618432d57a24 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1241,14 +1241,20 @@ impl pallet_transaction_storage::Config for Runtime { type WeightInfo = pallet_transaction_storage::weights::SubstrateWeight; } -#[cfg(feature = "runtime-benchmarks")] +parameter_types! { + pub const SignedMigrationMaxLimits: pallet_state_trie_migration::MigrationLimits = + pallet_state_trie_migration::MigrationLimits { size: 1024 * 512, item: 512 }; + pub const SignedDepositPerItem: Balance = 1 * DOLLARS; +} impl pallet_state_trie_migration::Config for Runtime { type Event = Event; type ControlOrigin = EnsureRoot; type Currency = Balances; - type SignedDepositPerItem = (); - type Priority = (); - type OffchainRepeat = (); + type SignedDepositPerItem = SignedDepositPerItem; + type SignedMigrationMaxLimits = SignedMigrationMaxLimits; + type UnsignedPriority = ImOnlineUnsignedPriority; + type UnsignedBackOff = frame_support::traits::ConstU32<5>; + type OffchainRepeat = frame_support::traits::ConstU32<3>; type WeightInfo = (); } diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index c2c231d9db52e..f432b59cc9a8c 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -83,7 +83,6 @@ macro_rules! 
log { #[frame_support::pallet] pub mod pallet { - use frame_benchmarking::Zero; use frame_support::{ dispatch::TransactionPriority, ensure, @@ -101,7 +100,7 @@ pub mod pallet { }; use sp_runtime::{ offchain::storage::{MutateStorageError, StorageValueRef}, - traits::{Bounded, Saturating}, + traits::{Saturating, Zero}, }; use sp_std::prelude::*; @@ -110,12 +109,16 @@ pub mod pallet { pub trait WeightInfo { fn process_top_key(x: u32) -> Weight; + fn continue_migrate() -> Weight; } impl WeightInfo for () { fn process_top_key(_: u32) -> Weight { 1000000 } + fn continue_migrate() -> Weight { + 1000000 + } } /// A migration task stored in state. @@ -177,7 +180,7 @@ pub mod pallet { pub(crate) _ph: sp_std::marker::PhantomData, } - #[cfg(any(feature = "std", feature = "runtime-benchmarks"))] + #[cfg(any(feature = "std", feature = "runtime-benchmarks", feature = "try-runtime"))] impl sp_std::fmt::Debug for MigrationTask { fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { f.debug_struct("MigrationTask") @@ -219,11 +222,22 @@ pub mod pallet { impl MigrationTask { /// Return true if the task is finished. - #[cfg(test)] pub(crate) fn finished(&self) -> bool { self.current_top.is_none() && self.current_child.is_none() } + /// Returns `true` if the task fully complies with the given limits. + pub(crate) fn fully_complies_with(&self, limits: MigrationLimits) -> bool { + self.dyn_total_items() <= limits.item && self.dyn_size <= limits.size + } + + /// Check if there's any work left, or if we have exhausted the limits already. + fn exhausted(&self, limits: MigrationLimits) -> bool { + self.current_top.is_none() || + self.dyn_total_items() >= limits.item || + self.dyn_size >= limits.size + } + /// get the total number of keys affected by the current task. pub(crate) fn dyn_total_items(&self) -> u32 { self.dyn_child_items.saturating_add(self.dyn_top_items) @@ -237,7 +251,14 @@ pub mod pallet { /// bounded (e.g. 
a parachain), but it is acceptable otherwise (relay chain, offchain /// workers). pub(crate) fn migrate_until_exhaustion(&mut self, limits: MigrationLimits) { - log!(debug, "running migrations until {:?}", limits); + log!(debug, "running migrations on top of {:?} until {:?}", self, limits); + + if limits.item.is_zero() || limits.size.is_zero() { + // handle this minor edge case, else we would call `migrate_tick` at least once. + log!(warn, "limits are zero. stopping"); + return + } + loop { self.migrate_tick(); if self.exhausted(limits) { @@ -250,18 +271,8 @@ pub mod pallet { log!(debug, "finished with {:?}", self); } - /// Check if there's any work left, or if we have exhausted the limits already. - fn exhausted(&self, limits: MigrationLimits) -> bool { - self.current_top.is_none() || - self.dyn_total_items() >= limits.item || - self.dyn_size >= limits.size - } - /// Migrate AT MOST ONE KEY. This can be either a top or a child key. /// - /// The only exception to this is that when the last key of the child tree is migrated, then - /// the top tree under which the child tree lives is also migrated. - /// /// This function is the core of this entire pallet. fn migrate_tick(&mut self) { match (self.current_top.as_ref(), self.current_child.as_ref()) { @@ -270,29 +281,51 @@ pub mod pallet { self.migrate_child(); }, (Some(ref top_key), None) => { - if top_key.starts_with(DEFAULT_CHILD_STORAGE_KEY_PREFIX) && - !self.prev_tick_child - { - // no child migration at hand, but one will begin here. - let maybe_first_child_key = { - let child_top_key = Pallet::::child_io_key(top_key); - sp_io::default_child_storage::next_key(child_top_key, &vec![]) - }; - if let Some(first_child_key) = maybe_first_child_key { - self.current_child = Some(first_child_key); - self.prev_tick_child = true; - self.migrate_child(); - } else { + // we have a top key and no child key. 3 possibilities exist: + // 1. we continue the top key migrations. + // 2. 
this is the root of a child key, and we start processing child keys (and + // should call `migrate_child`). + // 3. this is the root of a child key, and we are finishing all child-keys (and + // should call `migrate_top`). + + // NOTE: this block is written intentionally to verbosely for easy of + // verification. + match ( + top_key.starts_with(DEFAULT_CHILD_STORAGE_KEY_PREFIX), + self.prev_tick_child, + ) { + (true, true) => { + // we're done with migrating a child-root. + self.prev_tick_child = false; self.migrate_top(); - } - } else { - self.prev_tick_child = false; - self.migrate_top(); - } + }, + (true, false) => { + // start going into a child key. + let maybe_first_child_key = { + let child_top_key = Pallet::::child_io_key(top_key); + sp_io::default_child_storage::next_key(child_top_key, &vec![]) + }; + if let Some(first_child_key) = maybe_first_child_key { + self.current_child = Some(first_child_key); + self.prev_tick_child = true; + self.migrate_child(); + } else { + self.migrate_top(); + } + }, + (false, true) => { + // should never happen. + log!(error, "LOGIC ERROR: unreachable code [0]."); + Pallet::::halt(); + }, + (false, false) => { + // continue the top key migration + self.migrate_top(); + }, + }; }, (None, Some(_)) => { - // TODO: test edge case: last top key has child - log!(error, "LOGIC ERROR: unreachable code."); + log!(error, "LOGIC ERROR: unreachable code [1]."); Pallet::::halt() }, (None, None) => { @@ -330,17 +363,22 @@ pub mod pallet { /// It updates the dynamic counters. 
fn migrate_top(&mut self) { let current_top = self.current_top.clone().expect("value checked to be `Some`; qed"); - if let Some(data) = sp_io::storage::get(¤t_top) { + let added_size = if let Some(data) = sp_io::storage::get(¤t_top) { self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); sp_io::storage::set(¤t_top, &data); - } + data.len() as u32 + } else { + Zero::zero() + }; + self.dyn_top_items.saturating_inc(); let next_key = sp_io::storage::next_key(¤t_top); self.current_top = next_key; log!( trace, - "migrated top key {:?}, next task: {:?}", + "migrated top key {:?} with size {}, next_task = {:?}", sp_core::hexdisplay::HexDisplay::from(¤t_top), + added_size, self ); } @@ -350,9 +388,9 @@ pub mod pallet { #[derive(Clone, Copy, Encode, Decode, scale_info::TypeInfo, Default, Debug, PartialEq, Eq)] pub struct MigrationLimits { /// The byte size limit. - pub(crate) size: u32, + pub size: u32, /// The number of keys limit. - pub(crate) item: u32, + pub item: u32, } /// How a migration was computed. @@ -397,11 +435,20 @@ pub mod pallet { /// This should reflect the average storage value size in the worse case. type SignedDepositPerItem: Get>; + /// The maximum limits that the signed migration could use. + type SignedMigrationMaxLimits: Get; + /// The weight information of this pallet. type WeightInfo: WeightInfo; /// The priority used for unsigned transactions. - type Priority: Get; + type UnsignedPriority: Get; + + /// The number of items that offchain worker will subtract from the first item count that + /// causes an over-consumption. + /// + /// A value around 5-10 is reasonable. + type UnsignedBackOff: Get; /// The repeat frequency of offchain workers. type OffchainRepeat: Get; @@ -430,8 +477,8 @@ pub mod pallet { /// /// if set to `None`, then no unsigned migration happens. 
#[pallet::storage] - #[pallet::getter(fn unsigned_size_limit)] - pub type UnsignedSizeLimit = StorageValue<_, Option, ValueQuery>; + #[pallet::getter(fn unsigned_limits)] + pub type UnsignedLimits = StorageValue<_, Option, ValueQuery>; #[pallet::call] impl Pallet { @@ -445,7 +492,7 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { T::ControlOrigin::ensure_origin(origin)?; ensure!( - maybe_config.is_some() ^ Self::unsigned_size_limit().is_some(), + maybe_config.is_some() ^ Self::unsigned_limits().is_some(), "unsigned and auto migration cannot co-exist" ); AutoLimits::::put(maybe_config); @@ -458,14 +505,14 @@ pub mod pallet { #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] pub fn control_unsigned_migration( origin: OriginFor, - maybe_size_limit: Option, + maybe_limit: Option, ) -> DispatchResultWithPostInfo { T::ControlOrigin::ensure_origin(origin)?; ensure!( - maybe_size_limit.is_some() ^ Self::auto_limits().is_some(), + maybe_limit.is_some() ^ Self::auto_limits().is_some(), "unsigned and auto migration cannot co-exist" ); - UnsignedSizeLimit::::put(maybe_size_limit); + UnsignedLimits::::put(maybe_limit); Ok(().into()) } @@ -474,35 +521,36 @@ pub mod pallet { /// This can only be valid if it is generated from the local node, which means only /// validators can generate this call. /// - /// It is guaranteed that migrating `item_limit` will not cause the total read bytes to - /// exceed [`UnsignedSizeLimit`]. + /// The `item_limit` is the maximum number of items that can be read whilst ensuring that + /// the migration does not go over `Self::unsigned_limits().size`. /// - /// The `size_limit` in the call arguments is for weighing. THe `_task` argument in the call - /// is for validation and ensuring that the migration process has not ticked forward since - /// the call was generated. 
- #[pallet::weight(Pallet::::dynamic_weight(*item_limit, *witness_size_limit))] + /// The `witness_size` should always be equal to `Self::unsigned_limits().size` and is only + /// used for weighing. + #[pallet::weight(Pallet::::dynamic_weight(*item_limit, *witness_size))] pub fn continue_migrate_unsigned( origin: OriginFor, item_limit: u32, - witness_size_limit: u32, + witness_size: u32, _witness_task: MigrationTask, ) -> DispatchResultWithPostInfo { ensure_none(origin)?; - let unsigned_size_limit = - Self::unsigned_size_limit().ok_or("unsigned limit not set, tx not allowed.")?; - ensure!(witness_size_limit == unsigned_size_limit, "wrong size limit witness data"); + let chain_limits = + Self::unsigned_limits().ok_or("unsigned limit not set, tx not allowed.")?; + ensure!(witness_size == chain_limits.size, "wrong witness data"); let mut task = Self::migration_process(); // pre-dispatch and validate-unsigned already assure this. debug_assert_eq!(task, _witness_task); - let limits = MigrationLimits { size: unsigned_size_limit, item: item_limit }; - task.migrate_until_exhaustion(limits); + // we run the task with the given item limit, and the chain size limit.. + task.migrate_until_exhaustion(MigrationLimits { + size: chain_limits.size, + item: item_limit, + }); - // we panic if the validator submitted a bad transaction, making it financially bad for - // them to cheat. We could relax this. Also, if a bug causes validators to mistakenly - // produce bad transactions, they can avoid it by disabling offchain workers. - assert!(task.dyn_size < unsigned_size_limit); + // .. and we assert that the size limit must have been fully met with the given item + // limit. + assert!(task.fully_complies_with(chain_limits)); Self::deposit_event(Event::::Migrated( task.dyn_top_items, @@ -514,7 +562,7 @@ pub mod pallet { Ok(().into()) } - /// Migrate AT MOST `item_limit` keys by reading and writing them. + /// Continue the migration for the given `limits`. 
/// /// The dispatch origin of this call can be any signed account. /// @@ -523,26 +571,36 @@ pub mod pallet { /// /// The sum of the byte length of all the data read must be provided for up-front /// fee-payment and weighing. - #[pallet::weight(Pallet::::dynamic_weight(*item_limit, *size_limit))] + #[pallet::weight( + // the migration process + Pallet::::dynamic_weight(limits.item, * real_size) + // rest of the operations, like deposit etc. + + T::WeightInfo::continue_migrate() + )] pub fn continue_migrate( origin: OriginFor, - item_limit: u32, - size_limit: u32, + limits: MigrationLimits, + real_size: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; + let max_limits = T::SignedMigrationMaxLimits::get(); + ensure!( + limits.size <= max_limits.size && limits.item <= max_limits.item, + "max signed limits not respected" + ); + // ensure they can pay more than the fee. - let deposit = T::SignedDepositPerItem::get().saturating_mul(item_limit.into()); + let deposit = T::SignedDepositPerItem::get().saturating_mul(limits.item.into()); ensure!(T::Currency::can_slash(&who, deposit), "not enough funds"); let mut task = Self::migration_process(); - task.migrate_until_exhaustion(MigrationLimits { size: size_limit, item: item_limit }); + task.migrate_until_exhaustion(limits); // ensure that the migration witness data was correct. - if item_limit != task.dyn_total_items() || size_limit != task.dyn_size { + if real_size != task.dyn_size { // let the imbalance burn. let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); - // defensive. 
debug_assert!(_remainder.is_zero()); return Err("Wrong witness data".into()) } @@ -552,6 +610,7 @@ pub mod pallet { task.dyn_child_items, MigrationCompute::Signed, )); + MigrationProcess::::put(task); Ok(().into()) } @@ -677,38 +736,59 @@ pub mod pallet { } log!(debug, "started offchain worker thread."); - if let Some(unsigned_size_limit) = Self::unsigned_size_limit() { + if let Some(chain_limits) = Self::unsigned_limits() { let mut task = Self::migration_process(); - let limits = - MigrationLimits { size: unsigned_size_limit, item: Bounded::max_value() }; - task.migrate_until_exhaustion(limits); + if task.finished() { + log!(warn, "task is finished, remove `unsigned_limits`."); + return + } + + task.migrate_until_exhaustion(chain_limits); + + if task.dyn_size > chain_limits.size { + // previous `migrate_until_exhaustion` finished with too much size consumption. + // This most likely means that if it migrated `x` items, now we need to migrate + // `x - 1` items. But, we migrate less by 5 by default, since the state may have + // changed between the execution of this offchain worker and time that the + // transaction reaches the chain. + log!( + debug, + "reducing item count of {} by {}.", + task.dyn_total_items(), + T::UnsignedBackOff::get(), + ); + let mut new_task = Self::migration_process(); + new_task.migrate_until_exhaustion(MigrationLimits { + size: chain_limits.size, + item: task.dyn_total_items().saturating_sub(T::UnsignedBackOff::get()), + }); + task = new_task; + } + + let item_limit = task.dyn_total_items(); + if item_limit.is_zero() { + log!(warn, "can't fit anything in a migration."); + return + } - // the last item cause us to go beyond the size limit, so we subtract one. we are - // making this assumption based on the impl of `migrate_until_exhaustion`. - let safe_items_to_read = task.dyn_total_items().saturating_sub(1); + // with the above if-statement, the limits must now be STRICTLY respected, so we + // panic and crash the OCW otherwise. 
+ assert!( + task.fully_complies_with(chain_limits), + "runtime::state-trie-migration: The offchain worker failed to create transaction." + ); let original_task = Self::migration_process(); + let call = Call::continue_migrate_unsigned { - item_limit: safe_items_to_read, - // note that this must be simply the limit, not the actual bytes read. - witness_size_limit: unsigned_size_limit, + item_limit, + witness_size: chain_limits.size, witness_task: original_task, }; + match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { Ok(_) => { - log!( - info, - "submitted a call to migrate {} items of {} bytes.", - safe_items_to_read, - { - let mut t = Self::migration_process(); - t.migrate_until_exhaustion(MigrationLimits { - item: safe_items_to_read, - size: Bounded::max_value(), - }); - t.dyn_size - } - ) + log!(info, "submitted a call to migrate {} items.", item_limit) }, Err(why) => { log!(warn, "failed to submit a call to the pool {:?}", why) @@ -734,7 +814,7 @@ pub mod pallet { } ValidTransaction::with_tag_prefix("StorageVersionMigration") - .priority(T::Priority::get()) + .priority(T::UnsignedPriority::get()) // deduplicate based on task data. .and_provides(witness_task) .longevity(5) @@ -773,7 +853,7 @@ pub mod pallet { /// Put a stop to all ongoing migrations. fn halt() { - UnsignedSizeLimit::::kill(); + UnsignedLimits::::kill(); AutoLimits::::kill(); } @@ -847,6 +927,10 @@ mod benchmarks { const KEY: &'static [u8] = b"key"; frame_benchmarking::benchmarks! { + continue_migrate { + let null = MigrationLimits::default(); + let caller = frame_benchmarking::whitelisted_caller(); + }: _(frame_system::RawOrigin::Signed(caller), null, 0) process_top_key { let v in 1 .. (4 * 1024 * 1024); @@ -866,6 +950,7 @@ mod mock { use parking_lot::RwLock; use std::sync::Arc; + use super::*; use crate as pallet_state_trie_migration; use frame_support::{parameter_types, traits::Hooks}; use frame_system::EnsureRoot; @@ -928,7 +1013,9 @@ mod mock { parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; - pub const OffchainRepeat: u32 = 4; + pub const OffchainRepeat: u32 = 1; + pub const SignedDepositPerItem: u64 = 1; + pub const SignedMigrationMaxLimits: MigrationLimits = MigrationLimits { size: 1024, item: 5 }; } impl pallet_balances::Config for Test { @@ -947,9 +1034,11 @@ mod mock { type Event = Event; type ControlOrigin = EnsureRoot; type Currency = Balances; - type SignedDepositPerItem = (); + type SignedDepositPerItem = SignedDepositPerItem; + type SignedMigrationMaxLimits = SignedMigrationMaxLimits; type WeightInfo = (); - type Priority = (); + type UnsignedPriority = (); + type UnsignedBackOff = frame_support::traits::ConstU32<2>; type OffchainRepeat = OffchainRepeat; } @@ -963,22 +1052,27 @@ mod mock { pub type Extrinsic = sp_runtime::testing::TestXt; - pub fn new_test_ext(version: StateVersion) -> sp_io::TestExternalities { + pub fn new_test_ext(version: StateVersion, with_pallets: bool) -> sp_io::TestExternalities { use sp_core::storage::ChildInfo; - let storage = sp_core::storage::Storage { + let mut custom_storage = sp_core::storage::Storage { top: vec![ - (b"key1".to_vec(), vec![1u8; 10]), // 6b657931 - (b"key2".to_vec(), vec![2u8; 20]), // 6b657932 - (b"key3".to_vec(), vec![3u8; 30]), // 6b657934 - (b"key4".to_vec(), vec![4u8; 40]), // 6b657934 - (sp_core::storage::well_known_keys::CODE.to_vec(), vec![1u8; 100]), + (b"key1".to_vec(), vec![1u8; 10]), // 6b657931 + (b"key2".to_vec(), vec![1u8; 20]), // 6b657931 + (b"key3".to_vec(), vec![1u8; 30]), // 6b657931 + (b"key4".to_vec(), vec![1u8; 40]), // 6b657931 + (b"key5".to_vec(), vec![2u8; 50]), // 6b657932 + (b"key6".to_vec(), vec![3u8; 50]), // 6b657934 + (b"key7".to_vec(), vec![4u8; 50]), // 6b657934 + (b"key8".to_vec(), vec![4u8; 50]), // 6b657934 + (b"key9".to_vec(), vec![4u8; 50]), // 6b657934 + (b"CODE".to_vec(), vec![1u8; 100]), // 434f4445 ] .into_iter() .collect(), children_default: vec![ ( - b"chk1".to_vec(), + b"chk1".to_vec(), // 63686b31 
sp_core::storage::StorageChild { data: vec![ (b"key1".to_vec(), vec![1u8; 10]), @@ -1006,14 +1100,29 @@ mod mock { .collect(), }; + if with_pallets { + frame_system::GenesisConfig::default() + .assimilate_storage::(&mut custom_storage) + .unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![(1, 1000)] } + .assimilate_storage(&mut custom_storage) + .unwrap(); + } + sp_tracing::try_init_simple(); - (storage, version).into() + (custom_storage, version).into() } pub fn new_offchain_ext( version: StateVersion, + with_pallets: bool, ) -> (sp_io::TestExternalities, Arc>) { - let mut ext = new_test_ext(version); + let mut ext = new_test_ext(version, with_pallets); + let pool_state = offchainify(&mut ext); + (ext, pool_state) + } + + pub fn offchainify(ext: &mut sp_io::TestExternalities) -> Arc> { let (offchain, _offchain_state) = TestOffchainExt::new(); let (pool, pool_state) = TestTransactionPoolExt::new(); @@ -1021,7 +1130,7 @@ mod mock { ext.register_extension(OffchainWorkerExt::new(offchain)); ext.register_extension(TransactionPoolExt::new(pool)); - (ext, pool_state) + pool_state } pub fn run_to_block(n: u32) -> H256 { @@ -1070,111 +1179,172 @@ mod mock { #[cfg(test)] mod test { use super::{mock::*, *}; - use sp_runtime::StateVersion; + use sp_runtime::{traits::Bounded, StateVersion}; use std::sync::Arc; #[test] - fn auto_migrate_works_single_item_per_block() { - let mut ext = new_test_ext(StateVersion::V0); - let root_upgraded = ext.execute_with(|| { - assert_eq!(AutoLimits::::get(), None); - assert_eq!(MigrationProcess::::get(), Default::default()); - - // nothing happens if we don't set the limits. - let _ = run_to_block(50); - assert_eq!(MigrationProcess::::get(), Default::default()); - - // this should allow 1 item per block to be migrated. 
- AutoLimits::::put(Some(MigrationLimits { item: 1, size: 150 })); + fn auto_migrate_works() { + let run_with_limits = |limit, from, until| { + let mut ext = new_test_ext(StateVersion::V0, false); + let root_upgraded = ext.execute_with(|| { + assert_eq!(AutoLimits::::get(), None); + assert_eq!(MigrationProcess::::get(), Default::default()); + + // nothing happens if we don't set the limits. + let _ = run_to_block(from); + assert_eq!(MigrationProcess::::get(), Default::default()); + + // this should allow 1 item per block to be migrated. + AutoLimits::::put(Some(limit)); + + let root = run_to_block(until); + + // eventually everything is over. + assert!(matches!( + StateTrieMigration::migration_process(), + MigrationTask { current_child: None, current_top: None, .. } + )); + root + }); + + let mut ext2 = new_test_ext(StateVersion::V1, false); + let root = ext2.execute_with(|| { + // update ex2 to contain the new items + let _ = run_to_block(from); + AutoLimits::::put(Some(limit)); + run_to_block(until) + }); + assert_eq!(root, root_upgraded); + }; - let root = run_to_block(70); + // single item + run_with_limits(MigrationLimits { item: 1, size: 1000 }, 10, 100); + // multi-item + run_with_limits(MigrationLimits { item: 5, size: 1000 }, 10, 100); + // multi-item, based on size. Note that largest value is 100 bytes. + run_with_limits(MigrationLimits { item: 1000, size: 128 }, 10, 100); + // unbounded + run_with_limits( + MigrationLimits { item: Bounded::max_value(), size: Bounded::max_value() }, + 10, + 100, + ); + } - // eventually everything is over. - assert!(matches!( - StateTrieMigration::migration_process(), - MigrationTask { current_child: None, current_top: None, .. 
} - )); - root - }); + #[test] + fn ocw_migration_works() { + let run_with_limits = |limits, from, until| { + let (mut ext, pool) = new_offchain_ext(StateVersion::V0, false); + let root_upgraded = ext.execute_with(|| { + assert_eq!(UnsignedLimits::::get(), None); + assert_eq!(MigrationProcess::::get(), Default::default()); + + // nothing happens if we don't set the limits. + run_to_block_and_drain_pool(from, Arc::clone(&pool)); + assert_eq!(MigrationProcess::::get(), Default::default()); + + // allow 2 items per run + UnsignedLimits::::put(Some(limits)); + + run_to_block_and_drain_pool(until, Arc::clone(&pool)) + }); + + let (mut ext2, pool2) = new_offchain_ext(StateVersion::V1, false); + let root = ext2.execute_with(|| { + // update ex2 to contain the new items + run_to_block_and_drain_pool(from, Arc::clone(&pool2)); + UnsignedLimits::::put(Some(limits)); + run_to_block_and_drain_pool(until, Arc::clone(&pool2)) + }); + assert_eq!(root, root_upgraded); + }; - let mut ext2 = new_test_ext(StateVersion::V1); - let root = ext2.execute_with(|| { - // update ex2 to contain the new items - let _ = run_to_block(50); - AutoLimits::::put(Some(MigrationLimits { item: 1, size: 150 })); - run_to_block(70) - }); - assert_eq!(root, root_upgraded); + // single item + run_with_limits(MigrationLimits { item: 1, size: 1000 }, 10, 100); + // multi-item + run_with_limits(MigrationLimits { item: 5, size: 1000 }, 10, 100); + // multi-item, based on size + run_with_limits(MigrationLimits { item: 1000, size: 128 }, 10, 100); + // unbounded + run_with_limits( + MigrationLimits { item: Bounded::max_value(), size: Bounded::max_value() }, + 10, + 100, + ); } #[test] - fn auto_migrate_works_multi_item_per_block() { - let mut ext = new_test_ext(StateVersion::V0); - let root_upgraded = ext.execute_with(|| { - assert_eq!(AutoLimits::::get(), None); + fn signed_migrate_works() { + new_test_ext(StateVersion::V0, true).execute_with(|| { assert_eq!(MigrationProcess::::get(), Default::default()); - // 
nothing happens if we don't set the limits. - run_to_block(50); - assert_eq!(MigrationProcess::::get(), Default::default()); + // can't submit if limit is too high. + frame_support::assert_err!( + StateTrieMigration::continue_migrate( + Origin::signed(1), + MigrationLimits { item: 5, size: sp_runtime::traits::Bounded::max_value() }, + Bounded::max_value(), + ), + "max signed limits not respected" + ); - // this should allow 1 item per block to be migrated. - AutoLimits::::put(Some(MigrationLimits { item: 5, size: 150 })); + // can't submit if poor. + frame_support::assert_err!( + StateTrieMigration::continue_migrate( + Origin::signed(2), + MigrationLimits { item: 5, size: 100 }, + 100, + ), + "not enough funds" + ); - let root = run_to_block(70); + // migrate all keys in a series of submissions + while !MigrationProcess::::get().finished() { + // first we compute the task to get the accurate consumption. + let mut task = StateTrieMigration::migration_process(); + task.migrate_until_exhaustion(SignedMigrationMaxLimits::get()); - // eventually everything is over. - assert!(matches!( - StateTrieMigration::migration_process(), - MigrationTask { current_child: None, current_top: None, .. } - )); + frame_support::assert_ok!(StateTrieMigration::continue_migrate( + Origin::signed(1), + SignedMigrationMaxLimits::get(), + task.dyn_size + )); - root - }); + // no funds should remain reserved. + assert_eq!(Balances::reserved_balance(&1), 0); - let mut ext2 = new_test_ext(StateVersion::V1); - let root = ext2.execute_with(|| { - // update ex2 to contain the new items - run_to_block(50); - AutoLimits::::put(Some(MigrationLimits { item: 5, size: 150 })); - run_to_block(70) + // and the task should be updated + assert!(matches!( + StateTrieMigration::migration_process(), + MigrationTask { size: x, .. 
} if x > 0, + )); + } }); - assert_eq!(root, root_upgraded); } #[test] - fn unsigned_migration_works() { - let (mut ext, pool) = new_offchain_ext(StateVersion::V0); - ext.execute_with(|| { - assert_eq!(UnsignedSizeLimit::::get(), None); - assert_eq!(MigrationProcess::::get(), Default::default()); - - // nothing happens if we don't set the limits. - run_to_block_and_drain_pool(50, Arc::clone(&pool)); - assert_eq!(MigrationProcess::::get(), Default::default()); - - // allow 50 bytes per run. - UnsignedSizeLimit::::put(Some(50)); + fn custom_migrate_works() { + new_test_ext(StateVersion::V0, true).execute_with(|| { + frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( + Origin::signed(1), + vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], + 10 + 20 + 30, + )); - run_to_block_and_drain_pool(70, Arc::clone(&pool)); + // no funds should remain reserved. + assert_eq!(Balances::reserved_balance(&1), 0); }); } - - #[test] - fn manual_migrate_works() { - todo!("test manually signed migration"); - } - - #[test] - fn custom_migrate_works() { - todo!("test custom keys to be migrated via signed") - } } #[cfg(all(test, feature = "remote-tests"))] mod remote_tests { + use std::sync::Arc; + use super::{mock::*, *}; + use mock::run_to_block_and_drain_pool; use remote_externalities::{Mode, OnlineConfig}; + use sp_runtime::traits::Bounded; // we only use the hash type from this (I hope). type Block = sp_runtime::testing::Block; @@ -1182,37 +1352,97 @@ mod remote_tests { #[tokio::test] async fn on_initialize_migration() { sp_tracing::try_init_simple(); - let mut ext = remote_externalities::Builder::::new() - .mode(Mode::Online(OnlineConfig { - transport: std::env!("WS_API").to_owned().into(), - scrape_children: true, - ..Default::default() - })) - .state_version(sp_core::StateVersion::V0) - .build() - .await - .unwrap(); - - ext.execute_with(|| { - // requires the block number type in our tests to be same as with mainnet, u32. 
- let mut now = frame_system::Pallet::::block_number(); - let mut duration = 0; - AutoLimits::::put(Some(MigrationLimits { item: 1000, size: 4 * 1024 * 1024 })); - loop { - run_to_block(now + 1); - if StateTrieMigration::migration_process().finished() { - break + let run_with_limits = |limits| async move { + let mut ext = remote_externalities::Builder::::new() + .mode(Mode::Offline(remote_externalities::OfflineConfig { + state_snapshot: "/home/kianenigma/remote-builds/polka-state".to_owned().into(), + })) + // .mode(Mode::Online(OnlineConfig { + // transport: std::env!("WS_API").to_owned().into(), + // scrape_children: true, + // ..Default::default() + // })) + .state_version(sp_core::StateVersion::V0) + .build() + .await + .unwrap(); + + ext.execute_with(|| { + // requires the block number type in our tests to be same as with mainnet, u32. + let mut now = frame_system::Pallet::::block_number(); + let mut duration = 0; + AutoLimits::::put(Some(limits)); + loop { + run_to_block(now + 1); + if StateTrieMigration::migration_process().finished() { + break + } + duration += 1; + now += 1; } - duration += 1; - now += 1; - } - log::info!( - target: LOG_TARGET, - "finished migration in {} block, final state of the task: {:?}", - duration, - StateTrieMigration::migration_process() - ); - }) + log::info!( + target: LOG_TARGET, + "finished on_initialize migration in {} block, final state of the task: {:?}", + duration, + StateTrieMigration::migration_process() + ); + }) + }; + // item being the bottleneck + run_with_limits(MigrationLimits { item: 1000, size: 4 * 1024 * 1024 }).await; + // size being the bottleneck + run_with_limits(MigrationLimits { item: Bounded::max_value(), size: 4 * 1024 }).await; + } + + #[tokio::test] + async fn offchain_worker_migration() { + sp_tracing::try_init_simple(); + let run_with_limits = |limits| async move { + let mut ext = remote_externalities::Builder::::new() + // .mode(Mode::Online(OnlineConfig { + // transport: 
std::env!("WS_API").to_owned().into(), + // scrape_children: true, + // state_snapshot: Some( + // "/home/kianenigma/remote-builds/ksm-state".to_owned().into(), + // ), + // ..Default::default() + // })) + .mode(Mode::Offline(remote_externalities::OfflineConfig { + state_snapshot: "/home/kianenigma/remote-builds/ksm-state".to_owned().into(), + })) + .state_version(sp_core::StateVersion::V0) + .build() + .await + .unwrap(); + let pool_state = offchainify(&mut ext); + + ext.execute_with(|| { + // requires the block number type in our tests to be same as with mainnet, u32. + let mut now = frame_system::Pallet::::block_number(); + let mut duration = 0; + UnsignedLimits::::put(Some(limits)); + loop { + run_to_block_and_drain_pool(now + 1, Arc::clone(&pool_state)); + if StateTrieMigration::migration_process().finished() { + break + } + duration += 1; + now += 1; + } + + log::info!( + target: LOG_TARGET, + "finished offchain-worker migration in {} block, final state of the task: {:?}", + duration, + StateTrieMigration::migration_process() + ); + }) + }; + // item being the bottleneck + // run_with_limits(MigrationLimits { item: 1000, size: 4 * 1024 * 1024 }).await; + // size being the bottleneck + run_with_limits(MigrationLimits { item: Bounded::max_value(), size: 2 * 1024 * 1024 }) + .await; } } diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index c3322dda72ca2..5089dd2834671 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -175,6 +175,12 @@ pub struct SnapshotConfig { pub path: PathBuf, } +impl> From

for SnapshotConfig { + fn from(p: P) -> Self { + Self { path: p.into() } + } +} + impl SnapshotConfig { pub fn new>(path: P) -> Self { Self { path: path.into() } @@ -599,7 +605,9 @@ impl Builder { } child_kp }, - Mode::Offline(ref config) => self.load_child_snapshot(&config.state_snapshot.path)?, + Mode::Offline(ref config) => self.load_child_snapshot(&config.state_snapshot.path).map_err(|why| + log::warn!(target: LOG_TARGET, "failed to load child-key file due to {:?}", why) + ).unwrap_or_default(), _ => Default::default(), }; From b6aad5136b3861fe60af413a0cf9c872fcdaefcb Mon Sep 17 00:00:00 2001 From: kianenigma Date: Sat, 30 Oct 2021 17:13:20 +0200 Subject: [PATCH 129/188] fix build --- frame/state-trie-migration/src/lib.rs | 30 +++++++++++---------------- 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 3bae30aec12f0..4a49358aae971 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -870,9 +870,9 @@ pub mod pallet { warn, "some data seems to be stored under key {:?}, which is a non-default \ child-trie. 
This is a logical error and shall not happen.", - Self::halt(); HexDisplay::from(root), ); + Self::halt(); Default::default() }, } @@ -1358,14 +1358,11 @@ mod remote_tests { sp_tracing::try_init_simple(); let run_with_limits = |limits| async move { let mut ext = remote_externalities::Builder::::new() - .mode(Mode::Offline(remote_externalities::OfflineConfig { - state_snapshot: "/home/kianenigma/remote-builds/polka-state".to_owned().into(), + .mode(Mode::Online(OnlineConfig { + transport: std::env!("WS_API").to_owned().into(), + scrape_children: true, + ..Default::default() })) - // .mode(Mode::Online(OnlineConfig { - // transport: std::env!("WS_API").to_owned().into(), - // scrape_children: true, - // ..Default::default() - // })) .state_version(sp_core::StateVersion::V0) .build() .await @@ -1404,16 +1401,13 @@ mod remote_tests { sp_tracing::try_init_simple(); let run_with_limits = |limits| async move { let mut ext = remote_externalities::Builder::::new() - // .mode(Mode::Online(OnlineConfig { - // transport: std::env!("WS_API").to_owned().into(), - // scrape_children: true, - // state_snapshot: Some( - // "/home/kianenigma/remote-builds/ksm-state".to_owned().into(), - // ), - // ..Default::default() - // })) - .mode(Mode::Offline(remote_externalities::OfflineConfig { - state_snapshot: "/home/kianenigma/remote-builds/ksm-state".to_owned().into(), + .mode(Mode::Online(OnlineConfig { + transport: std::env!("WS_API").to_owned().into(), + scrape_children: true, + state_snapshot: Some( + "/home/kianenigma/remote-builds/ksm-state".to_owned().into(), + ), + ..Default::default() })) .state_version(sp_core::StateVersion::V0) .build() From a980e4010db43bffc411e60c46cd61de6ce9be8a Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Sat, 30 Oct 2021 16:15:01 +0100 Subject: [PATCH 130/188] Update frame/state-trie-migration/src/lib.rs Co-authored-by: Guillaume Thiolliere --- frame/state-trie-migration/src/lib.rs | 2 +- 1 file changed, 
1 insertion(+), 1 deletion(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 4a49358aae971..432805a32b5bc 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -45,7 +45,7 @@ //! be included by validators. //! //! This approach is safer, and ensures that the migration reads do not take more than a certain -//! amount, yet the do impose some work on the validators/collators. +//! amount, yet it does impose some work on the validators/collators. //! //! ### Signed migration //! From 013b3899a26bddb3b8b6357eb8457ea2507f0dbe Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Sat, 30 Oct 2021 16:15:09 +0100 Subject: [PATCH 131/188] Update frame/state-trie-migration/src/lib.rs Co-authored-by: Guillaume Thiolliere --- frame/state-trie-migration/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 432805a32b5bc..1bd7a728a91e7 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -135,7 +135,7 @@ pub mod pallet { pub(crate) current_top: Option>, /// The last child key that we have processed. /// - /// This is a child key under the current `self.last_top`. + /// This is a child key under the current `self.current_top`. /// /// If this is set, no further top keys are processed until the child key migration is /// complete. 
From d6bd51df57ef0027c37aa3ecc38a649702796c0a Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 4 Nov 2021 12:07:02 +0000 Subject: [PATCH 132/188] Update primitives/storage/src/lib.rs Co-authored-by: cheme --- primitives/storage/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index da1edbaf689f2..bb65557791139 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -168,8 +168,8 @@ pub struct StorageChild { pub struct Storage { /// Top trie storage data. pub top: StorageMap, - /// Children trie storage data. The key does not including prefix, for the `default` trie kind, - /// so this is exclusively for the `ChildType::ParentKeyId` tries. + /// Children trie storage data. Key does not include prefix, only for the `default` trie kind, + /// of `ChildType::ParentKeyId` type. pub children_default: std::collections::HashMap, StorageChild>, } From 82c8cb2f8454ba62b660572f5383aa030b8cc4df Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 4 Nov 2021 12:10:40 +0000 Subject: [PATCH 133/188] Update frame/state-trie-migration/src/lib.rs Co-authored-by: cheme --- frame/state-trie-migration/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 1bd7a728a91e7..cf07fe7ee70ce 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -679,7 +679,7 @@ pub mod pallet { let mut dyn_size = 0u32; for child_key in &child_keys { if let Some(data) = - sp_io::default_child_storage::get(Self::child_io_key(child_key), &top_key) + sp_io::default_child_storage::get(Self::child_io_key(top_key), &child_key) { dyn_size = dyn_size.saturating_add(data.len() as u32); sp_io::default_child_storage::set( From 
d75b23e72d0dfb76bbd32dd945619b5acadfa1e8 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 4 Nov 2021 12:11:02 +0000 Subject: [PATCH 134/188] Update frame/state-trie-migration/src/lib.rs Co-authored-by: cheme --- frame/state-trie-migration/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index cf07fe7ee70ce..1e954f14fb95a 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -683,8 +683,8 @@ pub mod pallet { { dyn_size = dyn_size.saturating_add(data.len() as u32); sp_io::default_child_storage::set( - Self::child_io_key(child_key), - &top_key, + Self::child_io_key(top_key), + &child_key, &data, ); } From 9b4837f0fd8e4504ee80ba1994c4dc207ff0443a Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 5 Nov 2021 12:26:12 +0100 Subject: [PATCH 135/188] fmt and opt-in feature to apply state change. 
--- client/consensus/babe/src/verification.rs | 4 +- .../src/protocol/notifications/behaviour.rs | 4 +- client/network/src/protocol/sync/blocks.rs | 2 +- client/network/src/service/tests.rs | 2 +- client/network/src/transactions.rs | 4 +- .../election-provider-multi-phase/src/lib.rs | 2 +- .../src/pallet/parse/pallet_struct.rs | 4 +- primitives/io/Cargo.toml | 1 + primitives/io/src/lib.rs | 4 +- .../runtime-interface/proc-macro/src/lib.rs | 24 +++++++-- .../host_function_interface.rs | 52 ++++++++++++++++--- .../proc-macro/src/runtime_interface/mod.rs | 6 ++- primitives/version/Cargo.toml | 1 + primitives/version/src/lib.rs | 7 +++ 14 files changed, 91 insertions(+), 26 deletions(-) diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index af118312dd07c..1554fa6de31be 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -114,7 +114,7 @@ where ); check_secondary_plain_header::(pre_hash, secondary, sig, &epoch)?; - } + }, PreDigest::SecondaryVRF(secondary) if epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() => { @@ -125,7 +125,7 @@ where ); check_secondary_vrf_header::(pre_hash, secondary, sig, &epoch)?; - } + }, _ => return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)), } diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index 01138e3207570..f66f1fbe9e95a 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -712,7 +712,7 @@ impl Notifications { timer: delay_id, timer_deadline: *backoff, }; - } + }, // Disabled => Enabled PeerState::Disabled { mut connections, backoff_until } => { @@ -2085,7 +2085,7 @@ impl NetworkBehaviour for Notifications { .boxed(), ); } - } + }, // We intentionally never remove elements from `delays`, and it may // thus contain obsolete entries. This is a normal situation. 
diff --git a/client/network/src/protocol/sync/blocks.rs b/client/network/src/protocol/sync/blocks.rs index 30ba7ffafeffc..ce4535dc0b45f 100644 --- a/client/network/src/protocol/sync/blocks.rs +++ b/client/network/src/protocol/sync/blocks.rs @@ -203,7 +203,7 @@ impl BlockCollection { { *downloading -= 1; false - } + }, Some(&mut BlockRangeState::Downloading { .. }) => true, _ => false, }; diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 69b172d07edfe..87e481dc87f2d 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -530,7 +530,7 @@ fn fallback_name_working() { { assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME)); break - } + }, _ => {}, }; } diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index 99350f603a375..6d190651160f0 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -336,13 +336,13 @@ impl TransactionsHandler { }, ); debug_assert!(_was_in.is_none()); - } + }, Event::NotificationStreamClosed { remote, protocol } if protocol == self.protocol_name => { let _peer = self.peers.remove(&remote); debug_assert!(_peer.is_some()); - } + }, Event::NotificationsReceived { remote, messages } => { for (protocol, message) in messages { diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index a7863fafa7747..ab8b1523f0509 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -772,7 +772,7 @@ pub mod pallet { Self::on_initialize_open_unsigned(enabled, now); T::WeightInfo::on_initialize_open_unsigned() } - } + }, _ => T::WeightInfo::on_initialize_nothing(), } } diff --git a/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/frame/support/procedural/src/pallet/parse/pallet_struct.rs index 278f46e13818e..c528faf669ee3 100644 --- a/frame/support/procedural/src/pallet/parse/pallet_struct.rs 
+++ b/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -130,12 +130,12 @@ impl PalletStructDef { if generate_storage_info.is_none() => { generate_storage_info = Some(span); - } + }, PalletStructAttr::StorageVersion { storage_version, .. } if storage_version_found.is_none() => { storage_version_found = Some(storage_version); - } + }, attr => { let msg = "Unexpected duplicated attribute"; return Err(syn::Error::new(attr.span(), msg)) diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index df9a496a914be..03ce36819b0f0 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -54,6 +54,7 @@ std = [ "futures", "parking_lot", ] +new-state = [] with-tracing = [ "sp-tracing/with-tracing" diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 7e54b54dc73b3..b64af7c4754b1 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -98,7 +98,7 @@ pub enum KillStorageResult { } /// Interface for accessing the storage from within the runtime. -#[runtime_interface] +#[runtime_interface(feature_patch=new_state,root,2)] pub trait Storage { /// Returns the data for `key` in the storage or `None` if the key can not be found. fn get(&self, key: &[u8]) -> Option> { @@ -265,7 +265,7 @@ pub trait Storage { /// Interface for accessing the child storage for default child trie, /// from within the runtime. -#[runtime_interface] +#[runtime_interface(feature_patch=new_state,root,2)] pub trait DefaultChildStorage { /// Get a default child storage value for a given key. 
/// diff --git a/primitives/runtime-interface/proc-macro/src/lib.rs b/primitives/runtime-interface/proc-macro/src/lib.rs index 6b0669a298e1c..e025d133d1e94 100644 --- a/primitives/runtime-interface/proc-macro/src/lib.rs +++ b/primitives/runtime-interface/proc-macro/src/lib.rs @@ -39,16 +39,17 @@ mod utils; struct Options { wasm_only: bool, tracing: bool, + feature_patch: Vec<(String, String, String)>, } impl Options { - fn unpack(self) -> (bool, bool) { - (self.wasm_only, self.tracing) + fn unpack(self) -> (bool, bool, Vec<(String, String, String)>) { + (self.wasm_only, self.tracing, self.feature_patch) } } impl Default for Options { fn default() -> Self { - Options { wasm_only: false, tracing: true } + Options { wasm_only: false, tracing: true, feature_patch: Vec::new() } } } @@ -63,6 +64,19 @@ impl Parse for Options { } else if lookahead.peek(runtime_interface::keywords::no_tracing) { let _ = input.parse::(); res.tracing = false; + } else if lookahead.peek(runtime_interface::keywords::feature_patch) { + let _ = input.parse::(); + let _ = input.parse::(); + let feature_name = input.parse::()?; + let _ = input.parse::(); + let fonc_name = input.parse::()?; + let _ = input.parse::(); + let fonc_version = match input.parse::()? { + syn::ExprLit { lit: syn::Lit::Int(lit), .. 
} => lit.to_string(), + _ => return Err(lookahead.error()), + }; + let patch = (feature_name.to_string(), fonc_name.to_string(), fonc_version); + res.feature_patch.push(patch); } else if lookahead.peek(Token![,]) { let _ = input.parse::(); } else { @@ -79,9 +93,9 @@ pub fn runtime_interface( input: proc_macro::TokenStream, ) -> proc_macro::TokenStream { let trait_def = parse_macro_input!(input as ItemTrait); - let (wasm_only, tracing) = parse_macro_input!(attrs as Options).unpack(); + let (wasm_only, tracing, feature_patch) = parse_macro_input!(attrs as Options).unpack(); - runtime_interface::runtime_interface_impl(trait_def, wasm_only, tracing) + runtime_interface::runtime_interface_impl(trait_def, wasm_only, tracing, feature_patch) .unwrap_or_else(|e| e.to_compile_error()) .into() } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs index 75498c09c18c7..ed50e49df77cc 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs @@ -41,24 +41,46 @@ use inflector::Inflector; use std::iter::{self, Iterator}; +fn feature_patch( + feature_patch: &Vec<(String, String, String)>, + version: &u32, + method: &TraitItemMethod, +) -> TokenStream { + let method = method.sig.ident.to_string(); + for (feature, method_patch, version_patch) in feature_patch { + if &version.to_string() == version_patch && &method == method_patch { + return quote! { + #[cfg(not(feature=#feature))] + } + } + } + quote! {} +} /// Generate the extern host functions for wasm and the `HostFunctions` struct that provides the /// implementations for the host functions on the host. 
-pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { +pub fn generate( + trait_def: &ItemTrait, + is_wasm_only: bool, + features_patch: Vec<(String, String, String)>, +) -> Result { let trait_name = &trait_def.ident; let extern_host_function_impls = get_runtime_interface(trait_def)?.latest_versions().try_fold( TokenStream::new(), |mut t, (version, method)| { - t.extend(generate_extern_host_function(method, version, trait_name)?); + let patch = feature_patch(&features_patch, &version, &method); + t.extend(generate_extern_host_function(method, version, trait_name, patch)?); Ok::<_, Error>(t) }, )?; let exchangeable_host_functions = get_runtime_interface(trait_def)? .latest_versions() - .try_fold(TokenStream::new(), |mut t, (_, m)| { - t.extend(generate_exchangeable_host_function(m)?); + .try_fold(TokenStream::new(), |mut t, (version, method)| { + let patch = feature_patch(&features_patch, &version, &method); + t.extend(generate_exchangeable_host_function(method, patch)?); Ok::<_, Error>(t) })?; - let host_functions_struct = generate_host_functions_struct(trait_def, is_wasm_only)?; + let host_functions_struct = + generate_host_functions_struct(trait_def, is_wasm_only, features_patch)?; Ok(quote! { /// The implementations of the extern host functions. This special implementation module @@ -82,6 +104,7 @@ fn generate_extern_host_function( method: &TraitItemMethod, version: u32, trait_name: &Ident, + patch: TokenStream, ) -> Result { let crate_ = generate_crate_access(); let args = get_function_arguments(&method.sig); @@ -113,6 +136,7 @@ fn generate_extern_host_function( }; Ok(quote! { + #patch #[doc = #doc_string] pub fn #function ( #( #args ),* ) #return_value { extern "C" { @@ -137,7 +161,10 @@ fn generate_extern_host_function( } /// Generate the host exchangeable function for the given method. 
-fn generate_exchangeable_host_function(method: &TraitItemMethod) -> Result { +fn generate_exchangeable_host_function( + method: &TraitItemMethod, + patch: TokenStream, +) -> Result { let crate_ = generate_crate_access(); let arg_types = get_function_argument_types(&method.sig); let function = &method.sig.ident; @@ -146,6 +173,7 @@ fn generate_exchangeable_host_function(method: &TraitItemMethod) -> Result Result, ) -> Result { let crate_ = generate_crate_access(); let host_functions = get_runtime_interface(trait_def)? .all_versions() .map(|(version, method)| { - generate_host_function_implementation(&trait_def.ident, method, version, is_wasm_only) + let patch = feature_patch(&features_patch, &version, &method); + generate_host_function_implementation( + &trait_def.ident, + method, + version, + is_wasm_only, + patch, + ) }) .collect::>>()?; @@ -194,6 +230,7 @@ fn generate_host_function_implementation( method: &TraitItemMethod, version: u32, is_wasm_only: bool, + feature_patch: TokenStream, ) -> Result { let name = create_host_function_ident(&method.sig.ident, version, trait_name).to_string(); let struct_name = Ident::new(&name.to_pascal_case(), Span::call_site()); @@ -207,6 +244,7 @@ fn generate_host_function_implementation( let convert_return_value = generate_return_value_into_wasm_value(&method.sig); Ok(quote! { + #feature_patch { struct #struct_name; diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs index 78feda663850c..09a6eb8c5a5d2 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs @@ -35,6 +35,8 @@ pub mod keywords { syn::custom_keyword!(wasm_only); // Disable tracing-macros added to the [`runtime_interface`] by specifying this optional entry syn::custom_keyword!(no_tracing); + // Only allow function declaration depending on external crate feature. 
+ syn::custom_keyword!(feature_patch); } /// Implementation of the `runtime_interface` attribute. @@ -45,12 +47,14 @@ pub fn runtime_interface_impl( trait_def: ItemTrait, is_wasm_only: bool, tracing: bool, + feature_patch: Vec<(String, String, String)>, ) -> Result { let bare_functions = bare_function_interface::generate(&trait_def, is_wasm_only, tracing)?; let crate_include = generate_runtime_interface_include(); let mod_name = Ident::new(&trait_def.ident.to_string().to_snake_case(), Span::call_site()); let trait_decl_impl = trait_decl_impl::process(&trait_def, is_wasm_only)?; - let host_functions = host_function_interface::generate(&trait_def, is_wasm_only)?; + let host_functions = + host_function_interface::generate(&trait_def, is_wasm_only, feature_patch)?; let vis = trait_def.vis; let attrs = &trait_def.attrs; diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index 2a2c2698c74c3..dce2d7aa78ad7 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -37,3 +37,4 @@ std = [ "parity-wasm", "thiserror", ] +new-state = [] diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 10edbca4eb24f..c3b7d33c184a7 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -215,6 +215,7 @@ impl RuntimeVersion { self.apis.iter().find_map(|a| (a.0 == *id).then(|| a.1)) } + #[cfg(feature = "new-state")] /// Returns state version to use for update. pub fn state_version(&self) -> StateVersion { let core_api_id = sp_runtime::hashing::blake2_64(b"Core"); @@ -224,6 +225,12 @@ impl RuntimeVersion { StateVersion::V0 } } + + #[cfg(not(feature = "new-state"))] + /// Returns state version to use for update. 
+ pub fn state_version(&self) -> StateVersion { + StateVersion::V0 + } } #[cfg(feature = "std")] From b3d8f6286b27f5dbc9d80772bb47394e5f1512d8 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Mon, 8 Nov 2021 10:35:07 +0100 Subject: [PATCH 136/188] feature gate core version, use new test feature for node and test node --- bin/node/cli/Cargo.toml | 2 +- bin/node/runtime/Cargo.toml | 2 +- client/api/Cargo.toml | 3 +++ primitives/api/Cargo.toml | 1 + primitives/api/src/lib.rs | 26 ++++++++++++++++++++++++++ primitives/runtime/Cargo.toml | 1 + test-utils/client/Cargo.toml | 2 +- test-utils/runtime/Cargo.toml | 2 +- 8 files changed, 35 insertions(+), 4 deletions(-) diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 1d394dd952db0..53cb05caf80e8 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -46,7 +46,7 @@ structopt = { version = "0.3.8", optional = true } sp-authority-discovery = { version = "4.0.0-dev", path = "../../../primitives/authority-discovery" } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } grandpa-primitives = { version = "4.0.0-dev", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } -sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api", features = ["new-state"] } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 22ff0954e2458..785b0214c3daa 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -32,7 +32,7 @@ node-primitives = { version = "2.0.0", default-features = false, path = "../prim sp-offchain = { version = "4.0.0-dev", default-features = false, path = 
"../../../primitives/offchain" } sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" } sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } -sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api", features = ["new-state"] } sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } sp-keyring = { version = "4.0.0-dev", optional = true, path = "../../../primitives/keyring" } diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index af8704058b660..5d49a1fad77f4 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -42,3 +42,6 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0. sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } thiserror = "1.0.21" + +[features] +new-state = ["sp-api/new-state"] diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index c57c3730fc7b6..80fde051096c7 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -49,3 +49,4 @@ std = [ # # This sets the max logging level to `off` for `log`. disable-logging = ["log/max_level_off"] +new-state = ["sp-version/new-state", "sp-runtime/new-state"] diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index d3337f33902dc..c4098a04c1345 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -698,6 +698,7 @@ impl From for OldRuntimeVersion { } } +#[cfg(feature = "new-state")] decl_runtime_apis! { /// The `Core` runtime api that every Substrate runtime needs to implement. 
#[core_trait] @@ -721,3 +722,28 @@ decl_runtime_apis! { fn metadata() -> OpaqueMetadata; } } + +#[cfg(not(feature = "new-state"))] +decl_runtime_apis! { + /// The `Core` runtime api that every Substrate runtime needs to implement. + #[core_trait] + #[api_version(3)] + pub trait Core { + /// Returns the version of the runtime. + fn version() -> RuntimeVersion; + /// Returns the version of the runtime. + #[changed_in(3)] + fn version() -> OldRuntimeVersion; + /// Execute the given block. + fn execute_block(block: Block); + /// Initialize a block with the given header. + #[renamed("initialise_block", 2)] + fn initialize_block(header: &::Header); + } + + /// The `Metadata` api trait that returns metadata for the runtime. + pub trait Metadata { + /// Returns the metadata of a runtime. + fn metadata() -> OpaqueMetadata; + } +} diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 475d2b769de39..4face7fea3437 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -58,3 +58,4 @@ std = [ "hash256-std-hasher/std", "either/use_std", ] +new-state = ["sp-io/new-state"] diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 204b6ac435e07..b5513892c597b 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -17,7 +17,7 @@ futures = "0.3.16" hex = "0.4" serde = "1.0.126" serde_json = "1.0.68" -sc-client-api = { version = "4.0.0-dev", path = "../../client/api" } +sc-client-api = { version = "4.0.0-dev", path = "../../client/api", features = ["new-state"] } sc-client-db = { version = "0.10.0-dev", features = [ "test-helpers", ], path = "../../client/db" } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index ee9bca347887a..1a14917df43f6 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -30,7 +30,7 @@ sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primiti frame-support = { version = "4.0.0-dev", 
default-features = false, path = "../../frame/support" } sp-version = { version = "4.0.0-dev", default-features = false, path = "../../primitives/version" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } -sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api", features = ["new-state"] } sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../frame/babe" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../frame/system" } From dd20a8d1d667b52f3d47ac495bb37780da216603 Mon Sep 17 00:00:00 2001 From: Emeric Chevalier Date: Mon, 8 Nov 2021 10:47:50 +0100 Subject: [PATCH 137/188] Use a 'State' api version instead of Core one. --- primitives/api/src/lib.rs | 31 ++++++++----------------------- primitives/version/src/lib.rs | 4 ++-- 2 files changed, 10 insertions(+), 25 deletions(-) diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index c4098a04c1345..7b7a3c5459f29 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -698,11 +698,10 @@ impl From for OldRuntimeVersion { } } -#[cfg(feature = "new-state")] decl_runtime_apis! { /// The `Core` runtime api that every Substrate runtime needs to implement. #[core_trait] - #[api_version(4)] + #[api_version(3)] pub trait Core { /// Returns the version of the runtime. fn version() -> RuntimeVersion; @@ -723,27 +722,13 @@ decl_runtime_apis! { } } -#[cfg(not(feature = "new-state"))] +#[cfg(feature = "new-state")] decl_runtime_apis! { - /// The `Core` runtime api that every Substrate runtime needs to implement. - #[core_trait] - #[api_version(3)] - pub trait Core { - /// Returns the version of the runtime. 
- fn version() -> RuntimeVersion; - /// Returns the version of the runtime. - #[changed_in(3)] - fn version() -> OldRuntimeVersion; - /// Execute the given block. - fn execute_block(block: Block); - /// Initialize a block with the given header. - #[renamed("initialise_block", 2)] - fn initialize_block(header: &::Header); - } - - /// The `Metadata` api trait that returns metadata for the runtime. - pub trait Metadata { - /// Returns the metadata of a runtime. - fn metadata() -> OpaqueMetadata; + /// State api mainly for checking + /// if new state should apply during state migration. + /// When no state migration support is needed, this + /// could be remove in favor of a Core versioning update. + #[api_version(1)] + pub trait State { } } diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index c3b7d33c184a7..194bac1cdea36 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -218,8 +218,8 @@ impl RuntimeVersion { #[cfg(feature = "new-state")] /// Returns state version to use for update. 
pub fn state_version(&self) -> StateVersion { - let core_api_id = sp_runtime::hashing::blake2_64(b"Core"); - if self.has_api_with(&core_api_id, |v| v >= 4) { + let core_api_id = sp_runtime::hashing::blake2_64(b"State"); + if self.has_api_with(&core_api_id, |v| v >= 1) { StateVersion::V1 } else { StateVersion::V0 From c75d1488a25c5797271f0c7c05b4233583fc1177 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 8 Nov 2021 16:45:51 +0100 Subject: [PATCH 138/188] fix merge of test function --- primitives/state-machine/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 4929c8c506c30..b1a0039494dfd 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -2194,6 +2194,7 @@ mod tests { assert_eq!(remote_proof.encode().len(), remote_proof.encoded_size()); } + #[test] fn prove_range_with_child_works() { let state_version = StateVersion::V0; let remote_backend = trie_backend::tests::test_trie(state_version); From aba86328861f1537bcb218c9d47148151cc78d03 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 8 Nov 2021 17:17:02 +0100 Subject: [PATCH 139/188] use blake macro. 
--- Cargo.lock | 1 + primitives/version/Cargo.toml | 1 + primitives/version/src/lib.rs | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 3adf68506a56a..0191e76cfbdc0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9909,6 +9909,7 @@ dependencies = [ "parity-wasm 0.42.2", "scale-info", "serde", + "sp-core-hashing-proc-macro", "sp-runtime", "sp-std", "sp-version-proc-macro", diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index 238689dc12dad..97053dc60d7bf 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -24,6 +24,7 @@ sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../runti sp-version-proc-macro = { version = "4.0.0-dev", default-features = false, path = "proc-macro" } parity-wasm = { version = "0.42.2", optional = true } thiserror = { version = "1.0.21", optional = true } +sp-core-hashing-proc-macro = { version = "4.0.0-dev", path = "../core/hashing/proc-macro" } [features] default = ["std"] diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 194bac1cdea36..a1e09deba393d 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -218,7 +218,7 @@ impl RuntimeVersion { #[cfg(feature = "new-state")] /// Returns state version to use for update. pub fn state_version(&self) -> StateVersion { - let core_api_id = sp_runtime::hashing::blake2_64(b"State"); + let core_api_id = sp_core_hashing_proc_macro::blake2b_64!(b"State"); if self.has_api_with(&core_api_id, |v| v >= 1) { StateVersion::V1 } else { From 205db5eac161bc8211f0bf419f228e87a844cc10 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 8 Nov 2021 18:00:21 +0100 Subject: [PATCH 140/188] Fix state api (require declaring the api in runtime). 
--- bin/node/runtime/src/lib.rs | 3 +++ client/rpc/src/state/tests.rs | 6 +++--- primitives/api/src/lib.rs | 1 - test-utils/runtime/client/Cargo.toml | 2 +- test-utils/runtime/src/lib.rs | 6 ++++++ 5 files changed, 13 insertions(+), 5 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 570abe53ed01f..0d7f0c9294d2f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1737,6 +1737,9 @@ impl_runtime_apis! { Ok(batches) } } + + impl sp_api::State for Runtime { + } } #[cfg(test)] diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index b538f65ccaaa9..2b7ff724df149 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -550,11 +550,11 @@ fn should_return_runtime_version() { ); let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ - \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",4],\ + \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",3],\ [\"0x37e397fc7c91f5e4\",1],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",5],\ [\"0xc6e9a76309f39b09\",1],[\"0xdd718d5cc53262d4\",1],[\"0xcbca25e39f142387\",2],\ - [\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],[\"0xbc9d89904f5b923f\",1]],\ - \"transactionVersion\":1}"; + [\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],[\"0xbc9d89904f5b923f\",1],\ + [\"0xa442ff44ab783a5b\",1]],\"transactionVersion\":1}"; let runtime_version = executor::block_on(api.runtime_version(None.into())).unwrap(); let serialized = serde_json::to_string(&runtime_version).unwrap(); diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 7fe348d3d2f08..ba8dd887153fb 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -720,7 +720,6 @@ decl_runtime_apis! { } } -#[cfg(feature = "new-state")] decl_runtime_apis! { /// State api mainly for checking /// if new state should apply during state migration. 
diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index fbc6aefdb850c..8b73046af0e4b 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -20,7 +20,7 @@ substrate-test-client = { version = "2.0.0", path = "../../client" } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } substrate-test-runtime = { version = "2.0.0", path = "../../runtime" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } -sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api", features = ["new-state"] } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } codec = { package = "parity-scale-codec", version = "2.0.0" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index fcb691961aadf..cd530d0e09870 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -946,6 +946,9 @@ cfg_if! { 0 } } + + impl sp_api::State for Runtime { + } } } else { impl_runtime_apis! { @@ -1177,6 +1180,9 @@ cfg_if! { 0 } } + + impl sp_api::State for Runtime { + } } } } From 6126ed236f9f71df025b26a82a374e50a4e62d41 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 11 Nov 2021 11:54:22 +0100 Subject: [PATCH 141/188] Opt out feature, fix macro for io to select a given version instead of latest. 
--- bin/node/cli/Cargo.toml | 2 +- bin/node/runtime/Cargo.toml | 2 +- bin/node/runtime/src/lib.rs | 4 +- client/api/Cargo.toml | 2 +- client/rpc/src/state/tests.rs | 6 +- client/service/src/client/client.rs | 2 +- primitives/api/Cargo.toml | 2 +- primitives/api/src/lib.rs | 30 ++++++-- primitives/io/Cargo.toml | 2 +- primitives/io/src/lib.rs | 4 +- .../runtime-interface/proc-macro/src/lib.rs | 20 ++--- .../bare_function_interface.rs | 75 +++++++++++++++++-- .../host_function_interface.rs | 52 ++----------- .../proc-macro/src/runtime_interface/mod.rs | 14 ++-- primitives/runtime/Cargo.toml | 2 +- primitives/version/Cargo.toml | 1 - primitives/version/src/lib.rs | 11 +-- test-utils/client/Cargo.toml | 2 +- test-utils/runtime/Cargo.toml | 2 +- test-utils/runtime/client/Cargo.toml | 2 +- test-utils/runtime/src/lib.rs | 6 -- 21 files changed, 135 insertions(+), 108 deletions(-) diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 429c659a9457e..f7de7f97cb7ec 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -46,7 +46,7 @@ structopt = { version = "0.3.8", optional = true } sp-authority-discovery = { version = "4.0.0-dev", path = "../../../primitives/authority-discovery" } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } grandpa-primitives = { version = "4.0.0-dev", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } -sp-api = { version = "4.0.0-dev", path = "../../../primitives/api", features = ["new-state"] } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api", features = ["old_state"] } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 7fe1f822f47ff..b5e22ec0ae2f6 100644 --- 
a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -32,7 +32,7 @@ node-primitives = { version = "2.0.0", default-features = false, path = "../prim sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" } sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } -sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api", features = ["new-state"] } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api", features = ["old_state"] } sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } sp-keyring = { version = "4.0.0-dev", optional = true, path = "../../../primitives/keyring" } diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 0d7f0c9294d2f..b3d46ee26e4e3 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -125,6 +125,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, + state_version: 1, }; /// The BABE epoch configuration at genesis. @@ -1737,9 +1738,6 @@ impl_runtime_apis! 
{ Ok(batches) } } - - impl sp_api::State for Runtime { - } } #[cfg(test)] diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index c67434f85cf7c..490a3376d1771 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -44,4 +44,4 @@ substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" thiserror = "1.0.21" [features] -new-state = ["sp-api/new-state"] +old_state = ["sp-api/old_state"] diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 2b7ff724df149..b538f65ccaaa9 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -550,11 +550,11 @@ fn should_return_runtime_version() { ); let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ - \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",3],\ + \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",4],\ [\"0x37e397fc7c91f5e4\",1],[\"0xd2bc9897eed08f15\",3],[\"0x40fe3ad401f8959a\",5],\ [\"0xc6e9a76309f39b09\",1],[\"0xdd718d5cc53262d4\",1],[\"0xcbca25e39f142387\",2],\ - [\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],[\"0xbc9d89904f5b923f\",1],\ - [\"0xa442ff44ab783a5b\",1]],\"transactionVersion\":1}"; + [\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],[\"0xbc9d89904f5b923f\",1]],\ + \"transactionVersion\":1}"; let runtime_version = executor::block_on(api.runtime_version(None.into())).unwrap(); let serialized = serde_json::to_string(&runtime_version).unwrap(); diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 719aab34a870b..4a18747d541fd 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -1539,7 +1539,7 @@ where start_key: &[Vec], ) -> sp_blockchain::Result<(KeyValueStates, usize)> { let mut db = sp_state_machine::MemoryDB::>::new(&[]); - // Compact encoding + // Compact encoding let _ = sp_trie::decode_compact::>, _, _>( &mut db, 
proof.iter_compact_encoded_nodes(), diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index 9102fde56b1de..fdb82efa88031 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -49,4 +49,4 @@ std = [ # # This sets the max logging level to `off` for `log`. disable-logging = ["log/max_level_off"] -new-state = ["sp-version/new-state", "sp-runtime/new-state"] +old_state = ["sp-runtime/old_state"] diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index ba8dd887153fb..315a35de9f40d 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -696,10 +696,11 @@ impl From for OldRuntimeVersion { } } +#[cfg(not(feature = "old_state"))] decl_runtime_apis! { /// The `Core` runtime api that every Substrate runtime needs to implement. #[core_trait] - #[api_version(3)] + #[api_version(4)] pub trait Core { /// Returns the version of the runtime. fn version() -> RuntimeVersion; @@ -720,12 +721,27 @@ decl_runtime_apis! { } } +#[cfg(feature = "old_state")] decl_runtime_apis! { - /// State api mainly for checking - /// if new state should apply during state migration. - /// When no state migration support is needed, this - /// could be remove in favor of a Core versioning update. - #[api_version(1)] - pub trait State { + /// The `Core` runtime api that every Substrate runtime needs to implement. + #[core_trait] + #[api_version(3)] + pub trait Core { + /// Returns the version of the runtime. + fn version() -> RuntimeVersion; + /// Returns the version of the runtime. + #[changed_in(3)] + fn version() -> OldRuntimeVersion; + /// Execute the given block. + fn execute_block(block: Block); + /// Initialize a block with the given header. + #[renamed("initialise_block", 2)] + fn initialize_block(header: &::Header); + } + + /// The `Metadata` api trait that returns metadata for the runtime. + pub trait Metadata { + /// Returns the metadata of a runtime. 
+ fn metadata() -> OpaqueMetadata; } } diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index 974bfc0c8cd10..7ef37a80056ed 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -54,7 +54,7 @@ std = [ "futures", "parking_lot", ] -new-state = [] +old_state = [] with-tracing = [ "sp-tracing/with-tracing" diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index b64af7c4754b1..fe7f4698a3c1f 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -98,7 +98,7 @@ pub enum KillStorageResult { } /// Interface for accessing the storage from within the runtime. -#[runtime_interface(feature_patch=new_state,root,2)] +#[runtime_interface(feature_force_version=old_state,root,1)] pub trait Storage { /// Returns the data for `key` in the storage or `None` if the key can not be found. fn get(&self, key: &[u8]) -> Option> { @@ -265,7 +265,7 @@ pub trait Storage { /// Interface for accessing the child storage for default child trie, /// from within the runtime. -#[runtime_interface(feature_patch=new_state,root,2)] +#[runtime_interface(feature_force_version=old_state,root,1)] pub trait DefaultChildStorage { /// Get a default child storage value for a given key. 
/// diff --git a/primitives/runtime-interface/proc-macro/src/lib.rs b/primitives/runtime-interface/proc-macro/src/lib.rs index e025d133d1e94..ca245ab6c01de 100644 --- a/primitives/runtime-interface/proc-macro/src/lib.rs +++ b/primitives/runtime-interface/proc-macro/src/lib.rs @@ -39,17 +39,17 @@ mod utils; struct Options { wasm_only: bool, tracing: bool, - feature_patch: Vec<(String, String, String)>, + feature_force_version: Vec<(String, String, u32)>, } impl Options { - fn unpack(self) -> (bool, bool, Vec<(String, String, String)>) { - (self.wasm_only, self.tracing, self.feature_patch) + fn unpack(self) -> (bool, bool, Vec<(String, String, u32)>) { + (self.wasm_only, self.tracing, self.feature_force_version) } } impl Default for Options { fn default() -> Self { - Options { wasm_only: false, tracing: true, feature_patch: Vec::new() } + Options { wasm_only: false, tracing: true, feature_force_version: Vec::new() } } } @@ -64,19 +64,19 @@ impl Parse for Options { } else if lookahead.peek(runtime_interface::keywords::no_tracing) { let _ = input.parse::(); res.tracing = false; - } else if lookahead.peek(runtime_interface::keywords::feature_patch) { - let _ = input.parse::(); + } else if lookahead.peek(runtime_interface::keywords::feature_force_version) { + let _ = input.parse::(); let _ = input.parse::(); let feature_name = input.parse::()?; let _ = input.parse::(); let fonc_name = input.parse::()?; let _ = input.parse::(); let fonc_version = match input.parse::()? { - syn::ExprLit { lit: syn::Lit::Int(lit), .. } => lit.to_string(), + syn::ExprLit { lit: syn::Lit::Int(lit), .. 
} => lit.base10_parse::()?, _ => return Err(lookahead.error()), }; let patch = (feature_name.to_string(), fonc_name.to_string(), fonc_version); - res.feature_patch.push(patch); + res.feature_force_version.push(patch); } else if lookahead.peek(Token![,]) { let _ = input.parse::(); } else { @@ -93,9 +93,9 @@ pub fn runtime_interface( input: proc_macro::TokenStream, ) -> proc_macro::TokenStream { let trait_def = parse_macro_input!(input as ItemTrait); - let (wasm_only, tracing, feature_patch) = parse_macro_input!(attrs as Options).unpack(); + let (wasm_only, tracing, feature_force_version) = parse_macro_input!(attrs as Options).unpack(); - runtime_interface::runtime_interface_impl(trait_def, wasm_only, tracing, feature_patch) + runtime_interface::runtime_interface_impl(trait_def, wasm_only, tracing, feature_force_version) .unwrap_or_else(|e| e.to_compile_error()) .into() } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs index c951dedb67713..1c2b7dfcc152c 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs @@ -45,9 +45,29 @@ use quote::{quote, quote_spanned}; use std::iter; +fn not_feature_force_version( + feature_force_version: &Vec<(String, String, u32)>, + method: &TraitItemMethod, +) -> TokenStream { + let method = method.sig.ident.to_string(); + for (feature, method_patch, _version_patch) in feature_force_version { + if &method == method_patch { + return quote! { + #[cfg(not(feature=#feature))] + } + } + } + quote! {} +} + /// Generate one bare function per trait method. The name of the bare function is equal to the name /// of the trait method. 
-pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool, tracing: bool) -> Result { +pub fn generate( + trait_def: &ItemTrait, + is_wasm_only: bool, + tracing: bool, + feature_force_version: &Vec<(String, String, u32)>, +) -> Result { let trait_name = &trait_def.ident; let runtime_interface = get_runtime_interface(trait_def)?; @@ -55,7 +75,35 @@ pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool, tracing: bool) -> Res let token_stream: Result = runtime_interface.latest_versions().try_fold( TokenStream::new(), |mut t, (latest_version, method)| { - t.extend(function_for_method(method, latest_version, is_wasm_only)?); + t.extend(function_for_method( + method, + latest_version, + is_wasm_only, + not_feature_force_version(&feature_force_version, &method), + )?); + Ok(t) + }, + ); + + // forced version + let token_stream: Result = feature_force_version.iter().try_fold( + token_stream?, + |mut t, (feature, method, force_version)| { + // lookup method + let (_, full_method) = runtime_interface + .all_versions() + .find(|(version, full_method)| { + version == force_version && &full_method.sig.ident.to_string() == method + }) + .expect("Force version not found"); + + let feature_check = quote!(#[cfg(feature=#feature)]); + t.extend(function_for_method( + full_method, + *force_version, + is_wasm_only, + feature_check, + )?); Ok(t) }, ); @@ -77,11 +125,15 @@ fn function_for_method( method: &TraitItemMethod, latest_version: u32, is_wasm_only: bool, + feature_check: TokenStream, ) -> Result { - let std_impl = - if !is_wasm_only { function_std_latest_impl(method, latest_version)? } else { quote!() }; + let std_impl = if !is_wasm_only { + function_std_latest_impl(method, latest_version, &feature_check)? + } else { + quote!() + }; - let no_std_impl = function_no_std_impl(method)?; + let no_std_impl = function_no_std_impl(method, feature_check)?; Ok(quote! 
{ #std_impl @@ -91,7 +143,10 @@ fn function_for_method( } /// Generates the bare function implementation for `cfg(not(feature = "std"))`. -fn function_no_std_impl(method: &TraitItemMethod) -> Result { +fn function_no_std_impl( + method: &TraitItemMethod, + feature_check: TokenStream, +) -> Result { let function_name = &method.sig.ident; let host_function_name = create_exchangeable_host_function_ident(&method.sig.ident); let args = get_function_arguments(&method.sig); @@ -100,6 +155,7 @@ fn function_no_std_impl(method: &TraitItemMethod) -> Result { let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); Ok(quote! { + #feature_check #[cfg(not(feature = "std"))] #( #attrs )* pub fn #function_name( #( #args, )* ) #return_value { @@ -112,7 +168,11 @@ fn function_no_std_impl(method: &TraitItemMethod) -> Result { /// Generate call to latest function version for `cfg((feature = "std")` /// /// This should generate simple `fn func(..) { func_version_(..) }`. -fn function_std_latest_impl(method: &TraitItemMethod, latest_version: u32) -> Result { +fn function_std_latest_impl( + method: &TraitItemMethod, + latest_version: u32, + feature_check: &TokenStream, +) -> Result { let function_name = &method.sig.ident; let args = get_function_arguments(&method.sig).map(FnArg::Typed); let arg_names = get_function_argument_names(&method.sig).collect::>(); @@ -122,6 +182,7 @@ fn function_std_latest_impl(method: &TraitItemMethod, latest_version: u32) -> Re create_function_ident_with_version(&method.sig.ident, latest_version); Ok(quote_spanned! 
{ method.span() => + #feature_check #[cfg(feature = "std")] #( #attrs )* pub fn #function_name( #( #args, )* ) #return_value { diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs index ed50e49df77cc..75498c09c18c7 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs @@ -41,46 +41,24 @@ use inflector::Inflector; use std::iter::{self, Iterator}; -fn feature_patch( - feature_patch: &Vec<(String, String, String)>, - version: &u32, - method: &TraitItemMethod, -) -> TokenStream { - let method = method.sig.ident.to_string(); - for (feature, method_patch, version_patch) in feature_patch { - if &version.to_string() == version_patch && &method == method_patch { - return quote! { - #[cfg(not(feature=#feature))] - } - } - } - quote! {} -} /// Generate the extern host functions for wasm and the `HostFunctions` struct that provides the /// implementations for the host functions on the host. -pub fn generate( - trait_def: &ItemTrait, - is_wasm_only: bool, - features_patch: Vec<(String, String, String)>, -) -> Result { +pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { let trait_name = &trait_def.ident; let extern_host_function_impls = get_runtime_interface(trait_def)?.latest_versions().try_fold( TokenStream::new(), |mut t, (version, method)| { - let patch = feature_patch(&features_patch, &version, &method); - t.extend(generate_extern_host_function(method, version, trait_name, patch)?); + t.extend(generate_extern_host_function(method, version, trait_name)?); Ok::<_, Error>(t) }, )?; let exchangeable_host_functions = get_runtime_interface(trait_def)? 
.latest_versions() - .try_fold(TokenStream::new(), |mut t, (version, method)| { - let patch = feature_patch(&features_patch, &version, &method); - t.extend(generate_exchangeable_host_function(method, patch)?); + .try_fold(TokenStream::new(), |mut t, (_, m)| { + t.extend(generate_exchangeable_host_function(m)?); Ok::<_, Error>(t) })?; - let host_functions_struct = - generate_host_functions_struct(trait_def, is_wasm_only, features_patch)?; + let host_functions_struct = generate_host_functions_struct(trait_def, is_wasm_only)?; Ok(quote! { /// The implementations of the extern host functions. This special implementation module @@ -104,7 +82,6 @@ fn generate_extern_host_function( method: &TraitItemMethod, version: u32, trait_name: &Ident, - patch: TokenStream, ) -> Result { let crate_ = generate_crate_access(); let args = get_function_arguments(&method.sig); @@ -136,7 +113,6 @@ fn generate_extern_host_function( }; Ok(quote! { - #patch #[doc = #doc_string] pub fn #function ( #( #args ),* ) #return_value { extern "C" { @@ -161,10 +137,7 @@ fn generate_extern_host_function( } /// Generate the host exchangeable function for the given method. -fn generate_exchangeable_host_function( - method: &TraitItemMethod, - patch: TokenStream, -) -> Result { +fn generate_exchangeable_host_function(method: &TraitItemMethod) -> Result { let crate_ = generate_crate_access(); let arg_types = get_function_argument_types(&method.sig); let function = &method.sig.ident; @@ -173,7 +146,6 @@ fn generate_exchangeable_host_function( let output = &method.sig.output; Ok(quote! { - #patch #[cfg(not(feature = "std"))] #[allow(non_upper_case_globals)] #[doc = #doc_string] @@ -188,21 +160,13 @@ fn generate_exchangeable_host_function( fn generate_host_functions_struct( trait_def: &ItemTrait, is_wasm_only: bool, - features_patch: Vec<(String, String, String)>, ) -> Result { let crate_ = generate_crate_access(); let host_functions = get_runtime_interface(trait_def)? 
.all_versions() .map(|(version, method)| { - let patch = feature_patch(&features_patch, &version, &method); - generate_host_function_implementation( - &trait_def.ident, - method, - version, - is_wasm_only, - patch, - ) + generate_host_function_implementation(&trait_def.ident, method, version, is_wasm_only) }) .collect::>>()?; @@ -230,7 +194,6 @@ fn generate_host_function_implementation( method: &TraitItemMethod, version: u32, is_wasm_only: bool, - feature_patch: TokenStream, ) -> Result { let name = create_host_function_ident(&method.sig.ident, version, trait_name).to_string(); let struct_name = Ident::new(&name.to_pascal_case(), Span::call_site()); @@ -244,7 +207,6 @@ fn generate_host_function_implementation( let convert_return_value = generate_return_value_into_wasm_value(&method.sig); Ok(quote! { - #feature_patch { struct #struct_name; diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs index 09a6eb8c5a5d2..b41993a069c4d 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs @@ -36,7 +36,7 @@ pub mod keywords { // Disable tracing-macros added to the [`runtime_interface`] by specifying this optional entry syn::custom_keyword!(no_tracing); // Only allow function declaration depending on external crate feature. - syn::custom_keyword!(feature_patch); + syn::custom_keyword!(feature_force_version); } /// Implementation of the `runtime_interface` attribute. 
@@ -47,14 +47,18 @@ pub fn runtime_interface_impl( trait_def: ItemTrait, is_wasm_only: bool, tracing: bool, - feature_patch: Vec<(String, String, String)>, + feature_force_version: Vec<(String, String, u32)>, ) -> Result { - let bare_functions = bare_function_interface::generate(&trait_def, is_wasm_only, tracing)?; + let bare_functions = bare_function_interface::generate( + &trait_def, + is_wasm_only, + tracing, + &feature_force_version, + )?; let crate_include = generate_runtime_interface_include(); let mod_name = Ident::new(&trait_def.ident.to_string().to_snake_case(), Span::call_site()); let trait_decl_impl = trait_decl_impl::process(&trait_def, is_wasm_only)?; - let host_functions = - host_function_interface::generate(&trait_def, is_wasm_only, feature_patch)?; + let host_functions = host_function_interface::generate(&trait_def, is_wasm_only)?; let vis = trait_def.vis; let attrs = &trait_def.attrs; diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 3ef55fd6ab4da..7b706f82c53d3 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -58,4 +58,4 @@ std = [ "hash256-std-hasher/std", "either/use_std", ] -new-state = ["sp-io/new-state"] +old_state = ["sp-io/old_state"] diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index 97053dc60d7bf..3e45e4eaf18d9 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -38,4 +38,3 @@ std = [ "parity-wasm", "thiserror", ] -new-state = [] diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index a1e09deba393d..f1fce23787cc8 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -215,22 +215,15 @@ impl RuntimeVersion { self.apis.iter().find_map(|a| (a.0 == *id).then(|| a.1)) } - #[cfg(feature = "new-state")] /// Returns state version to use for update. 
pub fn state_version(&self) -> StateVersion { - let core_api_id = sp_core_hashing_proc_macro::blake2b_64!(b"State"); - if self.has_api_with(&core_api_id, |v| v >= 1) { + let core_api_id = sp_core_hashing_proc_macro::blake2b_64!(b"Core"); + if self.has_api_with(&core_api_id, |v| v >= 4) { StateVersion::V1 } else { StateVersion::V0 } } - - #[cfg(not(feature = "new-state"))] - /// Returns state version to use for update. - pub fn state_version(&self) -> StateVersion { - StateVersion::V0 - } } #[cfg(feature = "std")] diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index d1dab015eb59e..774a9460072cf 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -17,7 +17,7 @@ futures = "0.3.16" hex = "0.4" serde = "1.0.126" serde_json = "1.0.68" -sc-client-api = { version = "4.0.0-dev", path = "../../client/api", features = ["new-state"] } +sc-client-api = { version = "4.0.0-dev", path = "../../client/api", features = ["old_state"] } sc-client-db = { version = "0.10.0-dev", features = [ "test-helpers", ], path = "../../client/db" } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 354078af57718..464dd8e50f7f5 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -30,7 +30,7 @@ sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primiti frame-support = { version = "4.0.0-dev", default-features = false, path = "../../frame/support" } sp-version = { version = "4.0.0-dev", default-features = false, path = "../../primitives/version" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } -sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api", features = ["new-state"] } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api", features = ["old_state"] } sp-runtime = { version = "4.0.0-dev", default-features = false, path = 
"../../primitives/runtime" } pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../frame/babe" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../frame/system" } diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index 8b73046af0e4b..45cf8b74d85ce 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -20,7 +20,7 @@ substrate-test-client = { version = "2.0.0", path = "../../client" } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } substrate-test-runtime = { version = "2.0.0", path = "../../runtime" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } -sp-api = { version = "4.0.0-dev", path = "../../../primitives/api", features = ["new-state"] } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api", features = ["old_state"] } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } codec = { package = "parity-scale-codec", version = "2.0.0" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index cd530d0e09870..fcb691961aadf 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -946,9 +946,6 @@ cfg_if! { 0 } } - - impl sp_api::State for Runtime { - } } } else { impl_runtime_apis! { @@ -1180,9 +1177,6 @@ cfg_if! { 0 } } - - impl sp_api::State for Runtime { - } } } } From 610bc23912e7b1f974f78998a6a5d546c8e93c5c Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 11 Nov 2021 11:56:00 +0100 Subject: [PATCH 142/188] run test nodes on new state. 
--- bin/node/cli/Cargo.toml | 2 +- bin/node/runtime/Cargo.toml | 2 +- test-utils/client/Cargo.toml | 2 +- test-utils/runtime/Cargo.toml | 2 +- test-utils/runtime/client/Cargo.toml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index f7de7f97cb7ec..9bcc6f163d728 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -46,7 +46,7 @@ structopt = { version = "0.3.8", optional = true } sp-authority-discovery = { version = "4.0.0-dev", path = "../../../primitives/authority-discovery" } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } grandpa-primitives = { version = "4.0.0-dev", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } -sp-api = { version = "4.0.0-dev", path = "../../../primitives/api", features = ["old_state"] } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api"} sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index b5e22ec0ae2f6..c0b888e55b1f6 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -32,7 +32,7 @@ node-primitives = { version = "2.0.0", default-features = false, path = "../prim sp-offchain = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/offchain" } sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/core" } sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } -sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api", features = ["old_state"] } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } sp-runtime = { 
version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } sp-keyring = { version = "4.0.0-dev", optional = true, path = "../../../primitives/keyring" } diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 774a9460072cf..a8b2e8f57ac52 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -17,7 +17,7 @@ futures = "0.3.16" hex = "0.4" serde = "1.0.126" serde_json = "1.0.68" -sc-client-api = { version = "4.0.0-dev", path = "../../client/api", features = ["old_state"] } +sc-client-api = { version = "4.0.0-dev", path = "../../client/api" } sc-client-db = { version = "0.10.0-dev", features = [ "test-helpers", ], path = "../../client/db" } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 464dd8e50f7f5..e1150a28feb4b 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -30,7 +30,7 @@ sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primiti frame-support = { version = "4.0.0-dev", default-features = false, path = "../../frame/support" } sp-version = { version = "4.0.0-dev", default-features = false, path = "../../primitives/version" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } -sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api", features = ["old_state"] } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } pallet-babe = { version = "4.0.0-dev", default-features = false, path = "../../frame/babe" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../../frame/system" } diff --git a/test-utils/runtime/client/Cargo.toml 
b/test-utils/runtime/client/Cargo.toml index 45cf8b74d85ce..fbc6aefdb850c 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -20,7 +20,7 @@ substrate-test-client = { version = "2.0.0", path = "../../client" } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } substrate-test-runtime = { version = "2.0.0", path = "../../runtime" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } -sp-api = { version = "4.0.0-dev", path = "../../../primitives/api", features = ["old_state"] } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } codec = { package = "parity-scale-codec", version = "2.0.0" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } From c0af4f137ed0d5908c598e27c8027fb1f05f7790 Mon Sep 17 00:00:00 2001 From: cheme Date: Thu, 11 Nov 2021 12:05:47 +0100 Subject: [PATCH 143/188] fix --- bin/node/runtime/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index b3d46ee26e4e3..570abe53ed01f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -125,7 +125,6 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, - state_version: 1, }; /// The BABE epoch configuration at genesis. 
From 9db1b95b62e7de845db74524c257005c25d45997 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Fri, 12 Nov 2021 15:58:12 +0100 Subject: [PATCH 144/188] new test structure --- frame/state-trie-migration/src/lib.rs | 67 +++++++++++----- .../state-machine/src/trie_backend_essence.rs | 78 +++++++++++++++++-- 2 files changed, 119 insertions(+), 26 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 3f35d299b6269..f3477f2e909a8 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -341,18 +341,18 @@ pub mod pallet { /// /// It updates the dynamic counters. fn migrate_child(&mut self) { - let last_child = - self.last_child.as_ref().expect("value checked to be `Some`; qed"); + let last_child = self.last_child.as_ref().expect("value checked to be `Some`; qed"); let last_top = self.last_top.clone().expect("value checked to be `Some`; qed"); let child_root = Pallet::::child_io_key(&last_top); - let added_size = if let Some(data) = sp_io::default_child_storage::get(child_root, &last_child) { - self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); - sp_io::default_child_storage::set(child_root, last_child, &data); - data.len() as u32 - } else { - Zero::zero() - }; + let added_size = + if let Some(data) = sp_io::default_child_storage::get(child_root, &last_child) { + self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); + sp_io::default_child_storage::set(child_root, last_child, &data); + data.len() as u32 + } else { + Zero::zero() + }; self.dyn_child_items.saturating_inc(); let next_key = sp_io::default_child_storage::next_key(child_root, last_child); @@ -760,7 +760,9 @@ pub mod pallet { let mut new_task = Self::migration_process(); new_task.migrate_until_exhaustion(MigrationLimits { size: chain_limits.size, - item: task.dyn_total_items().saturating_sub(T::UnsignedBackOff::get().max(1)), + item: task + .dyn_total_items() + 
.saturating_sub(T::UnsignedBackOff::get().max(1)), }); task = new_task; } @@ -1365,27 +1367,50 @@ mod remote_tests { .await .unwrap(); - ext.execute_with(|| { - // requires the block number type in our tests to be same as with mainnet, u32. - let mut now = frame_system::Pallet::::block_number(); - let mut duration = 0; + let mut now = ext.execute_with(|| { AutoLimits::::put(Some(limits)); - loop { + // requires the block number type in our tests to be same as with mainnet, u32. + frame_system::Pallet::::block_number() + }); + + let mut duration = 0; + + loop { + let finished = ext.execute_with(|| { run_to_block(now + 1); if StateTrieMigration::migration_process().finished() { - break + return true } duration += 1; now += 1; - } + false + }); + let (top_left, child_left) = + ext.as_backend().essence().check_migration_state().unwrap(); log::info!( target: LOG_TARGET, - "finished on_initialize migration in {} block, final state of the task: {:?}", - duration, - StateTrieMigration::migration_process() + "(top_left: {}, child_left {})", + top_left, + child_left, ); - }) + + if finished { + break + } + } + + log::info!( + target: LOG_TARGET, + "finished on_initialize migration in {} block, final state of the task: {:?}", + duration, + StateTrieMigration::migration_process(), + ); + + let (top_left, child_left) = + ext.as_backend().essence().check_migration_state().unwrap(); + assert_eq!(top_left, 0); + assert_eq!(child_left, 0); }; // item being the bottleneck run_with_limits(MigrationLimits { item: 1000, size: 4 * 1024 * 1024 }).await; diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 945fd05ebffb4..ecb9af2ed815f 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -23,20 +23,22 @@ use codec::Encode; use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix}; #[cfg(feature = "std")] use parking_lot::RwLock; -use 
sp_core::storage::ChildInfo; +use sp_core::storage::{ChildInfo, ChildType, PrefixedStorageKey}; use sp_std::{boxed::Box, vec::Vec}; use sp_trie::{ empty_child_trie_root, read_child_trie_value, read_trie_value, trie_types::{TrieDB, TrieError}, - DBValue, KeySpacedDB, PrefixedMemoryDB, Trie, TrieDBIterator, TrieDBKeyIterator, + DBValue, KeySpacedDB, LayoutV1 as Layout, PrefixedMemoryDB, Trie, TrieDBIterator, + TrieDBKeyIterator, }; #[cfg(feature = "std")] use std::collections::HashMap; #[cfg(feature = "std")] use std::sync::Arc; -// In this module, we only use layout for read operation and empty root, -// where V1 and V0 are equivalent. -use sp_trie::LayoutV1 as Layout; +use trie_db::{ + node::{NodePlan, ValuePlan}, + TrieDBNodeIterator, +}; #[cfg(not(feature = "std"))] macro_rules! format { @@ -438,6 +440,72 @@ where false, ); } + + /// Check remaining state item to migrate. Note this function should be remove when all state + /// migration did finished as it is only an utility. + // original author: @cheme + pub fn check_migration_state(&self) -> Result<(u64, u64)> { + let threshold: u32 = 33; + let mut nb_to_migrate = 0; + let mut nb_to_migrate_child = 0; + + let trie = sp_trie::trie_types::TrieDB::new(self, &self.root) + .map_err(|e| format!("TrieDB creation error: {}", e))?; + let iter_node = TrieDBNodeIterator::new(&trie) + .map_err(|e| format!("TrieDB node iterator error: {}", e))?; + for node in iter_node { + let node = node.map_err(|e| format!("TrieDB node iterator error: {}", e))?; + match node.2.node_plan() { + NodePlan::Leaf { value, .. } | + NodePlan::NibbledBranch { value: Some(value), .. } => + if let ValuePlan::Inline(range) = value { + if (range.end - range.start) as u32 >= threshold { + nb_to_migrate += 1; + } + }, + _ => (), + } + } + + let mut child_roots: Vec<(ChildInfo, Vec)> = Vec::new(); + // get all child trie roots + for key_value in trie.iter().map_err(|e| format!("TrieDB node iterator error: {}", e))? 
{ + let (key, value) = + key_value.map_err(|e| format!("TrieDB node iterator error: {}", e))?; + if key[..] + .starts_with(sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX) + { + let prefixed_key = PrefixedStorageKey::new(key); + let (_type, unprefixed) = ChildType::from_prefixed_key(&prefixed_key).unwrap(); + child_roots.push((ChildInfo::new_default(unprefixed), value)); + } + } + for (child_info, root) in child_roots { + let mut child_root = H::Out::default(); + let storage = KeySpacedDB::new(self, child_info.keyspace()); + + child_root.as_mut()[..].copy_from_slice(&root[..]); + let trie = sp_trie::trie_types::TrieDB::new(&storage, &child_root) + .map_err(|e| format!("New child TrieDB error: {}", e))?; + let iter_node = TrieDBNodeIterator::new(&trie) + .map_err(|e| format!("TrieDB node iterator error: {}", e))?; + for node in iter_node { + let node = node.map_err(|e| format!("Child TrieDB node iterator error: {}", e))?; + match node.2.node_plan() { + NodePlan::Leaf { value, .. } | + NodePlan::NibbledBranch { value: Some(value), .. 
} => + if let ValuePlan::Inline(range) = value { + if (range.end - range.start) as u32 >= threshold { + nb_to_migrate_child += 1; + } + }, + _ => (), + } + } + } + + Ok((nb_to_migrate, nb_to_migrate_child)) + } } pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { From 13e68449aabf5cc75912476bc395b41e95b868c1 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Sat, 13 Nov 2021 16:01:21 +0100 Subject: [PATCH 145/188] new testing stuff from emeric --- frame/state-trie-migration/src/lib.rs | 41 +++-- utils/frame/remote-externalities/src/lib.rs | 177 +++++++------------- 2 files changed, 87 insertions(+), 131 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index f3477f2e909a8..807e0813c3942 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1346,7 +1346,7 @@ mod remote_tests { use super::{mock::*, *}; use mock::run_to_block_and_drain_pool; - use remote_externalities::{Mode, OnlineConfig}; + use remote_externalities::{Mode, OfflineConfig, OnlineConfig}; use sp_runtime::traits::Bounded; // we only use the hash type from this (I hope). 
@@ -1357,11 +1357,19 @@ mod remote_tests { sp_tracing::try_init_simple(); let run_with_limits = |limits| async move { let mut ext = remote_externalities::Builder::::new() - .mode(Mode::Online(OnlineConfig { - transport: std::env!("WS_API").to_owned().into(), - scrape_children: true, - ..Default::default() - })) + .mode(Mode::OfflineOrElseOnline( + OfflineConfig { + state_snapshot: "/home/kianenigma/remote-builds/state".to_owned().into(), + }, + OnlineConfig { + transport: std::env!("WS_API").to_owned().into(), + scrape_children: true, + state_snapshot: Some( + "/home/kianenigma/remote-builds/state".to_owned().into(), + ), + ..Default::default() + }, + )) .state_version(sp_core::StateVersion::V0) .build() .await @@ -1423,14 +1431,19 @@ mod remote_tests { sp_tracing::try_init_simple(); let run_with_limits = |limits| async move { let mut ext = remote_externalities::Builder::::new() - .mode(Mode::Online(OnlineConfig { - transport: std::env!("WS_API").to_owned().into(), - scrape_children: true, - state_snapshot: Some( - "/home/kianenigma/remote-builds/ksm-state".to_owned().into(), - ), - ..Default::default() - })) + .mode(Mode::OfflineOrElseOnline( + OfflineConfig { + state_snapshot: "/home/kianenigma/remote-builds/state".to_owned().into(), + }, + OnlineConfig { + transport: std::env!("WS_API").to_owned().into(), + scrape_children: true, + state_snapshot: Some( + "/home/kianenigma/remote-builds/state".to_owned().into(), + ), + ..Default::default() + }, + )) .state_version(sp_core::StateVersion::V0) .build() .await diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index fb1a4d660402f..54087146b57c6 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -22,6 +22,7 @@ use codec::{Decode, Encode}; use jsonrpsee_ws_client::{types::v2::params::JsonRpcParams, WsClient, WsClientBuilder}; +use log::*; use sp_core::{ hashing::twox_128, hexdisplay::HexDisplay, @@ -104,12 
+105,6 @@ pub struct OfflineConfig { pub state_snapshot: SnapshotConfig, } -impl> From

for SnapshotConfig { - fn from(p: P) -> Self { - Self { path: p.into() } - } -} - /// Description of the transport protocol (for online execution). #[derive(Debug)] pub struct Transport { @@ -333,11 +328,7 @@ impl Builder { use serde_json::to_value; let keys = self.get_keys_paged(prefix, at).await?; let keys_count = keys.len(); -<<<<<<< HEAD - debug!(target: LOG_TARGET, "Querying a total of {} top keys", keys.len()); -======= log::debug!(target: LOG_TARGET, "Querying a total of {} keys", keys.len()); ->>>>>>> ecr-state-update-host let mut key_values: Vec = vec![]; let client = self.as_online().rpc_client(); @@ -394,31 +385,24 @@ impl Builder { // Internal methods impl Builder { -<<<<<<< HEAD /// Save the given data to the top keys snapshot. fn save_top_snapshot(&self, data: &[KeyPair], path: &PathBuf) -> Result<(), &'static str> { let mut path = path.clone(); path.set_extension("top"); debug!(target: LOG_TARGET, "writing to state snapshot file {:?}", path); -======= - /// Save the given data as state snapshot. - fn save_state_snapshot(&self, data: &[KeyPair], path: &Path) -> Result<(), &'static str> { - log::debug!(target: LOG_TARGET, "writing to state snapshot file {:?}", path); ->>>>>>> ecr-state-update-host fs::write(path, data.encode()).map_err(|_| "fs::write failed.")?; Ok(()) } -<<<<<<< HEAD /// Save the given data to the child keys snapshot. fn save_child_snapshot( &self, data: &ChildKeyPairs, path: &PathBuf, ) -> Result<(), &'static str> { - debug!(target: LOG_TARGET, "writing to state snapshot file {:?}", path); let mut path = path.clone(); path.set_extension("child"); + debug!(target: LOG_TARGET, "writing to state snapshot file {:?}", path); fs::write(path, data.encode()).map_err(|_| "fs::write failed.")?; Ok(()) } @@ -435,22 +419,32 @@ impl Builder { let mut path = path.clone(); path.set_extension("child"); info!(target: LOG_TARGET, "loading child key-pairs from snapshot {:?}", path); -======= - /// initialize `Self` from state snapshot. 
Panics if the file does not exist. - fn load_state_snapshot(&self, path: &Path) -> Result, &'static str> { - log::info!(target: LOG_TARGET, "scraping key-pairs from state snapshot {:?}", path); ->>>>>>> ecr-state-update-host let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; Decode::decode(&mut &*bytes).map_err(|_| "decode failed") } + /// Load all of the child keys from the remote config, given the already scraped list of top key + /// pairs. + /// + /// Stores all values to cache as well, if provided. + async fn load_child_keys_remote_and_maybe_save( + &self, + top_kv: &[KeyPair], + ) -> Result)>, &'static str> { + let child_kv = self.load_child_keys_remote(&top_kv).await?; + if let Some(c) = &self.as_online().state_snapshot { + self.save_child_snapshot(&child_kv, &c.path)?; + } + Ok(child_kv) + } + /// Load all of the child keys from the remote config, given the already scraped list of top key /// pairs. async fn load_child_keys_remote( &self, - top_kp: &[KeyPair], + top_kv: &[KeyPair], ) -> Result)>, &'static str> { - let child_bearing_top_keys = top_kp + let child_bearing_top_keys = top_kv .iter() .filter_map( |(k, _)| { @@ -469,7 +463,7 @@ impl Builder { child_bearing_top_keys.len() ); - let mut child_kp = vec![]; + let mut child_kvs = vec![]; for prefixed_top_key in child_bearing_top_keys { let child_keys = RpcApi::::child_get_keys( self.as_online().rpc_client(), @@ -508,10 +502,19 @@ impl Builder { // super tricky. let un_prefixed = &prefixed_top_key.0[DEFAULT_CHILD_STORAGE_KEY_PREFIX.len()..]; - child_kp.push((ChildInfo::new_default(&un_prefixed), child_kv)); + child_kvs.push((ChildInfo::new_default(&un_prefixed), child_kv)); } - Ok(child_kp) + Ok(child_kvs) + } + + /// Load all the `top` keys from the remote config, and maybe write then to cache. 
+ async fn load_top_keys_remote_and_maybe_save(&self) -> Result, &'static str> { + let top_kv = self.load_top_keys_remote().await?; + if let Some(c) = &self.as_online().state_snapshot { + self.save_top_snapshot(&top_kv, &c.path)?; + } + Ok(top_kv) } /// Load all the `top` keys from the remote config. @@ -594,65 +597,65 @@ impl Builder { mut self, ) -> Result<(Vec, Vec<(ChildInfo, Vec)>), &'static str> { let mode = self.mode.clone(); - let mut top_kp = match mode { + let mut top_kv = match mode { Mode::Offline(config) => self.load_top_snapshot(&config.state_snapshot.path)?, - Mode::Online(config) => { + Mode::Online(_) => { self.init_remote_client().await?; - let top_kp = self.load_top_keys_remote().await?; - if let Some(c) = config.state_snapshot { - self.save_top_snapshot(&top_kp, &c.path)?; - } - top_kp + self.load_top_keys_remote_and_maybe_save().await? }, - Mode::OfflineOrElseOnline(offline_config, online_config) => { - if let Ok(kv) = self.load_state_snapshot(&offline_config.state_snapshot.path) { - kv + Mode::OfflineOrElseOnline(offline_config, _) => { + if let Ok(top_kv) = self.load_top_snapshot(&offline_config.state_snapshot.path) { + top_kv } else { self.init_remote_client().await?; - let kp = self.load_remote().await?; - if let Some(c) = online_config.state_snapshot { - self.save_state_snapshot(&kp, &c.path)?; - } - kp + self.load_top_keys_remote_and_maybe_save().await? } }, }; // inject manual key values. if !self.hashed_key_values.is_empty() { - log::debug!( + log::info!( target: LOG_TARGET, "extending externalities with {} manually injected key-values", self.hashed_key_values.len() ); - top_kp.extend(self.hashed_key_values.clone()); + top_kv.extend(self.hashed_key_values.clone()); } // exclude manual key values. 
if !self.hashed_blacklist.is_empty() { - log::debug!( + log::info!( target: LOG_TARGET, "excluding externalities from {} keys", self.hashed_blacklist.len() ); - top_kp.retain(|(k, _)| !self.hashed_blacklist.contains(&k.0)) + top_kv.retain(|(k, _)| !self.hashed_blacklist.contains(&k.0)) } - let child_kp = match self.mode { - Mode::Online(ref config) if config.scrape_children => { - let child_kp = self.load_child_keys_remote(&top_kp).await?; - if let Some(c) = &config.state_snapshot { - self.save_child_snapshot(&child_kp, &c.path)?; + let child_kv = match self.mode.clone() { + Mode::Online(ref config) if config.scrape_children => + self.load_child_keys_remote_and_maybe_save(&top_kv).await?, + Mode::Offline(ref config) => self + .load_child_snapshot(&config.state_snapshot.path) + .map_err(|why| { + log::warn!(target: LOG_TARGET, "failed to load child-key file due to {:?}", why) + }) + .unwrap_or_default(), + Mode::OfflineOrElseOnline(ref offline_config, ref online_config) + if online_config.scrape_children => + { + if let Ok(child_kv) = self.load_child_snapshot(&offline_config.state_snapshot.path) + { + child_kv + } else { + self.load_child_keys_remote_and_maybe_save(&top_kv).await? } - child_kp }, - Mode::Offline(ref config) => self.load_child_snapshot(&config.state_snapshot.path).map_err(|why| - log::warn!(target: LOG_TARGET, "failed to load child-key file due to {:?}", why) - ).unwrap_or_default(), _ => Default::default(), }; - Ok((top_kp, child_kp)) + Ok((top_kv, child_kv)) } } @@ -718,7 +721,6 @@ impl Builder { /// Build the test externalities. 
pub async fn build(self) -> Result { -<<<<<<< HEAD let state_version = self.state_version.clone(); let (top_kv, child_kv) = self.pre_build().await?; let mut ext = TestExternalities::new_with_code_and_state( @@ -734,16 +736,6 @@ impl Builder { continue } ext.insert(k.0, v.0); -======= - let kv = self.pre_build().await?; - let mut ext = TestExternalities::new_empty(); - - log::info!(target: LOG_TARGET, "injecting a total of {} keys", kv.len()); - for (k, v) in kv { - let (k, v) = (k.0, v.0); - // Insert the key,value pair into the test trie backend - ext.insert(k, v); ->>>>>>> ecr-state-update-host } info!( @@ -840,9 +832,6 @@ mod remote_tests { const REMOTE_INACCESSIBLE: &'static str = "Can't reach the remote node. Is it running?"; #[tokio::test] -<<<<<<< HEAD - async fn can_build_one_big_pallet() { -======= async fn offline_else_online_works() { init_logger(); // this shows that in the second run, we use the remote and create a cache. @@ -883,7 +872,6 @@ mod remote_tests { #[tokio::test] async fn can_build_one_pallet() { ->>>>>>> ecr-state-update-host init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { @@ -897,52 +885,7 @@ mod remote_tests { } #[tokio::test] - async fn can_build_one_small_pallet() { - init_logger(); - Builder::::new() - .mode(Mode::Online(OnlineConfig { -<<<<<<< HEAD - transport: "wss://kusama-rpc.polkadot.io".to_owned().into(), - pallets: vec!["Council".to_owned()], - ..Default::default() - })) - .build() - .await - .expect(REMOTE_INACCESSIBLE) - .execute_with(|| {}); - - Builder::::new() - .mode(Mode::Online(OnlineConfig { - transport: "wss://rpc.polkadot.io".to_owned().into(), - pallets: vec!["Council".to_owned()], -======= - pallets: vec!["Proxy".to_owned(), "Multisig".to_owned()], ->>>>>>> ecr-state-update-host - ..Default::default() - })) - .build() - .await - .expect(REMOTE_INACCESSIBLE) - .execute_with(|| {}); - } - - #[tokio::test] -<<<<<<< HEAD async fn can_build_few_pallet() { -======= - async fn sanity_check_decoding() { - 
use pallet_elections_phragmen::SeatHolder; - use sp_core::crypto::Ss58Codec; - - type AccountId = sp_runtime::AccountId32; - type Balance = u128; - frame_support::generate_storage_alias!( - PhragmenElection, - Members => - Value>> - ); - ->>>>>>> ecr-state-update-host init_logger(); Builder::::new() .mode(Mode::Online(OnlineConfig { From ab79457b6ef00698bd5de2d41ead861e9c5ae686 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Sun, 14 Nov 2021 15:35:34 +0100 Subject: [PATCH 146/188] Add commit_all, still not working --- frame/state-trie-migration/src/lib.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 807e0813c3942..b41cc83707b91 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1394,6 +1394,7 @@ mod remote_tests { false }); + ext.commit_all().unwrap(); let (top_left, child_left) = ext.as_backend().essence().check_migration_state().unwrap(); log::info!( @@ -1420,8 +1421,9 @@ mod remote_tests { assert_eq!(top_left, 0); assert_eq!(child_left, 0); }; + // item being the bottleneck - run_with_limits(MigrationLimits { item: 1000, size: 4 * 1024 * 1024 }).await; + run_with_limits(MigrationLimits { item: 16 * 1024, size: 128 * 1024 * 1024 }).await; // size being the bottleneck run_with_limits(MigrationLimits { item: Bounded::max_value(), size: 4 * 1024 }).await; } From 07e495bf18b22707c066e73bf4a63cf55190ce5e Mon Sep 17 00:00:00 2001 From: kianenigma Date: Mon, 15 Nov 2021 15:56:32 +0100 Subject: [PATCH 147/188] Fix all tests --- frame/state-trie-migration/src/lib.rs | 64 ++++++++++++++++++++------- 1 file changed, 47 insertions(+), 17 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index b41cc83707b91..cc1bf10b514ea 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1349,7 +1349,7 @@ mod remote_tests { use 
remote_externalities::{Mode, OfflineConfig, OnlineConfig}; use sp_runtime::traits::Bounded; - // we only use the hash type from this (I hope). + // we only use the hash type from this, so using the mock should be fine. type Block = sp_runtime::testing::Block; #[tokio::test] @@ -1382,6 +1382,8 @@ mod remote_tests { }); let mut duration = 0; + // set the version to 1, as if the upgrade happened. + ext.state_version = sp_core::StateVersion::V1; loop { let finished = ext.execute_with(|| { @@ -1394,7 +1396,6 @@ mod remote_tests { false }); - ext.commit_all().unwrap(); let (top_left, child_left) = ext.as_backend().essence().check_migration_state().unwrap(); log::info!( @@ -1409,12 +1410,14 @@ mod remote_tests { } } - log::info!( - target: LOG_TARGET, - "finished on_initialize migration in {} block, final state of the task: {:?}", - duration, - StateTrieMigration::migration_process(), - ); + ext.execute_with(|| { + log::info!( + target: LOG_TARGET, + "finished on_initialize migration in {} block, final state of the task: {:?}", + duration, + StateTrieMigration::migration_process(), + ) + }); let (top_left, child_left) = ext.as_backend().essence().check_migration_state().unwrap(); @@ -1423,7 +1426,7 @@ mod remote_tests { }; // item being the bottleneck - run_with_limits(MigrationLimits { item: 16 * 1024, size: 128 * 1024 * 1024 }).await; + run_with_limits(MigrationLimits { item: 32 * 1024, size: 128 * 1024 * 1024 }).await; // size being the bottleneck run_with_limits(MigrationLimits { item: Bounded::max_value(), size: 4 * 1024 }).await; } @@ -1452,30 +1455,57 @@ mod remote_tests { .unwrap(); let pool_state = offchainify(&mut ext); - ext.execute_with(|| { - // requires the block number type in our tests to be same as with mainnet, u32. 
- let mut now = frame_system::Pallet::::block_number(); - let mut duration = 0; + let mut now = ext.execute_with(|| { UnsignedLimits::::put(Some(limits)); - loop { + // requires the block number type in our tests to be same as with mainnet, u32. + frame_system::Pallet::::block_number() + }); + + let mut duration = 0; + // set the version to 1, as if the upgrade happened. + ext.state_version = sp_core::StateVersion::V1; + + loop { + let finished = ext.execute_with(|| { run_to_block_and_drain_pool(now + 1, Arc::clone(&pool_state)); if StateTrieMigration::migration_process().finished() { - break + return true } duration += 1; now += 1; + false + }); + + let (top_left, child_left) = + ext.as_backend().essence().check_migration_state().unwrap(); + log::info!( + target: LOG_TARGET, + "(top_left: {}, child_left {})", + top_left, + child_left, + ); + + if finished { + break } + } + ext.execute_with(|| { log::info!( target: LOG_TARGET, "finished offchain-worker migration in {} block, final state of the task: {:?}", duration, StateTrieMigration::migration_process() ); - }) + }); + + let (top_left, child_left) = + ext.as_backend().essence().check_migration_state().unwrap(); + assert_eq!(top_left, 0); + assert_eq!(child_left, 0); }; // item being the bottleneck - // run_with_limits(MigrationLimits { item: 1000, size: 4 * 1024 * 1024 }).await; + run_with_limits(MigrationLimits { item: 1000, size: 4 * 1024 * 1024 }).await; // size being the bottleneck run_with_limits(MigrationLimits { item: Bounded::max_value(), size: 2 * 1024 * 1024 }) .await; From ba54fd593348c61b02cf0a00e46d8f0af7842cb2 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Mon, 15 Nov 2021 16:40:49 +0100 Subject: [PATCH 148/188] add comment --- frame/state-trie-migration/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index cc1bf10b514ea..26d84fa8f1bed 100644 --- a/frame/state-trie-migration/src/lib.rs +++ 
b/frame/state-trie-migration/src/lib.rs @@ -1349,6 +1349,7 @@ mod remote_tests { use remote_externalities::{Mode, OfflineConfig, OnlineConfig}; use sp_runtime::traits::Bounded; + // TODO: show PoV size per block? // we only use the hash type from this, so using the mock should be fine. type Block = sp_runtime::testing::Block; @@ -1505,7 +1506,7 @@ mod remote_tests { assert_eq!(child_left, 0); }; // item being the bottleneck - run_with_limits(MigrationLimits { item: 1000, size: 4 * 1024 * 1024 }).await; + run_with_limits(MigrationLimits { item: 16 * 1024, size: 4 * 1024 * 1024 }).await; // size being the bottleneck run_with_limits(MigrationLimits { item: Bounded::max_value(), size: 2 * 1024 * 1024 }) .await; From 64e4fba7e6c33111e25b43bf72579c70a4d87aa5 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Tue, 16 Nov 2021 16:53:50 +0100 Subject: [PATCH 149/188] we have PoV tracking baby --- frame/state-trie-migration/src/lib.rs | 16 ++++++++--- primitives/io/src/lib.rs | 4 +++ primitives/state-machine/src/ext.rs | 2 +- primitives/state-machine/src/lib.rs | 2 ++ primitives/state-machine/src/testing.rs | 35 ++++++++++++++++++++++++- 5 files changed, 54 insertions(+), 5 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 26d84fa8f1bed..f51b729f7c53d 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1345,9 +1345,11 @@ mod remote_tests { use std::sync::Arc; use super::{mock::*, *}; + use codec::Encode; use mock::run_to_block_and_drain_pool; use remote_externalities::{Mode, OfflineConfig, OnlineConfig}; - use sp_runtime::traits::Bounded; + use sp_io::InMemoryProvingBackend; + use sp_runtime::traits::{Bounded, HashFor}; // TODO: show PoV size per block? // we only use the hash type from this, so using the mock should be fine. 
@@ -1387,7 +1389,10 @@ mod remote_tests { ext.state_version = sp_core::StateVersion::V1; loop { - let finished = ext.execute_with(|| { + let trie_backend = ext.backend.clone(); + let last_state_root = ext.backend.root().clone(); + let proving_backend = InMemoryProvingBackend::new(&trie_backend); + let (finished, proof) = ext.execute_and_get_proof(&proving_backend, || { run_to_block(now + 1); if StateTrieMigration::migration_process().finished() { return true @@ -1401,7 +1406,12 @@ mod remote_tests { ext.as_backend().essence().check_migration_state().unwrap(); log::info!( target: LOG_TARGET, - "(top_left: {}, child_left {})", + "proceeded to #{}, proof size: {} (top_left: {}, child_left {})", + now, + proof + .into_compact_proof::>(last_state_root) + .unwrap() + .encoded_size(), top_left, child_left, ); diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index fe7f4698a3c1f..eb5daef125b52 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -1521,6 +1521,10 @@ pub fn oom(_: core::alloc::Layout) -> ! { #[cfg(feature = "std")] pub type TestExternalities = sp_state_machine::TestExternalities; +#[cfg(feature = "std")] +pub type InMemoryProvingBackend<'a> = + sp_state_machine::InMemoryProvingBackend<'a, sp_core::Blake2Hasher>; + /// The host functions Substrate provides for the Wasm runtime environment. /// /// All these host functions will be callable from inside the Wasm environment. diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index fa27f8809ea54..d8ce9a6f54360 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -102,7 +102,7 @@ where /// The overlayed changes to write to. overlay: &'a mut OverlayedChanges, /// The storage backend to read from. - backend: &'a B, + pub(crate) backend: &'a B, /// The cache for the storage transactions. storage_transaction_cache: &'a mut StorageTransactionCache, /// Changes trie state to read from. 
diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index b1a0039494dfd..224b57dfb195e 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -211,6 +211,8 @@ mod execution { /// Trie backend with in-memory storage. pub type InMemoryBackend = TrieBackend, H>; + pub type InMemoryProvingBackend<'a, H> = ProvingBackend<'a, MemoryDB, H>; + /// Strategy for executing a call into the runtime. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum ExecutionStrategy { diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 5b16d0892684d..d5c1455dcdf67 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -29,7 +29,8 @@ use crate::{ InMemoryStorage as ChangesTrieInMemoryStorage, State as ChangesTrieState, }, ext::Ext, - InMemoryBackend, OverlayedChanges, StorageKey, StorageTransactionCache, StorageValue, + InMemoryBackend, InMemoryProvingBackend, OverlayedChanges, StorageKey, + StorageTransactionCache, StorageValue, }; use codec::Decode; @@ -45,6 +46,7 @@ use sp_core::{ StateVersion, }; use sp_externalities::{Extension, ExtensionStore, Extensions}; +use sp_trie::StorageProof; /// Simple HashMap-based Externalities impl. pub struct TestExternalities @@ -88,6 +90,26 @@ where ) } + pub fn proving_ext<'a>( + &'a mut self, + proving_backend: &'a InMemoryProvingBackend<'a, H>, + ) -> Ext> { + Ext::new( + &mut self.overlay, + &mut self.storage_transaction_cache, + &proving_backend, + match self.changes_trie_config.clone() { + Some(config) => Some(ChangesTrieState { + config, + zero: 0.into(), + storage: &self.changes_trie_storage, + }), + None => None, + }, + Some(&mut self.extensions), + ) + } + /// Create a new instance of `TestExternalities` with storage. 
pub fn new(storage: Storage) -> Self { Self::new_with_code_and_state(&[], storage, Default::default()) @@ -230,6 +252,17 @@ where sp_externalities::set_and_run_with_externalities(&mut ext, execute) } + pub fn execute_and_get_proof<'a, R>( + &'a mut self, + proving_backend: &'a InMemoryProvingBackend<'a, H>, + execute: impl FnOnce() -> R, + ) -> (R, StorageProof) { + let mut ext = self.proving_ext(proving_backend); + let outcome = sp_externalities::set_and_run_with_externalities(&mut ext, execute); + let proof = ext.backend.extract_proof(); + (outcome, proof) + } + /// Execute the given closure while `self` is set as externalities. /// /// Returns the result of the given closure, if no panics occured. From b367aac27ba2463cefe4bd63791ae6b706248f7c Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 17 Nov 2021 16:10:26 +0000 Subject: [PATCH 150/188] document stuff, but proof size is still wrong --- frame/state-trie-migration/src/lib.rs | 3 ++- primitives/io/src/lib.rs | 2 ++ primitives/state-machine/src/lib.rs | 1 + primitives/state-machine/src/proving_backend.rs | 5 +++++ primitives/state-machine/src/testing.rs | 12 ++++++++++-- 5 files changed, 20 insertions(+), 3 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index f51b729f7c53d..fe51dfea1ce02 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1138,6 +1138,7 @@ mod mock { pub fn run_to_block(n: u32) -> H256 { let mut root = Default::default(); + log::debug!(target: LOG_TARGET, "running from {:?} to {:?}", System::block_number(), n); while System::block_number() < n { System::set_block_number(System::block_number() + 1); System::on_initialize(System::block_number()); @@ -1390,7 +1391,7 @@ mod remote_tests { loop { let trie_backend = ext.backend.clone(); - let last_state_root = ext.backend.root().clone(); + let last_state_root = trie_backend.root().clone(); let proving_backend = 
InMemoryProvingBackend::new(&trie_backend); let (finished, proof) = ext.execute_and_get_proof(&proving_backend, || { run_to_block(now + 1); diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index eb5daef125b52..ca66340fc54b0 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -1521,6 +1521,8 @@ pub fn oom(_: core::alloc::Layout) -> ! { #[cfg(feature = "std")] pub type TestExternalities = sp_state_machine::TestExternalities; +/// A backend capable of generating storage proofs, with hash types aligned with +/// [`TestExternalities`]. #[cfg(feature = "std")] pub type InMemoryProvingBackend<'a> = sp_state_machine::InMemoryProvingBackend<'a, sp_core::Blake2Hasher>; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 224b57dfb195e..527c9081bf429 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -211,6 +211,7 @@ mod execution { /// Trie backend with in-memory storage. pub type InMemoryBackend = TrieBackend, H>; + /// Proving Trie backend with in-memory storage. pub type InMemoryProvingBackend<'a, H> = ProvingBackend<'a, MemoryDB, H>; /// Strategy for executing a call into the runtime. diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 6d359c132e2a7..b2cf50149e683 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -222,6 +222,11 @@ where pub fn estimate_encoded_size(&self) -> usize { self.0.essence().backend_storage().proof_recorder.estimate_encoded_size() } + + /// Clear the proof recorded data. 
+ pub fn clear_recorder(&self) { + self.0.essence().backend_storage().proof_recorder.reset() + } } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index d5c1455dcdf67..ea043c6edb3f7 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -29,8 +29,8 @@ use crate::{ InMemoryStorage as ChangesTrieInMemoryStorage, State as ChangesTrieState, }, ext::Ext, - InMemoryBackend, InMemoryProvingBackend, OverlayedChanges, StorageKey, - StorageTransactionCache, StorageValue, + InMemoryBackend, InMemoryProvingBackend, OverlayedChanges, StorageKey, StorageTransactionCache, + StorageValue, }; use codec::Decode; @@ -90,6 +90,9 @@ where ) } + /// Get an externalities implementation, using the given `proving_backend`. + /// + /// This will be capable of computing the PoV. See [`execute_and_get_proof`]. pub fn proving_ext<'a>( &'a mut self, proving_backend: &'a InMemoryProvingBackend<'a, H>, @@ -252,6 +255,10 @@ where sp_externalities::set_and_run_with_externalities(&mut ext, execute) } + /// Execute the given closure while `self`, with `proving_backend` as backend, is set as + /// externalities. + /// + /// Returns the result of the given closure, and the storage proof. 
pub fn execute_and_get_proof<'a, R>( &'a mut self, proving_backend: &'a InMemoryProvingBackend<'a, H>, @@ -260,6 +267,7 @@ where let mut ext = self.proving_ext(proving_backend); let outcome = sp_externalities::set_and_run_with_externalities(&mut ext, execute); let proof = ext.backend.extract_proof(); + ext.backend.clear_recorder(); (outcome, proof) } From a5a2a2784ca4fee71f8d58a1c9334bfeba135ed3 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 17 Nov 2021 17:04:52 +0000 Subject: [PATCH 151/188] FUCK YEAH --- frame/state-trie-migration/src/lib.rs | 6 +++--- primitives/state-machine/src/testing.rs | 11 +++++++---- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index fe51dfea1ce02..310675403275c 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1343,16 +1343,14 @@ mod test { #[cfg(all(test, feature = "remote-tests"))] mod remote_tests { - use std::sync::Arc; - use super::{mock::*, *}; use codec::Encode; use mock::run_to_block_and_drain_pool; use remote_externalities::{Mode, OfflineConfig, OnlineConfig}; use sp_io::InMemoryProvingBackend; use sp_runtime::traits::{Bounded, HashFor}; + use std::sync::Arc; - // TODO: show PoV size per block? // we only use the hash type from this, so using the mock should be fine. type Block = sp_runtime::testing::Block; @@ -1416,6 +1414,8 @@ mod remote_tests { top_left, child_left, ); + proving_backend.clear_recorder(); + ext.commit_all().unwrap(); if finished { break diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index ea043c6edb3f7..ea4e7cd688750 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -258,16 +258,19 @@ where /// Execute the given closure while `self`, with `proving_backend` as backend, is set as /// externalities. 
/// + /// Based on where `proving_backend` comes from, this may or may bot affect `self.ext`. + /// /// Returns the result of the given closure, and the storage proof. pub fn execute_and_get_proof<'a, R>( &'a mut self, proving_backend: &'a InMemoryProvingBackend<'a, H>, execute: impl FnOnce() -> R, ) -> (R, StorageProof) { - let mut ext = self.proving_ext(proving_backend); - let outcome = sp_externalities::set_and_run_with_externalities(&mut ext, execute); - let proof = ext.backend.extract_proof(); - ext.backend.clear_recorder(); + use codec::Encode; + let mut proving_ext = self.proving_ext(proving_backend); + let outcome = sp_externalities::set_and_run_with_externalities(&mut proving_ext, execute); + let proof = proving_ext.backend.extract_proof(); + proving_ext.backend.clear_recorder(); (outcome, proof) } From 8a5cc10d9a747d447054cf92c998766c16d64a7d Mon Sep 17 00:00:00 2001 From: kianenigma Date: Thu, 18 Nov 2021 17:56:59 +0000 Subject: [PATCH 152/188] a big batch of review comments --- Cargo.lock | 1 + frame/bags-list/remote-tests/src/migration.rs | 4 +- .../remote-tests/src/sanity_check.rs | 4 +- frame/state-trie-migration/Cargo.toml | 1 + frame/state-trie-migration/src/lib.rs | 199 ++++++++++++------ primitives/state-machine/src/testing.rs | 1 - .../state-machine/src/trie_backend_essence.rs | 2 +- utils/frame/remote-externalities/src/lib.rs | 2 +- utils/frame/try-runtime/cli/src/lib.rs | 2 +- 9 files changed, 136 insertions(+), 80 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 35122d2f4b0ff..c173c0e193629 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6058,6 +6058,7 @@ dependencies = [ "sp-std", "sp-tracing", "tokio", + "zstd", ] [[package]] diff --git a/frame/bags-list/remote-tests/src/migration.rs b/frame/bags-list/remote-tests/src/migration.rs index 514744b07bbdf..3e20d6aead6af 100644 --- a/frame/bags-list/remote-tests/src/migration.rs +++ b/frame/bags-list/remote-tests/src/migration.rs @@ -34,9 +34,7 @@ pub async fn execute( 
.mode(Mode::Online(OnlineConfig { transport: ws_url.to_string().into(), pallets: vec![pallet_staking::Pallet::::name().to_string()], - at: None, - state_snapshot: None, - scrape_children: false, + ..Default::default() })) .build() .await diff --git a/frame/bags-list/remote-tests/src/sanity_check.rs b/frame/bags-list/remote-tests/src/sanity_check.rs index e035b1cfc0ef8..48a1f321f7026 100644 --- a/frame/bags-list/remote-tests/src/sanity_check.rs +++ b/frame/bags-list/remote-tests/src/sanity_check.rs @@ -35,9 +35,7 @@ pub async fn execute( .mode(Mode::Online(OnlineConfig { transport: ws_url.to_string().into(), pallets: vec![pallet_bags_list::Pallet::::name().to_string()], - at: None, - state_snapshot: None, - scrape_children: false, + ..Default::default() })) .inject_hashed_prefix(&>::prefix_hash()) .inject_hashed_prefix(&>::prefix_hash()) diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index b0b3584e35048..af334c20954cb 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -32,6 +32,7 @@ parking_lot = "0.11.0" sp-tracing = { path = "../../primitives/tracing", version = "4.0.0-dev" } remote-externalities = { path = "../../utils/frame/remote-externalities", version = "0.10.0-dev" } tokio = { version = "1.10", features = ["macros"] } +zstd = "0.9" [features] default = ["std"] diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 310675403275c..1e9723fcfc0a9 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -95,9 +95,7 @@ pub mod pallet { offchain::{SendTransactionTypes, SubmitTransaction}, pallet_prelude::*, }; - use sp_core::{ - hexdisplay::HexDisplay, storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX, - }; + use sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX; use sp_runtime::{ offchain::storage::{MutateStorageError, StorageValueRef}, traits::{Saturating, Zero}, @@ 
-132,14 +130,14 @@ pub mod pallet { /// The top key that we currently have to iterate. /// /// If it does not exist, it means that the migration is done and no further keys exist. - pub(crate) last_top: Option>, + pub(crate) current_top: Option>, /// The last child key that we have processed. /// /// This is a child key under the current `self.last_top`. /// /// If this is set, no further top keys are processed until the child key migration is /// complete. - pub(crate) last_child: Option>, + pub(crate) current_child: Option>, /// A marker to indicate if the previous tick was a child tree migration or not. pub(crate) prev_tick_child: bool, @@ -187,11 +185,11 @@ pub mod pallet { f.debug_struct("MigrationTask") .field( "top", - &self.last_top.as_ref().map(|d| sp_core::hexdisplay::HexDisplay::from(d)), + &self.current_top.as_ref().map(|d| sp_core::hexdisplay::HexDisplay::from(d)), ) .field( "child", - &self.last_child.as_ref().map(|d| sp_core::hexdisplay::HexDisplay::from(d)), + &self.current_child.as_ref().map(|d| sp_core::hexdisplay::HexDisplay::from(d)), ) .field("prev_tick_child", &self.prev_tick_child) .field("dyn_top_items", &self.dyn_top_items) @@ -207,8 +205,8 @@ pub mod pallet { impl Default for MigrationTask { fn default() -> Self { Self { - last_top: Some(Default::default()), - last_child: Default::default(), + current_top: Some(Default::default()), + current_child: Default::default(), dyn_child_items: Default::default(), dyn_top_items: Default::default(), dyn_size: Default::default(), @@ -224,7 +222,7 @@ pub mod pallet { impl MigrationTask { /// Return true if the task is finished. pub(crate) fn finished(&self) -> bool { - self.last_top.is_none() && self.last_child.is_none() + self.current_top.is_none() && self.current_child.is_none() } /// Returns `true` if the task fully complies with the given limits. @@ -234,7 +232,7 @@ pub mod pallet { /// Check if there's any work left, or if we have exhausted the limits already. 
fn exhausted(&self, limits: MigrationLimits) -> bool { - self.last_top.is_none() || + self.current_top.is_none() || self.dyn_total_items() >= limits.item || self.dyn_size >= limits.size } @@ -247,7 +245,7 @@ pub mod pallet { /// Migrate keys until either of the given limits are exhausted, or if no more top keys /// exist. /// - /// Note that this returns after the **first** migration tick that causes exhaustion. In + /// Note that this can return after the **first** migration tick that causes exhaustion. In /// other words, this should not be used in any environment where resources are strictly /// bounded (e.g. a parachain), but it is acceptable otherwise (relay chain, offchain /// workers). @@ -278,7 +276,7 @@ pub mod pallet { /// /// This function is the core of this entire pallet. fn migrate_tick(&mut self) { - match (self.last_top.as_ref(), self.last_child.as_ref()) { + match (self.current_top.as_ref(), self.current_child.as_ref()) { (Some(_), Some(_)) => { // we're in the middle of doing work on a child tree. self.migrate_child(); @@ -302,17 +300,24 @@ pub mod pallet { self.migrate_top(); }, (true, false) => { - // start going into a child key. + // start going into a child key. In the first iteration, we always let maybe_first_child_key = { - let child_top_key = Pallet::::child_io_key(top_key); + // just in case there's some data in `&[]`, read it. Since we can't + // check this without reading the actual key, and given that this + // function should always read at most one key, we return after + // this. The rest of the migration should happen in the next tick. 
+ let child_top_key = Pallet::::child_io_key_or_halt(top_key); + let _ = sp_io::default_child_storage::get(child_top_key, &vec![]); sp_io::default_child_storage::next_key(child_top_key, &vec![]) }; if let Some(first_child_key) = maybe_first_child_key { - self.last_child = Some(first_child_key); + self.current_child = Some(first_child_key); self.prev_tick_child = true; - self.migrate_child(); } else { - self.migrate_top(); + // we have already done a (pretty useless) child key migration, just + // set the flag. Since we don't set the `self.current_child`, next + // tick will move forward to the next top key. + self.prev_tick_child = true; } }, (true, true) => { @@ -341,10 +346,10 @@ pub mod pallet { /// /// It updates the dynamic counters. fn migrate_child(&mut self) { - let last_child = self.last_child.as_ref().expect("value checked to be `Some`; qed"); - let last_top = self.last_top.clone().expect("value checked to be `Some`; qed"); + let last_child = self.current_child.as_ref().expect("value checked to be `Some`; qed"); + let last_top = self.current_top.clone().expect("value checked to be `Some`; qed"); - let child_root = Pallet::::child_io_key(&last_top); + let child_root = Pallet::::child_io_key_or_halt(&last_top); let added_size = if let Some(data) = sp_io::default_child_storage::get(child_root, &last_child) { self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); @@ -356,15 +361,15 @@ pub mod pallet { self.dyn_child_items.saturating_inc(); let next_key = sp_io::default_child_storage::next_key(child_root, last_child); - self.last_child = next_key; - log!(trace, "migrated a child key with size: {:?}, next task: {:?}", self, added_size); + self.current_child = next_key; + log!(trace, "migrated a child key with size: {:?}, next task: {:?}", added_size, self,); } /// Migrate the current top key, setting it to its new value, if one exists. /// /// It updates the dynamic counters. 
fn migrate_top(&mut self) { - let last_top = self.last_top.as_ref().expect("value checked to be `Some`; qed"); + let last_top = self.current_top.as_ref().expect("value checked to be `Some`; qed"); let added_size = if let Some(data) = sp_io::storage::get(&last_top) { self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); sp_io::storage::set(last_top, &data); @@ -375,7 +380,7 @@ pub mod pallet { self.dyn_top_items.saturating_inc(); let next_key = sp_io::storage::next_key(last_top); - self.last_top = next_key; + self.current_top = next_key; log!(trace, "migrated a top key with size {}, next_task = {:?}", added_size, self); } } @@ -443,7 +448,13 @@ pub mod pallet { /// The number of items that offchain worker will subtract from the first item count that /// causes an over-consumption. /// - /// A value around 5-10 is reasonable. + /// This is a safety feature to assist the offchain worker submitted transactions and help + /// them not exceed the byte limit of the task. Nonetheless, the fundamental problem is that + /// if a transaction is ensured to not exceed any limit at block `t` when it is generated, + /// there is no guarantee that the same assumption holds at block `t + x`, when this + /// transaction is actually executed. This is where this value comes into play, and by + /// reducing the number of keys that will be migrated, further reduces the chance of byte + /// limit being exceeded. type UnsignedBackOff: Get; /// The repeat frequency of offchain workers. @@ -553,6 +564,7 @@ pub mod pallet { task.dyn_child_items, MigrationCompute::Unsigned, )); + MigrationProcess::::put(task); Ok(().into()) @@ -598,7 +610,7 @@ pub mod pallet { // let the imbalance burn. 
let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); debug_assert!(_remainder.is_zero()); - return Err("Wrong witness data".into()) + return Err("wrong witness data".into()) } Self::deposit_event(Event::::Migrated( @@ -635,6 +647,7 @@ pub mod pallet { } } + dbg!(dyn_size, total_size); if dyn_size != total_size { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); debug_assert!(_remainder.is_zero()); @@ -671,12 +684,13 @@ pub mod pallet { let mut dyn_size = 0u32; for child_key in &child_keys { - if let Some(data) = - sp_io::default_child_storage::get(Self::child_io_key(&top_key), &child_key) - { + if let Some(data) = sp_io::default_child_storage::get( + Self::child_io_key(&top_key).ok_or("bad child key")?, + &child_key, + ) { dyn_size = dyn_size.saturating_add(data.len() as u32); sp_io::default_child_storage::set( - Self::child_io_key(&top_key), + Self::child_io_key(&top_key).ok_or("bad child key")?, &child_key, &data, ); @@ -739,7 +753,7 @@ pub mod pallet { if let Some(chain_limits) = Self::unsigned_limits() { let mut task = Self::migration_process(); if task.finished() { - log!(warn, "task is finished, remove `unsigned_limits`."); + log!(debug, "task is finished, remove `unsigned_limits`."); return } @@ -860,23 +874,26 @@ pub mod pallet { } /// Convert a child root key, aka. "Child-bearing top key" into the proper format. - fn child_io_key(root: &Vec) -> &[u8] { + fn child_io_key(root: &Vec) -> Option<&[u8]> { use sp_core::storage::{ChildType, PrefixedStorageKey}; match ChildType::from_prefixed_key(PrefixedStorageKey::new_ref(root)) { - Some((ChildType::ParentKeyId, root)) => root, - None => { - log!( - warn, - "some data seems to be stored under key {:?}, which is a non-default \ - child-trie. 
This is a logical error and shall not happen.", - HexDisplay::from(root), - ); - Self::halt(); - Default::default() - }, + Some((ChildType::ParentKeyId, root)) => Some(root), + _ => None, } } + /// Same as [`child_io_key`], and it halts the auto/unsigned migrations if a bad child root + /// is used. + /// + /// This should be used when we are sure that `root` is a correct default child root. + fn child_io_key_or_halt(root: &Vec) -> &[u8] { + let key = Self::child_io_key(root); + if key.is_none() { + Self::halt(); + } + key.unwrap_or_default() + } + /// Checks if an execution of the offchain worker is permitted at the given block number, or /// not. /// @@ -1058,18 +1075,19 @@ mod mock { pub fn new_test_ext(version: StateVersion, with_pallets: bool) -> sp_io::TestExternalities { use sp_core::storage::ChildInfo; + let minimum_size = sp_core::storage::DEFAULT_MAX_INLINE_VALUE as usize + 1; let mut custom_storage = sp_core::storage::Storage { top: vec![ - (b"key1".to_vec(), vec![1u8; 10]), // 6b657931 - (b"key2".to_vec(), vec![1u8; 20]), // 6b657931 - (b"key3".to_vec(), vec![1u8; 30]), // 6b657931 - (b"key4".to_vec(), vec![1u8; 40]), // 6b657931 - (b"key5".to_vec(), vec![2u8; 50]), // 6b657932 - (b"key6".to_vec(), vec![3u8; 50]), // 6b657934 - (b"key7".to_vec(), vec![4u8; 50]), // 6b657934 - (b"key8".to_vec(), vec![4u8; 50]), // 6b657934 - (b"key9".to_vec(), vec![4u8; 50]), // 6b657934 - (b"CODE".to_vec(), vec![1u8; 100]), // 434f4445 + (b"key1".to_vec(), vec![1u8; minimum_size + 1]), // 6b657931 + (b"key2".to_vec(), vec![1u8; minimum_size + 2]), // 6b657931 + (b"key3".to_vec(), vec![1u8; minimum_size + 3]), // 6b657931 + (b"key4".to_vec(), vec![1u8; minimum_size + 4]), // 6b657931 + (b"key5".to_vec(), vec![1u8; minimum_size + 5]), // 6b657932 + (b"key6".to_vec(), vec![1u8; minimum_size + 6]), // 6b657934 + (b"key7".to_vec(), vec![1u8; minimum_size + 7]), // 6b657934 + (b"key8".to_vec(), vec![1u8; minimum_size + 8]), // 6b657934 + (b"key9".to_vec(), vec![1u8; 
minimum_size + 9]), // 6b657934 + (b"CODE".to_vec(), vec![1u8; minimum_size + 100]), // 434f4445 ] .into_iter() .collect(), @@ -1078,8 +1096,8 @@ mod mock { b"chk1".to_vec(), // 63686b31 sp_core::storage::StorageChild { data: vec![ - (b"key1".to_vec(), vec![1u8; 10]), - (b"key2".to_vec(), vec![2u8; 20]), + (b"key1".to_vec(), vec![1u8; 55]), + (b"key2".to_vec(), vec![2u8; 66]), ] .into_iter() .collect(), @@ -1090,8 +1108,8 @@ mod mock { b"chk2".to_vec(), sp_core::storage::StorageChild { data: vec![ - (b"key1".to_vec(), vec![1u8; 10]), - (b"key2".to_vec(), vec![2u8; 20]), + (b"key1".to_vec(), vec![1u8; 54]), + (b"key2".to_vec(), vec![2u8; 64]), ] .into_iter() .collect(), @@ -1138,7 +1156,7 @@ mod mock { pub fn run_to_block(n: u32) -> H256 { let mut root = Default::default(); - log::debug!(target: LOG_TARGET, "running from {:?} to {:?}", System::block_number(), n); + log::trace!(target: LOG_TARGET, "running from {:?} to {:?}", System::block_number(), n); while System::block_number() < n { System::set_block_number(System::block_number() + 1); System::on_initialize(System::block_number()); @@ -1186,6 +1204,48 @@ mod test { use sp_runtime::{traits::Bounded, StateVersion}; use std::sync::Arc; + #[test] + fn fails_if_no_migration() { + let mut ext = new_test_ext(StateVersion::V0, false); + let root1 = ext.execute_with(|| run_to_block(30)); + + let mut ext2 = new_test_ext(StateVersion::V1, false); + let root2 = ext2.execute_with(|| run_to_block(30)); + + // these two roots should not be the same. + assert_ne!(root1, root2); + } + + #[test] + fn detects_first_child_key() { + use frame_support::storage::child; + let limit = MigrationLimits { item: 1, size: 1000 }; + let mut ext = new_test_ext(StateVersion::V0, false); + + let root_upgraded = ext.execute_with(|| { + child::put(&child::ChildInfo::new_default(b"chk1"), &[], &vec![66u8; 77]); + + AutoLimits::::put(Some(limit)); + let root = run_to_block(30); + + // eventually everything is over. 
+ assert!(matches!( + StateTrieMigration::migration_process(), + MigrationTask { current_child: None, current_top: None, .. } + )); + root + }); + + let mut ext2 = new_test_ext(StateVersion::V1, false); + let root = ext2.execute_with(|| { + child::put(&child::ChildInfo::new_default(b"chk1"), &[], &vec![66u8; 77]); + AutoLimits::::put(Some(limit)); + run_to_block(30) + }); + + assert_eq!(root, root_upgraded); + } + #[test] fn auto_migrate_works() { let run_with_limits = |limit, from, until| { @@ -1206,7 +1266,7 @@ mod test { // eventually everything is over. assert!(matches!( StateTrieMigration::migration_process(), - MigrationTask { last_child: None, last_top: None, .. } + MigrationTask { current_child: None, current_top: None, .. } )); root }); @@ -1268,7 +1328,7 @@ mod test { // multi-item run_with_limits(MigrationLimits { item: 5, size: 1000 }, 10, 100); // multi-item, based on size - run_with_limits(MigrationLimits { item: 1000, size: 128 }, 10, 100); + run_with_limits(MigrationLimits { item: 1000, size: 512 }, 10, 100); // unbounded run_with_limits( MigrationLimits { item: Bounded::max_value(), size: Bounded::max_value() }, @@ -1327,12 +1387,12 @@ mod test { } #[test] - fn custom_migrate_works() { + fn custom_migrate_top_works() { new_test_ext(StateVersion::V0, true).execute_with(|| { frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( Origin::signed(1), vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], - 10 + 20 + 30, + 3 + sp_core::storage::DEFAULT_MAX_INLINE_VALUE * 3 + 1 + 2 + 3, )); // no funds should remain reserved. 
@@ -1365,7 +1425,6 @@ mod remote_tests { }, OnlineConfig { transport: std::env!("WS_API").to_owned().into(), - scrape_children: true, state_snapshot: Some( "/home/kianenigma/remote-builds/state".to_owned().into(), ), @@ -1403,14 +1462,15 @@ mod remote_tests { let (top_left, child_left) = ext.as_backend().essence().check_migration_state().unwrap(); + let compact_proof = + proof.clone().into_compact_proof::>(last_state_root).unwrap(); log::info!( target: LOG_TARGET, - "proceeded to #{}, proof size: {} (top_left: {}, child_left {})", + "proceeded to #{}, original proof: {}, compact proof size: {}, compact zstd compressed: {} // top_left: {}, child_left: {}", now, - proof - .into_compact_proof::>(last_state_root) - .unwrap() - .encoded_size(), + proof.encoded_size(), + compact_proof.encoded_size(), + zstd::stream::encode_all(&compact_proof.encode()[..], 0).unwrap().len(), top_left, child_left, ); @@ -1454,7 +1514,6 @@ mod remote_tests { }, OnlineConfig { transport: std::env!("WS_API").to_owned().into(), - scrape_children: true, state_snapshot: Some( "/home/kianenigma/remote-builds/state".to_owned().into(), ), diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index ea4e7cd688750..0b40a467ba391 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -266,7 +266,6 @@ where proving_backend: &'a InMemoryProvingBackend<'a, H>, execute: impl FnOnce() -> R, ) -> (R, StorageProof) { - use codec::Encode; let mut proving_ext = self.proving_ext(proving_backend); let outcome = sp_externalities::set_and_run_with_externalities(&mut proving_ext, execute); let proof = proving_ext.backend.extract_proof(); diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index ecb9af2ed815f..f7401418d9782 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -445,7 +445,7 
@@ where /// migration did finished as it is only an utility. // original author: @cheme pub fn check_migration_state(&self) -> Result<(u64, u64)> { - let threshold: u32 = 33; + let threshold: u32 = sp_core::storage::DEFAULT_MAX_INLINE_VALUE; let mut nb_to_migrate = 0; let mut nb_to_migrate_child = 0; diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 54087146b57c6..737ef311a41cd 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -156,7 +156,7 @@ impl Default for OnlineConfig { fn default() -> Self { Self { transport: Transport { uri: DEFAULT_TARGET.to_owned(), client: None }, - scrape_children: false, + scrape_children: true, at: None, state_snapshot: None, pallets: vec![], diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index 65eaad062b77a..6d466ab11ed40 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -484,7 +484,7 @@ impl State { transport: uri.to_owned().into(), state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), pallets: pallets.to_owned().unwrap_or_default(), - scrape_children: false, + scrape_children: true, at, })) .inject_hashed_key( From 14d9f425456dc41e87423b1db7c4ac042f2d5898 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Fri, 19 Nov 2021 15:38:23 +0000 Subject: [PATCH 153/188] add more tests --- frame/state-trie-migration/src/lib.rs | 236 +++++++++++++++++++++----- 1 file changed, 196 insertions(+), 40 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 1e9723fcfc0a9..b3b036a46c60b 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -84,7 +84,7 @@ macro_rules! 
log { #[frame_support::pallet] pub mod pallet { use frame_support::{ - dispatch::TransactionPriority, + dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo, TransactionPriority}, ensure, pallet_prelude::*, traits::{Currency, Get}, @@ -109,6 +109,8 @@ pub mod pallet { pub trait WeightInfo { fn process_top_key(x: u32) -> Weight; fn continue_migrate() -> Weight; + fn migrate_custom_top_fail() -> Weight; + fn migrate_custom_top_success() -> Weight; } impl WeightInfo for () { @@ -118,6 +120,12 @@ pub mod pallet { fn continue_migrate() -> Weight { 1000000 } + fn migrate_custom_top_fail() -> Weight { + 1000000 + } + fn migrate_custom_top_success() -> Weight { + 1000000 + } } /// A migration task stored in state. @@ -411,7 +419,9 @@ pub mod pallet { pub enum Event { /// Given number of `(top, child)` keys were migrated respectively, with the given /// `compute`. - Migrated(u32, u32, MigrationCompute), + Migrated { top: u32, child: u32, compute: MigrationCompute }, + /// Some account got slashed by the given amount. + Slashed { who: T::AccountId, amount: BalanceOf }, } /// The outer Pallet struct. @@ -436,6 +446,11 @@ pub mod pallet { /// This should reflect the average storage value size in the worse case. type SignedDepositPerItem: Get>; + /// The base value of [`SignedDepositPerItem`]. + /// + /// Final deposit is `items * SignedDepositPerItem + SignedDepositBase`. + type SignedDepositBase: Get>; + /// The maximum limits that the signed migration could use. type SignedMigrationMaxLimits: Get; @@ -528,12 +543,20 @@ pub mod pallet { /// This can only be valid if it is generated from the local node, which means only /// validators can generate this call. /// - /// The `item_limit` is the maximum number of items that can be read whilst ensuring that - /// the migration does not go over `Self::unsigned_limits().size`. 
+ /// The `item_limit` should be used as the limit on the number of items migrated, and the + /// submitter must guarantee that using this item limit, `size` does not go over + /// `Self::unsigned_limits().size`. /// /// The `witness_size` should always be equal to `Self::unsigned_limits().size` and is only /// used for weighing. - #[pallet::weight(Pallet::::dynamic_weight(*item_limit, *witness_size))] + #[pallet::weight( + // for reading and writing `migration_process` + T::DbWeight::get().reads_writes(1, 1) + .saturating_add( + // for executing the migration itself. + Pallet::::dynamic_weight(*item_limit, *witness_size) + ) + )] pub fn continue_migrate_unsigned( origin: OriginFor, item_limit: u32, @@ -559,11 +582,11 @@ pub mod pallet { // limit. assert!(task.fully_complies_with(chain_limits)); - Self::deposit_event(Event::::Migrated( - task.dyn_top_items, - task.dyn_child_items, - MigrationCompute::Unsigned, - )); + Self::deposit_event(Event::::Migrated { + top: task.dyn_top_items, + child: task.dyn_child_items, + compute: MigrationCompute::Unsigned, + }); MigrationProcess::::put(task); @@ -613,11 +636,11 @@ pub mod pallet { return Err("wrong witness data".into()) } - Self::deposit_event(Event::::Migrated( - task.dyn_top_items, - task.dyn_child_items, - MigrationCompute::Signed, - )); + Self::deposit_event(Event::::Migrated { + top: task.dyn_top_items, + child: task.dyn_child_items, + compute: MigrationCompute::Signed, + }); MigrationProcess::::put(task); Ok(Pays::No.into()) @@ -627,7 +650,13 @@ pub mod pallet { /// /// This does not affect the global migration process tracker ([`MigrationProcess`]), and /// should only be used in case any keys are leftover due to a bug. 
- #[pallet::weight(Pallet::::dynamic_weight(keys.len() as u32, *total_size))] + #[pallet::weight( + T::WeightInfo::migrate_custom_top_success() + .max(T::WeightInfo::migrate_custom_top_fail()) + .saturating_add( + Pallet::::dynamic_weight(keys.len() as u32, *total_size) + ) + )] pub fn migrate_custom_top( origin: OriginFor, keys: Vec>, @@ -636,7 +665,9 @@ pub mod pallet { let who = ensure_signed(origin)?; // ensure they can pay more than the fee. - let deposit = T::SignedDepositPerItem::get().saturating_mul((keys.len() as u32).into()); + let deposit = T::SignedDepositBase::get().saturating_add( + T::SignedDepositPerItem::get().saturating_mul((keys.len() as u32).into()), + ); ensure!(T::Currency::can_slash(&who, deposit), "not enough funds"); let mut dyn_size = 0u32; @@ -647,18 +678,17 @@ pub mod pallet { } } - dbg!(dyn_size, total_size); if dyn_size != total_size { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); debug_assert!(_remainder.is_zero()); return Err("Wrong witness data".into()) } - Self::deposit_event(Event::::Migrated( - keys.len() as u32, - 0, - MigrationCompute::Signed, - )); + Self::deposit_event(Event::::Migrated { + top: keys.len() as u32, + child: 0, + compute: MigrationCompute::Signed, + }); Ok(().into()) } @@ -668,7 +698,13 @@ pub mod pallet { /// /// This does not affect the global migration process tracker ([`MigrationProcess`]), and /// should only be used in case any keys are leftover due to a bug. - #[pallet::weight(Pallet::::dynamic_weight(child_keys.len() as u32, *total_size))] + #[pallet::weight( + T::WeightInfo::migrate_custom_top_success() + .max(T::WeightInfo::migrate_custom_top_fail()) + .saturating_add( + Pallet::::dynamic_weight(child_keys.len() as u32, *total_size) + ) + )] pub fn migrate_custom_child( origin: OriginFor, top_key: Vec, @@ -678,8 +714,9 @@ pub mod pallet { let who = ensure_signed(origin)?; // ensure they can pay more than the fee. 
- let deposit = - T::SignedDepositPerItem::get().saturating_mul((child_keys.len() as u32).into()); + let deposit = T::SignedDepositBase::get().saturating_add( + T::SignedDepositPerItem::get().saturating_mul((child_keys.len() as u32).into()), + ); ensure!(T::Currency::can_slash(&who, deposit), "not enough funds"); let mut dyn_size = 0u32; @@ -700,15 +737,25 @@ pub mod pallet { if dyn_size != total_size { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); debug_assert!(_remainder.is_zero()); - return Err("Wrong witness data".into()) + Self::deposit_event(Event::::Slashed { who, amount: deposit }); + Err(DispatchErrorWithPostInfo { + error: "bad witness".into(), + post_info: PostDispatchInfo { + actual_weight: Some(T::WeightInfo::migrate_custom_top_fail()), + pays_fee: Pays::Yes, + }, + }) + } else { + Self::deposit_event(Event::::Migrated { + top: 0, + child: child_keys.len() as u32, + compute: MigrationCompute::Signed, + }); + Ok(PostDispatchInfo { + actual_weight: Some(T::WeightInfo::migrate_custom_top_success()), + pays_fee: Pays::Yes, + }) } - - Self::deposit_event(Event::::Migrated( - 0, - child_keys.len() as u32, - MigrationCompute::Signed, - )); - Ok(().into()) } } @@ -731,11 +778,11 @@ pub mod pallet { task.dyn_child_items, task.dyn_size, ); - Self::deposit_event(Event::::Migrated( - task.dyn_top_items as u32, - task.dyn_child_items as u32, - MigrationCompute::Auto, - )); + Self::deposit_event(Event::::Migrated { + top: task.dyn_top_items, + child: task.dyn_child_items, + compute: MigrationCompute::Auto, + }); MigrationProcess::::put(task); weight @@ -940,7 +987,8 @@ pub mod pallet { #[cfg(feature = "runtime-benchmarks")] mod benchmarks { - use super::*; + use super::{pallet::Pallet as StateTrieMigration, *}; + use frame_support::traits::Currency; // The size of the key seemingly makes no difference in the read/write time, so we make it // constant. 
@@ -951,6 +999,41 @@ mod benchmarks { let null = MigrationLimits::default(); let caller = frame_benchmarking::whitelisted_caller(); }: _(frame_system::RawOrigin::Signed(caller), null, 0) + verify { + assert_eq!(StateTrieMigration::::migration_process(), Default::default()) + } + + migrate_custom_top_success { + let null = MigrationLimits::default(); + let caller = frame_benchmarking::whitelisted_caller(); + let stash = T::Currency::minimum_balance() * BalanceOf::::from(10u32); + T::Currency::make_free_balance_be(&caller, stash); + }: migrate_custom_top(frame_system::RawOrigin::Signed(caller.clone()), Default::default(), 0) + verify { + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); + assert_eq!(T::Currency::free_balance(&caller), stash) + } + + migrate_custom_top_fail { + let null = MigrationLimits::default(); + let caller = frame_benchmarking::whitelisted_caller(); + let stash = T::Currency::minimum_balance() * BalanceOf::::from(10u32); + T::Currency::make_free_balance_be(&caller, stash); + }: { + assert!( + StateTrieMigration::::migrate_custom_top( + frame_system::RawOrigin::Signed(caller.clone()).into(), + Default::default(), + 1, + ).is_err() + ) + } + verify { + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); + // must have gotten slashed + assert!(T::Currency::free_balance(&caller) < stash) + } + process_top_key { let v in 1 .. 
(4 * 1024 * 1024); @@ -962,6 +1045,12 @@ mod benchmarks { let _next = sp_io::storage::next_key(KEY); assert_eq!(data, value); } + + impl_benchmark_test_suite!( + StateTrieMigration, + crate::mock::new_test_ext(sp_runtime::StateVersion::V0, true), + crate::mock::Test + ); } } @@ -1035,6 +1124,7 @@ mod mock { pub const ExistentialDeposit: u64 = 1; pub const OffchainRepeat: u32 = 1; pub const SignedDepositPerItem: u64 = 1; + pub const SignedDepositBase: u64 = 5; pub const SignedMigrationMaxLimits: MigrationLimits = MigrationLimits { size: 1024, item: 5 }; } @@ -1055,6 +1145,7 @@ mod mock { type ControlOrigin = EnsureRoot; type Currency = Balances; type SignedDepositPerItem = SignedDepositPerItem; + type SignedDepositBase = SignedDepositBase; type SignedMigrationMaxLimits = SignedMigrationMaxLimits; type WeightInfo = (); type UnsignedPriority = (); @@ -1201,6 +1292,7 @@ mod mock { #[cfg(test)] mod test { use super::{mock::*, *}; + use sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX; use sp_runtime::{traits::Bounded, StateVersion}; use std::sync::Arc; @@ -1397,6 +1489,70 @@ mod test { // no funds should remain reserved. assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(Balances::free_balance(&1), 1000); + }); + + new_test_ext(StateVersion::V0, true).execute_with(|| { + assert_eq!(Balances::free_balance(&1), 1000); + + // note that we don't expect this to be a noop -- we do slash. + frame_support::assert_err!( + StateTrieMigration::migrate_custom_top( + Origin::signed(1), + vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], + 69, // wrong witness + ), + "Wrong witness data" + ); + + // no funds should remain reserved. 
+ assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!( + Balances::free_balance(&1), + 1000 - (3 * SignedDepositPerItem::get() + SignedDepositBase::get()) + ); + }); + } + + #[test] + fn custom_migrate_child_works() { + let childify = |s: &'static str| { + let mut string = DEFAULT_CHILD_STORAGE_KEY_PREFIX.to_vec(); + string.extend_from_slice(s.as_ref()); + string + }; + + new_test_ext(StateVersion::V0, true).execute_with(|| { + frame_support::assert_ok!(StateTrieMigration::migrate_custom_child( + Origin::signed(1), + childify("chk1"), + vec![b"key1".to_vec(), b"key2".to_vec()], + 55 + 66, + )); + + // no funds should remain reserved. + assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(Balances::free_balance(&1), 1000); + }); + + new_test_ext(StateVersion::V0, true).execute_with(|| { + assert_eq!(Balances::free_balance(&1), 1000); + + // note that we don't expect this to be a noop -- we do slash. + assert!(StateTrieMigration::migrate_custom_child( + Origin::signed(1), + childify("chk1"), + vec![b"key1".to_vec(), b"key2".to_vec()], + 999999, // wrong witness + ) + .is_err()); + + // no funds should remain reserved. 
+ assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!( + Balances::free_balance(&1), + 1000 - (2 * SignedDepositPerItem::get() + SignedDepositBase::get()) + ); }); } } From 4d392a17200e147ca6273da14278a2cd49a9b14d Mon Sep 17 00:00:00 2001 From: kianenigma Date: Fri, 19 Nov 2021 16:02:20 +0000 Subject: [PATCH 154/188] tweak test --- frame/state-trie-migration/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index b3b036a46c60b..78e3d97bdb466 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1654,7 +1654,7 @@ mod remote_tests { }; // item being the bottleneck - run_with_limits(MigrationLimits { item: 32 * 1024, size: 128 * 1024 * 1024 }).await; + run_with_limits(MigrationLimits { item: 8 * 1024, size: 128 * 1024 * 1024 }).await; // size being the bottleneck run_with_limits(MigrationLimits { item: Bounded::max_value(), size: 4 * 1024 }).await; } From d886baa921f955d1f1cf5fc144be4884fe16bb89 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 24 Nov 2021 13:35:00 +0100 Subject: [PATCH 155/188] update config --- bin/node/runtime/src/lib.rs | 6 ++++-- utils/frame/remote-externalities/src/lib.rs | 5 ++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 269398287f38d..f99b20742f8ae 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1246,13 +1246,15 @@ impl pallet_transaction_storage::Config for Runtime { parameter_types! 
{ pub const SignedMigrationMaxLimits: pallet_state_trie_migration::MigrationLimits = pallet_state_trie_migration::MigrationLimits { size: 1024 * 512, item: 512 }; - pub const SignedDepositPerItem: Balance = 1 * DOLLARS; + pub const MigrationSignedDepositPerItem: Balance = 1 * CENTS; + pub const MigrationSignedDepositBase: Balance = 20 * DOLLARS; } impl pallet_state_trie_migration::Config for Runtime { type Event = Event; type ControlOrigin = EnsureRoot; type Currency = Balances; - type SignedDepositPerItem = SignedDepositPerItem; + type SignedDepositPerItem = MigrationSignedDepositPerItem; + type SignedDepositBase = MigrationSignedDepositBase; type SignedMigrationMaxLimits = SignedMigrationMaxLimits; type UnsignedPriority = ImOnlineUnsignedPriority; type UnsignedBackOff = frame_support::traits::ConstU32<5>; diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 737ef311a41cd..41063e01be705 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -581,7 +581,10 @@ impl Builder { .max_request_body_size(u32::MAX) .build(&online.transport.uri) .await - .map_err(|_| "failed to build ws client")?; + .map_err(|e| { + log::error!(target: LOG_TARGET, "{:?}", e); + "failed to build ws client" + })?; online.transport.client = Some(ws_client); // Then, if `at` is not set, set it. 
From 2df11e411b358d3484aa22243f1f3b3a63fa18e5 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Thu, 25 Nov 2021 08:48:44 +0100 Subject: [PATCH 156/188] some remote-ext stuff --- utils/frame/remote-externalities/src/lib.rs | 59 +++++++++++---------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 1f6ca1cf6ab40..d263aa29629f8 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -40,9 +40,9 @@ use std::{ pub mod rpc_api; -type KeyPair = (StorageKey, StorageData); -type TopKeyPairs = Vec; -type ChildKeyPairs = Vec<(ChildInfo, Vec)>; +type KeyValue = (StorageKey, StorageData); +type TopKeyValue = Vec; +type ChildKeyValue = Vec<(ChildInfo, Vec)>; const LOG_TARGET: &str = "remote-ext"; const DEFAULT_TARGET: &str = "wss://rpc.polkadot.io"; @@ -134,10 +134,10 @@ pub struct OnlineConfig { pub state_snapshot: Option, /// The pallets to scrape. If empty, entire chain state will be scraped. pub pallets: Vec, - /// Lookout for child-keys, and scrape them as well if set to true. - pub scrape_children: bool, /// Transport config. pub transport: Transport, + /// Lookout for child-keys, and scrape them as well if set to true. + pub scrape_children: bool, } impl OnlineConfig { @@ -197,7 +197,7 @@ impl Default for SnapshotConfig { pub struct Builder { /// Custom key-pairs to be injected into the externalities. The *hashed* keys and values must /// be given. - hashed_key_values: Vec, + hashed_key_values: Vec, /// Storage entry key prefixes to be injected into the externalities. The *hashed* prefix must /// be given. 
hashed_prefixes: Vec>, @@ -319,14 +319,14 @@ impl Builder { &self, prefix: StorageKey, at: B::Hash, - ) -> Result, &'static str> { + ) -> Result, &'static str> { use jsonrpsee_ws_client::types::traits::Client; use serde_json::to_value; let keys = self.get_keys_paged(prefix, at).await?; let keys_count = keys.len(); debug!(target: LOG_TARGET, "Querying a total of {} top keys", keys.len()); - let mut key_values: Vec = vec![]; + let mut key_values: Vec = vec![]; let client = self.as_online().rpc_client(); for chunk_keys in keys.chunks(BATCH_SIZE) { let batch = chunk_keys @@ -382,7 +382,7 @@ impl Builder { // Internal methods impl Builder { /// Save the given data to the top keys snapshot. - fn save_top_snapshot(&self, data: &[KeyPair], path: &PathBuf) -> Result<(), &'static str> { + fn save_top_snapshot(&self, data: &[KeyValue], path: &PathBuf) -> Result<(), &'static str> { let mut path = path.clone(); path.set_extension("top"); debug!(target: LOG_TARGET, "writing to state snapshot file {:?}", path); @@ -393,7 +393,7 @@ impl Builder { /// Save the given data to the child keys snapshot. fn save_child_snapshot( &self, - data: &ChildKeyPairs, + data: &ChildKeyValue, path: &PathBuf, ) -> Result<(), &'static str> { debug!(target: LOG_TARGET, "writing to state snapshot file {:?}", path); @@ -403,7 +403,7 @@ impl Builder { Ok(()) } - fn load_top_snapshot(&self, path: &PathBuf) -> Result { + fn load_top_snapshot(&self, path: &PathBuf) -> Result { let mut path = path.clone(); path.set_extension("top"); info!(target: LOG_TARGET, "loading top key-pairs from snapshot {:?}", path); @@ -411,7 +411,7 @@ impl Builder { Decode::decode(&mut &*bytes).map_err(|_| "decode failed") } - fn load_child_snapshot(&self, path: &PathBuf) -> Result { + fn load_child_snapshot(&self, path: &PathBuf) -> Result { let mut path = path.clone(); path.set_extension("child"); info!(target: LOG_TARGET, "loading child key-pairs from snapshot {:?}", path); @@ -423,8 +423,8 @@ impl Builder { /// pairs. 
async fn load_child_keys_remote( &self, - top_kp: &[KeyPair], - ) -> Result)>, &'static str> { + top_kp: &[KeyValue], + ) -> Result)>, &'static str> { let child_bearing_top_keys = top_kp .iter() .filter_map( @@ -490,7 +490,7 @@ impl Builder { } /// Load all the `top` keys from the remote config. - async fn load_top_keys_remote(&self) -> Result, &'static str> { + async fn load_top_keys_remote(&self) -> Result, &'static str> { let config = self.as_online(); let at = self .as_online() @@ -506,7 +506,7 @@ impl Builder { let module_kv = self.rpc_get_pairs_paged(hashed_prefix.clone(), at).await?; info!( target: LOG_TARGET, - "downloaded data for module {} (count: {} / prefix: {:?}).", + "downloaded data for module {} (coun {} / prefix: {:?}).", f, module_kv.len(), HexDisplay::from(&hashed_prefix), @@ -563,17 +563,17 @@ impl Builder { pub(crate) async fn pre_build( mut self, - ) -> Result<(Vec, Vec<(ChildInfo, Vec)>), &'static str> { + ) -> Result<(Vec, Vec<(ChildInfo, Vec)>), &'static str> { let mode = self.mode.clone(); - let mut top_kp = match mode { + let mut top_kv = match mode { Mode::Offline(config) => self.load_top_snapshot(&config.state_snapshot.path)?, Mode::Online(config) => { self.init_remote_client().await?; - let top_kp = self.load_top_keys_remote().await?; + let top_kv = self.load_top_keys_remote().await?; if let Some(c) = config.state_snapshot { - self.save_top_snapshot(&top_kp, &c.path)?; + self.save_top_snapshot(&top_kv, &c.path)?; } - top_kp + top_kv }, }; @@ -584,7 +584,7 @@ impl Builder { "extending externalities with {} manually injected key-values", self.hashed_key_values.len() ); - top_kp.extend(self.hashed_key_values.clone()); + top_kv.extend(self.hashed_key_values.clone()); } // exclude manual key values. 
@@ -594,16 +594,16 @@ impl Builder { "excluding externalities from {} keys", self.hashed_blacklist.len() ); - top_kp.retain(|(k, _)| !self.hashed_blacklist.contains(&k.0)) + top_kv.retain(|(k, _)| !self.hashed_blacklist.contains(&k.0)) } - let child_kp = match self.mode { + let child_kv = match self.mode { Mode::Online(ref config) if config.scrape_children => { - let child_kp = self.load_child_keys_remote(&top_kp).await?; + let child_kv = self.load_child_keys_remote(&top_kv).await?; if let Some(c) = &config.state_snapshot { - self.save_child_snapshot(&child_kp, &c.path)?; + self.save_child_snapshot(&child_kv, &c.path)?; } - child_kp + child_kv }, Mode::Offline(ref config) => self.load_child_snapshot(&config.state_snapshot.path).map_err(|why| log::warn!(target: LOG_TARGET, "failed to load child-key file due to {:?}", why) @@ -611,7 +611,7 @@ impl Builder { _ => Default::default(), }; - Ok((top_kp, child_kp)) + Ok((top_kv, child_kv)) } } @@ -623,7 +623,7 @@ impl Builder { } /// Inject a manual list of key and values to the storage. - pub fn inject_hashed_key_value(mut self, injections: &[KeyPair]) -> Self { + pub fn inject_hashed_key_value(mut self, injections: &[KeyValue]) -> Self { for i in injections { self.hashed_key_values.push(i.clone()); } @@ -788,6 +788,7 @@ mod remote_tests { const REMOTE_INACCESSIBLE: &'static str = "Can't reach the remote node. 
Is it running?"; #[tokio::test] + #[ignore] async fn can_build_one_big_pallet() { init_logger(); Builder::::new() From 8080e4c2e6ba51f06bd3df1e93610d6606ca583c Mon Sep 17 00:00:00 2001 From: kianenigma Date: Tue, 4 Jan 2022 13:57:49 +0000 Subject: [PATCH 157/188] delete some of the old stuff --- client/api/src/cht.rs | 473 ------- client/light/src/backend.rs | 592 --------- client/light/src/call_executor.rs | 219 ---- client/service/src/client/call_executor.rs | 433 ------- primitives/api/Cargo.toml | 1 - primitives/api/src/lib.rs | 25 - primitives/io/Cargo.toml | 1 - primitives/io/src/lib.rs | 4 +- .../state-machine/src/changes_trie/build.rs | 1086 ----------------- .../state-machine/src/changes_trie/mod.rs | 430 ------- 10 files changed, 2 insertions(+), 3262 deletions(-) delete mode 100644 client/api/src/cht.rs delete mode 100644 client/light/src/backend.rs delete mode 100644 client/light/src/call_executor.rs delete mode 100644 client/service/src/client/call_executor.rs delete mode 100644 primitives/state-machine/src/changes_trie/build.rs delete mode 100644 primitives/state-machine/src/changes_trie/mod.rs diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs deleted file mode 100644 index 8fe6075729778..0000000000000 --- a/client/api/src/cht.rs +++ /dev/null @@ -1,473 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Canonical hash trie definitions and helper functions. -//! -//! Each CHT is a trie mapping block numbers to canonical hash. -//! One is generated for every `SIZE` blocks, allowing us to discard those blocks in -//! favor of the trie root. When the "ancient" blocks need to be accessed, we simply -//! request an inclusion proof of a specific block number against the trie with the -//! root hash. A correct proof implies that the claimed block is identical to the one -//! we discarded. - -use codec::Encode; -use hash_db; -use sp_trie; - -use sp_core::{convert_hash, H256}; -use sp_runtime::traits::{AtLeast32Bit, Header as HeaderT, One, Zero}; -use sp_state_machine::{ - prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend, - Backend as StateBackend, InMemoryBackend, MemoryDB, StorageProof, TrieBackend, -}; - -use sp_blockchain::{Error as ClientError, Result as ClientResult}; - -/// The size of each CHT. This value is passed to every CHT-related function from -/// production code. Other values are passed from tests. -const SIZE: u32 = 2048; - -/// Gets default CHT size. -pub fn size>() -> N { - SIZE.into() -} - -/// Returns Some(cht_number) if CHT is need to be built when the block with given number is -/// canonized. -pub fn is_build_required(cht_size: N, block_num: N) -> Option -where - N: Clone + AtLeast32Bit, -{ - let block_cht_num = block_to_cht_number(cht_size.clone(), block_num.clone())?; - let two = N::one() + N::one(); - if block_cht_num < two { - return None - } - let cht_start = start_number(cht_size, block_cht_num.clone()); - if cht_start != block_num { - return None - } - - Some(block_cht_num - two) -} - -/// Returns Some(max_cht_number) if CHT has ever been built given maximal canonical block number. 
-pub fn max_cht_number(cht_size: N, max_canonical_block: N) -> Option -where - N: Clone + AtLeast32Bit, -{ - let max_cht_number = block_to_cht_number(cht_size, max_canonical_block)?; - let two = N::one() + N::one(); - if max_cht_number < two { - return None - } - Some(max_cht_number - two) -} - -/// Compute a CHT root from an iterator of block hashes. Fails if shorter than -/// SIZE items. The items are assumed to proceed sequentially from `start_number(cht_num)`. -/// Discards the trie's nodes. -pub fn compute_root( - cht_size: Header::Number, - cht_num: Header::Number, - hashes: I, -) -> ClientResult -where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord, - I: IntoIterator>>, -{ - use sp_trie::TrieConfiguration; - Ok(sp_trie::LayoutV0::::trie_root(build_pairs::(cht_size, cht_num, hashes)?)) -} - -/// Build CHT-based header proof. -pub fn build_proof( - cht_size: Header::Number, - cht_num: Header::Number, - blocks: BlocksI, - hashes: HashesI, -) -> ClientResult -where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, - BlocksI: IntoIterator, - HashesI: IntoIterator>>, -{ - let transaction = build_pairs::(cht_size, cht_num, hashes)? - .into_iter() - .map(|(k, v)| (k, Some(v))) - .collect::>(); - let storage = InMemoryBackend::::default() - .update(vec![(None, transaction)], sp_runtime::StateVersion::V0); - let trie_storage = storage - .as_trie_backend() - .expect("InMemoryState::as_trie_backend always returns Some; qed"); - prove_read_on_trie_backend( - trie_storage, - blocks.into_iter().map(|number| encode_cht_key(number)), - ) - .map_err(ClientError::from_state) -} - -/// Check CHT-based header proof. 
-pub fn check_proof( - local_root: Header::Hash, - local_number: Header::Number, - remote_hash: Header::Hash, - remote_proof: StorageProof, -) -> ClientResult<()> -where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, -{ - do_check_proof::( - local_root, - local_number, - remote_hash, - move |local_root, local_cht_key| { - read_proof_check::( - local_root, - remote_proof, - ::std::iter::once(local_cht_key), - ) - .map(|mut map| map.remove(local_cht_key).expect("checked proof of local_cht_key; qed")) - .map_err(ClientError::from_state) - }, - ) -} - -/// Check CHT-based header proof on pre-created proving backend. -pub fn check_proof_on_proving_backend( - local_root: Header::Hash, - local_number: Header::Number, - remote_hash: Header::Hash, - proving_backend: &TrieBackend, Hasher>, -) -> ClientResult<()> -where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, -{ - do_check_proof::( - local_root, - local_number, - remote_hash, - |_, local_cht_key| { - read_proof_check_on_proving_backend::(proving_backend, local_cht_key) - .map_err(ClientError::from_state) - }, - ) -} - -/// Check CHT-based header proof using passed checker function. -fn do_check_proof( - local_root: Header::Hash, - local_number: Header::Number, - remote_hash: Header::Hash, - checker: F, -) -> ClientResult<()> -where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord, - F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>, -{ - let root: Hasher::Out = convert_hash(&local_root); - let local_cht_key = encode_cht_key(local_number); - let local_cht_value = checker(root, &local_cht_key)?; - let local_cht_value = local_cht_value.ok_or_else(|| ClientError::InvalidCHTProof)?; - let local_hash = - decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?; - match &local_hash[..] 
== remote_hash.as_ref() { - true => Ok(()), - false => Err(ClientError::InvalidCHTProof.into()), - } -} - -/// Group ordered blocks by CHT number and call functor with blocks of each group. -pub fn for_each_cht_group( - cht_size: Header::Number, - blocks: I, - mut functor: F, - mut functor_param: P, -) -> ClientResult<()> -where - Header: HeaderT, - I: IntoIterator, - F: FnMut(P, Header::Number, Vec) -> ClientResult

, -{ - let mut current_cht_num = None; - let mut current_cht_blocks = Vec::new(); - for block in blocks { - let new_cht_num = block_to_cht_number(cht_size, block).ok_or_else(|| { - ClientError::Backend(format!("Cannot compute CHT root for the block #{}", block)) - })?; - - let advance_to_next_cht = current_cht_num.is_some() && current_cht_num != Some(new_cht_num); - if advance_to_next_cht { - let current_cht_num = current_cht_num.expect( - "advance_to_next_cht is true; - it is true only when current_cht_num is Some; qed", - ); - assert!( - new_cht_num > current_cht_num, - "for_each_cht_group only supports ordered iterators" - ); - - functor_param = - functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?; - } - - current_cht_blocks.push(block); - current_cht_num = Some(new_cht_num); - } - - if let Some(current_cht_num) = current_cht_num { - functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?; - } - - Ok(()) -} - -/// Build pairs for computing CHT. -fn build_pairs( - cht_size: Header::Number, - cht_num: Header::Number, - hashes: I, -) -> ClientResult, Vec)>> -where - Header: HeaderT, - I: IntoIterator>>, -{ - let start_num = start_number(cht_size, cht_num); - let mut pairs = Vec::new(); - let mut hash_index = Header::Number::zero(); - for hash in hashes.into_iter() { - let hash = - hash?.ok_or_else(|| ClientError::from(ClientError::MissingHashRequiredForCHT))?; - pairs.push((encode_cht_key(start_num + hash_index).to_vec(), encode_cht_value(hash))); - hash_index += Header::Number::one(); - if hash_index == cht_size { - break - } - } - - if hash_index == cht_size { - Ok(pairs) - } else { - Err(ClientError::MissingHashRequiredForCHT) - } -} - -/// Get the starting block of a given CHT. -/// CHT 0 includes block 1...SIZE, -/// CHT 1 includes block SIZE + 1 ... 2*SIZE -/// More generally: CHT N includes block (1 + N*SIZE)...((N+1)*SIZE). 
-/// This is because the genesis hash is assumed to be known -/// and including it would be redundant. -pub fn start_number(cht_size: N, cht_num: N) -> N { - (cht_num * cht_size) + N::one() -} - -/// Get the ending block of a given CHT. -pub fn end_number(cht_size: N, cht_num: N) -> N { - (cht_num + N::one()) * cht_size -} - -/// Convert a block number to a CHT number. -/// Returns `None` for `block_num` == 0, `Some` otherwise. -pub fn block_to_cht_number(cht_size: N, block_num: N) -> Option { - if block_num == N::zero() { - None - } else { - Some((block_num - N::one()) / cht_size) - } -} - -/// Convert header number into CHT key. -pub fn encode_cht_key(number: N) -> Vec { - number.encode() -} - -/// Convert header hash into CHT value. -fn encode_cht_value>(hash: Hash) -> Vec { - hash.as_ref().to_vec() -} - -/// Convert CHT value into block header hash. -pub fn decode_cht_value(value: &[u8]) -> Option { - match value.len() { - 32 => Some(H256::from_slice(&value[0..32])), - _ => None, - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::{generic, traits::BlakeTwo256}; - - type Header = generic::Header; - - #[test] - fn is_build_required_works() { - assert_eq!(is_build_required(SIZE, 0u32.into()), None); - assert_eq!(is_build_required(SIZE, 1u32.into()), None); - assert_eq!(is_build_required(SIZE, SIZE), None); - assert_eq!(is_build_required(SIZE, SIZE + 1), None); - assert_eq!(is_build_required(SIZE, 2 * SIZE), None); - assert_eq!(is_build_required(SIZE, 2 * SIZE + 1), Some(0)); - assert_eq!(is_build_required(SIZE, 2 * SIZE + 2), None); - assert_eq!(is_build_required(SIZE, 3 * SIZE), None); - assert_eq!(is_build_required(SIZE, 3 * SIZE + 1), Some(1)); - assert_eq!(is_build_required(SIZE, 3 * SIZE + 2), None); - } - - #[test] - fn max_cht_number_works() { - assert_eq!(max_cht_number(SIZE, 0u32.into()), None); - assert_eq!(max_cht_number(SIZE, 1u32.into()), None); - assert_eq!(max_cht_number(SIZE, SIZE), None); - assert_eq!(max_cht_number(SIZE, SIZE + 
1), None); - assert_eq!(max_cht_number(SIZE, 2 * SIZE), None); - assert_eq!(max_cht_number(SIZE, 2 * SIZE + 1), Some(0)); - assert_eq!(max_cht_number(SIZE, 2 * SIZE + 2), Some(0)); - assert_eq!(max_cht_number(SIZE, 3 * SIZE), Some(0)); - assert_eq!(max_cht_number(SIZE, 3 * SIZE + 1), Some(1)); - assert_eq!(max_cht_number(SIZE, 3 * SIZE + 2), Some(1)); - } - - #[test] - fn start_number_works() { - assert_eq!(start_number(SIZE, 0u32), 1u32); - assert_eq!(start_number(SIZE, 1u32), SIZE + 1); - assert_eq!(start_number(SIZE, 2u32), SIZE + SIZE + 1); - } - - #[test] - fn end_number_works() { - assert_eq!(end_number(SIZE, 0u32), SIZE); - assert_eq!(end_number(SIZE, 1u32), SIZE + SIZE); - assert_eq!(end_number(SIZE, 2u32), SIZE + SIZE + SIZE); - } - - #[test] - fn build_pairs_fails_when_no_enough_blocks() { - assert!(build_pairs::( - SIZE as _, - 0, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2) - ) - .is_err()); - } - - #[test] - fn build_pairs_fails_when_missing_block() { - assert!(build_pairs::( - SIZE as _, - 0, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) - .take(SIZE as usize / 2) - .chain(::std::iter::once(Ok(None))) - .chain( - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2)))) - .take(SIZE as usize / 2 - 1) - ) - ) - .is_err()); - } - - #[test] - fn compute_root_works() { - assert!(compute_root::( - SIZE as _, - 42, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) - ) - .is_ok()); - } - - #[test] - #[should_panic] - fn build_proof_panics_when_querying_wrong_block() { - assert!(build_proof::( - SIZE as _, - 0, - vec![(SIZE * 1000) as u64], - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) - ) - .is_err()); - } - - #[test] - fn build_proof_works() { - assert!(build_proof::( - SIZE as _, - 0, - vec![(SIZE / 2) as u64], - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) - ) - .is_ok()); 
- } - - #[test] - #[should_panic] - fn for_each_cht_group_panics() { - let cht_size = SIZE as u64; - let _ = for_each_cht_group::( - cht_size, - vec![cht_size * 5, cht_size * 2], - |_, _, _| Ok(()), - (), - ); - } - - #[test] - fn for_each_cht_group_works() { - let cht_size = SIZE as u64; - let _ = for_each_cht_group::( - cht_size, - vec![ - cht_size * 2 + 1, - cht_size * 2 + 2, - cht_size * 2 + 5, - cht_size * 4 + 1, - cht_size * 4 + 7, - cht_size * 6 + 1, - ], - |_, cht_num, blocks| { - match cht_num { - 2 => assert_eq!( - blocks, - vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5] - ), - 4 => assert_eq!(blocks, vec![cht_size * 4 + 1, cht_size * 4 + 7]), - 6 => assert_eq!(blocks, vec![cht_size * 6 + 1]), - _ => unreachable!(), - } - - Ok(()) - }, - (), - ); - } -} diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs deleted file mode 100644 index 1f17a726ea2f9..0000000000000 --- a/client/light/src/backend.rs +++ /dev/null @@ -1,592 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Light client backend. Only stores headers and justifications of blocks. -//! Everything else is requested from full nodes on demand. 
- -use parking_lot::RwLock; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; - -use codec::{Decode, Encode}; - -use super::blockchain::Blockchain; -use hash_db::Hasher; -use sc_client_api::{ - backend::{ - AuxStore, Backend as ClientBackend, BlockImportOperation, NewBlockState, - PrunableStateChangesTrieStorage, RemoteBackend, - }, - blockchain::{well_known_cache_keys, HeaderBackend as BlockchainHeaderBackend}, - in_mem::check_genesis_storage, - light::Storage as BlockchainStorage, - UsageInfo, -}; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_core::{ - offchain::storage::InMemOffchainStorage, - storage::{well_known_keys, ChildInfo}, - ChangesTrieConfiguration, -}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, HashFor, Header, NumberFor, Zero}, - Justification, Justifications, StateVersion, Storage, -}; -use sp_state_machine::{ - Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, - IndexOperation, StorageCollection, TrieBackend, -}; - -const IN_MEMORY_EXPECT_PROOF: &str = - "InMemory state backend has Void error type and always succeeds; qed"; - -/// Light client backend. -pub struct Backend { - blockchain: Arc>, - genesis_state: RwLock>>, - import_lock: RwLock<()>, -} - -/// Light block (header and justification) import operation. -pub struct ImportOperation { - header: Option, - cache: HashMap>, - leaf_state: NewBlockState, - aux_ops: Vec<(Vec, Option>)>, - finalized_blocks: Vec>, - set_head: Option>, - storage_update: Option>>, - changes_trie_config_update: Option>, - _phantom: std::marker::PhantomData, -} - -/// Either in-memory genesis state, or locally-unavailable state. -pub enum GenesisOrUnavailableState { - /// Genesis state - storage values are stored in-memory. - Genesis(InMemoryBackend), - /// We know that state exists, but all calls will fail with error, because it - /// isn't locally available. 
- Unavailable, -} - -impl Backend { - /// Create new light backend. - pub fn new(blockchain: Arc>) -> Self { - Self { blockchain, genesis_state: RwLock::new(None), import_lock: Default::default() } - } - - /// Get shared blockchain reference. - pub fn blockchain(&self) -> &Arc> { - &self.blockchain - } -} - -impl AuxStore for Backend { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >( - &self, - insert: I, - delete: D, - ) -> ClientResult<()> { - self.blockchain.storage().insert_aux(insert, delete) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - self.blockchain.storage().get_aux(key) - } -} - -impl ClientBackend for Backend> -where - Block: BlockT, - S: BlockchainStorage, - Block::Hash: Ord, -{ - type BlockImportOperation = ImportOperation; - type Blockchain = Blockchain; - type State = GenesisOrUnavailableState>; - type OffchainStorage = InMemOffchainStorage; - - fn begin_operation(&self) -> ClientResult { - Ok(ImportOperation { - header: None, - cache: Default::default(), - leaf_state: NewBlockState::Normal, - aux_ops: Vec::new(), - finalized_blocks: Vec::new(), - set_head: None, - storage_update: None, - changes_trie_config_update: None, - _phantom: Default::default(), - }) - } - - fn begin_state_operation( - &self, - _operation: &mut Self::BlockImportOperation, - _block: BlockId, - ) -> ClientResult<()> { - Ok(()) - } - - fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> ClientResult<()> { - if !operation.finalized_blocks.is_empty() { - for block in operation.finalized_blocks { - self.blockchain.storage().finalize_header(block)?; - } - } - - if let Some(header) = operation.header { - let is_genesis_import = header.number().is_zero(); - if let Some(new_config) = operation.changes_trie_config_update { - operation - .cache - .insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_config.encode()); - } - self.blockchain.storage().import_header( - header, - operation.cache, - 
operation.leaf_state, - operation.aux_ops, - )?; - - // when importing genesis block => remember its state - if is_genesis_import { - *self.genesis_state.write() = operation.storage_update.take(); - } - } else { - for (key, maybe_val) in operation.aux_ops { - match maybe_val { - Some(val) => self - .blockchain - .storage() - .insert_aux(&[(&key[..], &val[..])], std::iter::empty())?, - None => - self.blockchain.storage().insert_aux(std::iter::empty(), &[&key[..]])?, - } - } - } - - if let Some(set_head) = operation.set_head { - self.blockchain.storage().set_head(set_head)?; - } - - Ok(()) - } - - fn finalize_block( - &self, - block: BlockId, - _justification: Option, - ) -> ClientResult<()> { - self.blockchain.storage().finalize_header(block) - } - - fn append_justification( - &self, - _block: BlockId, - _justification: Justification, - ) -> ClientResult<()> { - Ok(()) - } - - fn blockchain(&self) -> &Blockchain { - &self.blockchain - } - - fn usage_info(&self) -> Option { - self.blockchain.storage().usage_info() - } - - fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { - None - } - - fn offchain_storage(&self) -> Option { - None - } - - fn state_at(&self, block: BlockId) -> ClientResult { - let block_number = self.blockchain.expect_block_number_from_id(&block)?; - - // special case for genesis block - if block_number.is_zero() { - if let Some(genesis_state) = self.genesis_state.read().clone() { - return Ok(GenesisOrUnavailableState::Genesis(genesis_state)) - } - } - - // else return unavailable state. We do not return error here, because error - // would mean that we do not know this state at all. 
But we know that it exists - Ok(GenesisOrUnavailableState::Unavailable) - } - - fn revert( - &self, - _n: NumberFor, - _revert_finalized: bool, - ) -> ClientResult<(NumberFor, HashSet)> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn remove_leaf_block(&self, _hash: &Block::Hash) -> ClientResult<()> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn get_import_lock(&self) -> &RwLock<()> { - &self.import_lock - } -} - -impl RemoteBackend for Backend> -where - Block: BlockT, - S: BlockchainStorage + 'static, - Block::Hash: Ord, -{ - fn is_local_state_available(&self, block: &BlockId) -> bool { - self.genesis_state.read().is_some() && - self.blockchain - .expect_block_number_from_id(block) - .map(|num| num.is_zero()) - .unwrap_or(false) - } - - fn remote_blockchain(&self) -> Arc> { - self.blockchain.clone() - } -} - -impl BlockImportOperation for ImportOperation -where - Block: BlockT, - S: BlockchainStorage, - Block::Hash: Ord, -{ - type State = GenesisOrUnavailableState>; - - fn state(&self) -> ClientResult> { - // None means 'locally-stateless' backend - Ok(None) - } - - fn set_block_data( - &mut self, - header: Block::Header, - _body: Option>, - _indexed_body: Option>>, - _justifications: Option, - state: NewBlockState, - ) -> ClientResult<()> { - self.leaf_state = state; - self.header = Some(header); - Ok(()) - } - - fn update_cache(&mut self, cache: HashMap>) { - self.cache = cache; - } - - fn update_db_storage( - &mut self, - _update: >>::Transaction, - ) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn update_changes_trie( - &mut self, - _update: ChangesTrieTransaction, NumberFor>, - ) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn set_genesis_state( - &mut self, - input: Storage, - commit: bool, - state_version: StateVersion, - ) -> ClientResult { - check_genesis_storage(&input)?; - - // changes trie configuration - let changes_trie_config 
= input - .top - .iter() - .find(|(k, _)| &k[..] == well_known_keys::CHANGES_TRIE_CONFIG) - .map(|(_, v)| { - Decode::decode(&mut &v[..]) - .expect("changes trie configuration is encoded properly at genesis") - }); - self.changes_trie_config_update = Some(changes_trie_config); - - // this is only called when genesis block is imported => shouldn't be performance bottleneck - let mut storage: HashMap, _> = HashMap::new(); - storage.insert(None, input.top); - - // create a list of children keys to re-compute roots for - let child_delta = input - .children_default - .iter() - .map(|(_storage_key, storage_child)| (&storage_child.child_info, std::iter::empty())); - - // make sure to persist the child storage - for (_child_key, storage_child) in input.children_default.clone() { - storage.insert(Some(storage_child.child_info), storage_child.data); - } - - let storage_update = InMemoryBackend::from((storage, state_version)); - let (storage_root, _) = - storage_update.full_storage_root(std::iter::empty(), child_delta, state_version); - if commit { - self.storage_update = Some(storage_update); - } - - Ok(storage_root) - } - - fn reset_storage( - &mut self, - _input: Storage, - _state_version: StateVersion, - ) -> ClientResult { - Err(ClientError::NotAvailableOnLightClient) - } - - fn insert_aux(&mut self, ops: I) -> ClientResult<()> - where - I: IntoIterator, Option>)>, - { - self.aux_ops.append(&mut ops.into_iter().collect()); - Ok(()) - } - - fn update_storage( - &mut self, - _update: StorageCollection, - _child_update: ChildStorageCollection, - ) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn mark_finalized( - &mut self, - block: BlockId, - _justifications: Option, - ) -> ClientResult<()> { - self.finalized_blocks.push(block); - Ok(()) - } - - fn mark_head(&mut self, block: BlockId) -> ClientResult<()> { - self.set_head = Some(block); - Ok(()) - } - - fn update_transaction_index( - &mut self, - _index: Vec, - ) -> 
sp_blockchain::Result<()> { - // noop for the light client - Ok(()) - } -} - -impl std::fmt::Debug for GenesisOrUnavailableState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.fmt(f), - GenesisOrUnavailableState::Unavailable => write!(f, "Unavailable"), - } - } -} - -impl StateBackend for GenesisOrUnavailableState -where - H::Out: Ord + codec::Codec, -{ - type Error = ClientError; - type Transaction = as StateBackend>::Transaction; - type TrieBackendStorage = as StateBackend>::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> ClientResult>> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.storage(key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> ClientResult>> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.child_storage(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.next_storage_key(key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.next_child_storage_key(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn for_keys_with_prefix(&self, prefix: &[u8], action: A) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - 
state.for_keys_with_prefix(prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], action: A) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.for_key_values_with_prefix(prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn apply_to_key_values_while, Vec) -> bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - action: A, - allow_missing: bool, - ) -> ClientResult { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => Ok(state - .apply_to_key_values_while(child_info, prefix, start_at, action, allow_missing) - .expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn apply_to_keys_while bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - action: A, - ) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.apply_to_keys_while(child_info, prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - action: A, - ) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.for_child_keys_with_prefix(child_info, prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn storage_root<'a>( - &self, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (H::Out, Self::Transaction) - where - H::Out: Ord, - { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.storage_root(delta, state_version), - GenesisOrUnavailableState::Unavailable => Default::default(), - } - } - - fn child_storage_root<'a>( - &self, - child_info: &ChildInfo, - delta: impl Iterator)>, - state_version: StateVersion, - ) -> (H::Out, bool, Self::Transaction) - where - H::Out: Ord, - { - match *self { - 
GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = - state.child_storage_root(child_info, delta, state_version); - (root, is_equal, Default::default()) - }, - GenesisOrUnavailableState::Unavailable => (H::Out::default(), true, Default::default()), - } - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.pairs(), - GenesisOrUnavailableState::Unavailable => Vec::new(), - } - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.keys(prefix), - GenesisOrUnavailableState::Unavailable => Vec::new(), - } - } - - fn register_overlay_stats(&self, _stats: &sp_state_machine::StateMachineStats) {} - - fn usage_info(&self) -> sp_state_machine::UsageInfo { - sp_state_machine::UsageInfo::empty() - } - - fn as_trie_backend(&self) -> Option<&TrieBackend> { - match self { - GenesisOrUnavailableState::Genesis(ref state) => state.as_trie_backend(), - GenesisOrUnavailableState::Unavailable => None, - } - } -} diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs deleted file mode 100644 index 3bd1cc7c87ac8..0000000000000 --- a/client/light/src/call_executor.rs +++ /dev/null @@ -1,219 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Methods that light client could use to execute runtime calls. - -use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; - -use codec::{Decode, Encode}; -use hash_db::Hasher; -use sp_core::{ - convert_hash, - traits::{CodeExecutor, SpawnNamed}, - NativeOrEncoded, -}; -use sp_externalities::Extensions; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT}, -}; -use sp_state_machine::{ - create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionManager, - ExecutionStrategy, OverlayedChanges, StorageProof, -}; - -use sp_api::{ProofRecorder, StorageTransactionCache}; - -use sp_blockchain::{Error as ClientError, Result as ClientResult}; - -use sc_client_api::{ - backend::RemoteBackend, call_executor::CallExecutor, light::RemoteCallRequest, -}; -use sc_executor::{RuntimeVersion, RuntimeVersionOf}; - -/// Call executor that is able to execute calls only on genesis state. -/// -/// Trying to execute call on non-genesis state leads to error. -pub struct GenesisCallExecutor { - backend: Arc, - local: L, -} - -impl GenesisCallExecutor { - /// Create new genesis call executor. 
- pub fn new(backend: Arc, local: L) -> Self { - Self { backend, local } - } -} - -impl Clone for GenesisCallExecutor { - fn clone(&self) -> Self { - GenesisCallExecutor { backend: self.backend.clone(), local: self.local.clone() } - } -} - -impl CallExecutor for GenesisCallExecutor -where - Block: BlockT, - B: RemoteBackend, - Local: CallExecutor, -{ - type Error = ClientError; - - type Backend = B; - - fn call( - &self, - id: &BlockId, - method: &str, - call_data: &[u8], - strategy: ExecutionStrategy, - extensions: Option, - ) -> ClientResult> { - if self.backend.is_local_state_available(id) { - self.local.call(id, method, call_data, strategy, extensions) - } else { - Err(ClientError::NotAvailableOnLightClient) - } - } - - fn contextual_call< - EM: Fn( - Result, Self::Error>, - Result, Self::Error>, - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - at: &BlockId, - method: &str, - call_data: &[u8], - changes: &RefCell, - _: Option<&RefCell>>, - _manager: ExecutionManager, - native_call: Option, - recorder: &Option>, - extensions: Option, - ) -> ClientResult> - where - ExecutionManager: Clone, - { - // there's no actual way/need to specify native/wasm execution strategy on light node - // => we can safely ignore passed values - - if self.backend.is_local_state_available(at) { - CallExecutor::contextual_call::< - fn( - Result, Local::Error>, - Result, Local::Error>, - ) -> Result, Local::Error>, - _, - NC, - >( - &self.local, - at, - method, - call_data, - changes, - None, - ExecutionManager::NativeWhenPossible, - native_call, - recorder, - extensions, - ) - } else { - Err(ClientError::NotAvailableOnLightClient) - } - } - - fn prove_execution( - &self, - at: &BlockId, - method: &str, - call_data: &[u8], - ) -> ClientResult<(Vec, StorageProof)> { - if self.backend.is_local_state_available(at) { - self.local.prove_execution(at, method, call_data) - } else { - 
Err(ClientError::NotAvailableOnLightClient) - } - } - - fn runtime_version(&self, id: &BlockId) -> ClientResult { - if self.backend.is_local_state_available(id) { - >::runtime_version(&self.local, id) - } else { - Err(ClientError::NotAvailableOnLightClient) - } - } -} - -impl RuntimeVersionOf for GenesisCallExecutor -where - Local: RuntimeVersionOf, -{ - fn runtime_version( - &self, - ext: &mut dyn sp_externalities::Externalities, - runtime_code: &sp_core::traits::RuntimeCode, - ) -> Result { - self.local.runtime_version(ext, runtime_code) - } -} - -/// Check remote contextual execution proof using given backend. -/// -/// Proof should include the method execution proof. -pub fn check_execution_proof( - executor: &E, - spawn_handle: Box, - request: &RemoteCallRequest

, - remote_proof: StorageProof, -) -> ClientResult> -where - Header: HeaderT, - E: CodeExecutor + Clone + 'static, - H: Hasher, - H::Out: Ord + codec::Codec + 'static, -{ - let local_state_root = request.header.state_root(); - let root: H::Out = convert_hash(&local_state_root); - - // prepare execution environment - let mut changes = OverlayedChanges::default(); - let trie_backend = create_proof_check_backend(root, remote_proof)?; - - // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 - let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_backend); - let runtime_code = backend_runtime_code - .runtime_code() - .map_err(|_e| ClientError::RuntimeCodeMissing)?; - - // execute method - execution_proof_check_on_trie_backend::( - &trie_backend, - &mut changes, - executor, - spawn_handle, - &request.method, - &request.call_data, - &runtime_code, - ) - .map_err(Into::into) -} diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs deleted file mode 100644 index f2b45bc413e2a..0000000000000 --- a/client/service/src/client/call_executor.rs +++ /dev/null @@ -1,433 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; -use codec::{Decode, Encode}; -use sc_client_api::{backend, call_executor::CallExecutor, HeaderBackend}; -use sc_executor::{RuntimeVersion, RuntimeVersionOf}; -use sp_api::{ProofRecorder, StorageTransactionCache}; -use sp_core::{ - traits::{CodeExecutor, RuntimeCode, SpawnNamed}, - NativeOrEncoded, NeverNativeValue, -}; -use sp_externalities::Extensions; -use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use sp_state_machine::{ - self, backend::Backend as _, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, - StateMachine, StorageProof, -}; -use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; - -/// Call executor that executes methods locally, querying all required -/// data from local backend. -pub struct LocalCallExecutor { - backend: Arc, - executor: E, - wasm_override: Arc>, - wasm_substitutes: WasmSubstitutes, - spawn_handle: Box, - client_config: ClientConfig, -} - -impl LocalCallExecutor -where - E: CodeExecutor + RuntimeVersionOf + Clone + 'static, - B: backend::Backend, -{ - /// Creates new instance of local call executor. - pub fn new( - backend: Arc, - executor: E, - spawn_handle: Box, - client_config: ClientConfig, - ) -> sp_blockchain::Result { - let wasm_override = client_config - .wasm_runtime_overrides - .as_ref() - .map(|p| WasmOverride::new(p.clone(), &executor)) - .transpose()?; - - let wasm_substitutes = WasmSubstitutes::new( - client_config.wasm_runtime_substitutes.clone(), - executor.clone(), - backend.clone(), - )?; - - Ok(LocalCallExecutor { - backend, - executor, - wasm_override: Arc::new(wasm_override), - spawn_handle, - client_config, - wasm_substitutes, - }) - } - - /// Check if local runtime code overrides are enabled and one is available - /// for the given `BlockId`. If yes, return it; otherwise return the same - /// `RuntimeCode` instance that was passed. 
- fn check_override<'a>( - &'a self, - onchain_code: RuntimeCode<'a>, - id: &BlockId, - ) -> sp_blockchain::Result> - where - Block: BlockT, - B: backend::Backend, - { - let spec = CallExecutor::runtime_version(self, id)?; - let code = if let Some(d) = self - .wasm_override - .as_ref() - .as_ref() - .map(|o| o.get(&spec.spec_version, onchain_code.heap_pages, &spec.spec_name)) - .flatten() - { - log::debug!(target: "wasm_overrides", "using WASM override for block {}", id); - d - } else if let Some(s) = - self.wasm_substitutes.get(spec.spec_version, onchain_code.heap_pages, id) - { - log::debug!(target: "wasm_substitutes", "Using WASM substitute for block {:?}", id); - s - } else { - log::debug!( - target: "wasm_overrides", - "No WASM override available for block {}, using onchain code", - id - ); - onchain_code - }; - - Ok(code) - } -} - -impl Clone for LocalCallExecutor -where - E: Clone, -{ - fn clone(&self) -> Self { - LocalCallExecutor { - backend: self.backend.clone(), - executor: self.executor.clone(), - wasm_override: self.wasm_override.clone(), - spawn_handle: self.spawn_handle.clone(), - client_config: self.client_config.clone(), - wasm_substitutes: self.wasm_substitutes.clone(), - } - } -} - -impl CallExecutor for LocalCallExecutor -where - B: backend::Backend, - E: CodeExecutor + RuntimeVersionOf + Clone + 'static, - Block: BlockT, -{ - type Error = E::Error; - - type Backend = B; - - fn call( - &self, - at: &BlockId, - method: &str, - call_data: &[u8], - strategy: ExecutionStrategy, - extensions: Option, - ) -> sp_blockchain::Result> { - let mut changes = OverlayedChanges::default(); - let state = self.backend.state_at(*at)?; - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let runtime_code = - state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - - let runtime_code = self.check_override(runtime_code, at)?; - - let at_hash = self.backend.blockchain().block_hash_from_id(at)?.ok_or_else(|| 
{ - sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) - })?; - - let return_data = StateMachine::new( - &state, - &mut changes, - &self.executor, - method, - call_data, - extensions.unwrap_or_default(), - &runtime_code, - self.spawn_handle.clone(), - ) - .set_parent_hash(at_hash) - .execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - strategy.get_manager(), - None, - )?; - - Ok(return_data.into_encoded()) - } - - fn contextual_call< - EM: Fn( - Result, Self::Error>, - Result, Self::Error>, - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - at: &BlockId, - method: &str, - call_data: &[u8], - changes: &RefCell, - storage_transaction_cache: Option<&RefCell>>, - execution_manager: ExecutionManager, - native_call: Option, - recorder: &Option>, - extensions: Option, - ) -> Result, sp_blockchain::Error> - where - ExecutionManager: Clone, - { - let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); - - let state = self.backend.state_at(*at)?; - - let changes = &mut *changes.borrow_mut(); - - let at_hash = self.backend.blockchain().block_hash_from_id(at)?.ok_or_else(|| { - sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) - })?; - - // It is important to extract the runtime code here before we create the proof - // recorder to not record it. We also need to fetch the runtime code from `state` to - // make sure we use the caching layers. 
- let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - - let runtime_code = - state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, at)?; - - match recorder { - Some(recorder) => { - let trie_state = state.as_trie_backend().ok_or_else(|| { - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) - as Box - })?; - - let backend = sp_state_machine::ProvingBackend::new_with_recorder( - trie_state, - recorder.clone(), - ); - - let mut state_machine = StateMachine::new( - &backend, - changes, - &self.executor, - method, - call_data, - extensions.unwrap_or_default(), - &runtime_code, - self.spawn_handle.clone(), - ) - .set_parent_hash(at_hash); - // TODO: https://github.com/paritytech/substrate/issues/4455 - state_machine.execute_using_consensus_failure_handler( - execution_manager, - native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)), - ) - }, - None => { - let mut state_machine = StateMachine::new( - &state, - changes, - &self.executor, - method, - call_data, - extensions.unwrap_or_default(), - &runtime_code, - self.spawn_handle.clone(), - ) - .with_storage_transaction_cache( - storage_transaction_cache.as_mut().map(|c| &mut **c), - ) - .set_parent_hash(at_hash); - state_machine.execute_using_consensus_failure_handler( - execution_manager, - native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)), - ) - }, - } - .map_err(Into::into) - } - - fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { - let mut overlay = OverlayedChanges::default(); - let state = self.backend.state_at(*id)?; - let mut cache = StorageTransactionCache::::default(); - let mut ext = Ext::new(&mut overlay, &mut cache, &state, None); - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let runtime_code = - state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - self.executor - 
.runtime_version(&mut ext, &runtime_code) - .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) - } - - fn prove_execution( - &self, - at: &BlockId, - method: &str, - call_data: &[u8], - ) -> sp_blockchain::Result<(Vec, StorageProof)> { - let state = self.backend.state_at(*at)?; - - let trie_backend = state.as_trie_backend().ok_or_else(|| { - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) - as Box - })?; - - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend); - let runtime_code = - state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; - let runtime_code = self.check_override(runtime_code, at)?; - - sp_state_machine::prove_execution_on_trie_backend( - &trie_backend, - &mut Default::default(), - &self.executor, - self.spawn_handle.clone(), - method, - call_data, - &runtime_code, - ) - .map_err(Into::into) - } -} - -impl RuntimeVersionOf for LocalCallExecutor -where - E: RuntimeVersionOf, - Block: BlockT, -{ - fn runtime_version( - &self, - ext: &mut dyn sp_externalities::Externalities, - runtime_code: &sp_core::traits::RuntimeCode, - ) -> Result { - RuntimeVersionOf::runtime_version(&self.executor, ext, runtime_code) - } -} - -impl sp_version::GetRuntimeVersionAt for LocalCallExecutor -where - B: backend::Backend, - E: CodeExecutor + RuntimeVersionOf + Clone + 'static, - Block: BlockT, -{ - fn runtime_version(&self, at: &BlockId) -> Result { - CallExecutor::runtime_version(self, at).map_err(|e| format!("{:?}", e)) - } -} - -impl sp_version::GetNativeVersion for LocalCallExecutor -where - B: backend::Backend, - E: CodeExecutor + sp_version::GetNativeVersion + Clone + 'static, - Block: BlockT, -{ - fn native_version(&self) -> &sp_version::NativeVersion { - self.executor.native_version() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sc_client_api::in_mem; - use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod}; - use sp_core::{ - 
testing::TaskExecutor, - traits::{FetchRuntimeCode, WrappedRuntimeCode}, - }; - use substrate_test_runtime_client::{runtime, GenesisInit, LocalExecutorDispatch}; - - #[test] - fn should_get_override_if_exists() { - let executor = NativeElseWasmExecutor::::new( - WasmExecutionMethod::Interpreted, - Some(128), - 1, - 2, - ); - - let overrides = crate::client::wasm_override::dummy_overrides(); - let onchain_code = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); - let onchain_code = RuntimeCode { - code_fetcher: &onchain_code, - heap_pages: Some(128), - hash: vec![0, 0, 0, 0], - }; - - let backend = Arc::new(in_mem::Backend::::new()); - - // wasm_runtime_overrides is `None` here because we construct the - // LocalCallExecutor directly later on - let client_config = ClientConfig::default(); - - // client is used for the convenience of creating and inserting the genesis block. - let _client = substrate_test_runtime_client::client::new_with_backend::< - _, - _, - runtime::Block, - _, - runtime::RuntimeApi, - >( - backend.clone(), - executor.clone(), - &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), - None, - Box::new(TaskExecutor::new()), - None, - None, - Default::default(), - ) - .expect("Creates a client"); - - let call_executor = LocalCallExecutor { - backend: backend.clone(), - executor: executor.clone(), - wasm_override: Arc::new(Some(overrides)), - spawn_handle: Box::new(TaskExecutor::new()), - client_config, - wasm_substitutes: WasmSubstitutes::new( - Default::default(), - executor.clone(), - backend.clone(), - ) - .unwrap(), - }; - - let check = call_executor - .check_override(onchain_code, &BlockId::Number(Default::default())) - .expect("RuntimeCode override"); - - assert_eq!(Some(vec![2, 2, 2, 2, 2, 2, 2, 2]), check.fetch_runtime_code().map(Into::into)); - } -} diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index 1b5a55cca48e1..b940ae93baa55 100644 --- a/primitives/api/Cargo.toml +++ 
b/primitives/api/Cargo.toml @@ -49,4 +49,3 @@ std = [ # # This sets the max logging level to `off` for `log`. disable-logging = ["log/max_level_off"] -old_state = ["sp-runtime/old_state"] diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 063b14791b726..964ef15ce5f5a 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -677,28 +677,3 @@ decl_runtime_apis! { fn metadata() -> OpaqueMetadata; } } - -#[cfg(feature = "old_state")] -decl_runtime_apis! { - /// The `Core` runtime api that every Substrate runtime needs to implement. - #[core_trait] - #[api_version(3)] - pub trait Core { - /// Returns the version of the runtime. - fn version() -> RuntimeVersion; - /// Returns the version of the runtime. - #[changed_in(3)] - fn version() -> OldRuntimeVersion; - /// Execute the given block. - fn execute_block(block: Block); - /// Initialize a block with the given header. - #[renamed("initialise_block", 2)] - fn initialize_block(header: &::Header); - } - - /// The `Metadata` api trait that returns metadata for the runtime. - pub trait Metadata { - /// Returns the metadata of a runtime. - fn metadata() -> OpaqueMetadata; - } -} diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index 798849e16fbda..ee3c9e8945eb2 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -54,7 +54,6 @@ std = [ "futures", "parking_lot", ] -old_state = [] with-tracing = [ "sp-tracing/with-tracing" diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 004a51e9d5b9f..3ab12801b2010 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -100,7 +100,7 @@ pub enum KillStorageResult { } /// Interface for accessing the storage from within the runtime. -#[runtime_interface(feature_force_version=old_state,root,1)] +#[runtime_interface] pub trait Storage { /// Returns the data for `key` in the storage or `None` if the key can not be found. 
fn get(&self, key: &[u8]) -> Option> { @@ -260,7 +260,7 @@ pub trait Storage { /// Interface for accessing the child storage for default child trie, /// from within the runtime. -#[runtime_interface(feature_force_version=old_state,root,1)] +#[runtime_interface] pub trait DefaultChildStorage { /// Get a default child storage value for a given key. /// diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs deleted file mode 100644 index d51678b345ae8..0000000000000 --- a/primitives/state-machine/src/changes_trie/build.rs +++ /dev/null @@ -1,1086 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Structures and functions required to build changes trie for given block. 
- -use crate::{ - backend::Backend, - changes_trie::{ - build_iterator::digest_build_iterator, - input::{ChildIndex, DigestIndex, ExtrinsicIndex, InputKey, InputPair}, - AnchorBlockId, BlockNumber, ConfigurationRange, Storage, - }, - overlayed_changes::{OverlayedChanges, OverlayedValue}, - trie_backend_essence::TrieBackendEssence, - StorageKey, -}; -use codec::{Decode, Encode}; -use hash_db::Hasher; -use num_traits::One; -use sp_core::storage::{ChildInfo, PrefixedStorageKey}; -use std::collections::{btree_map::Entry, BTreeMap}; - -/// Prepare input pairs for building a changes trie of given block. -/// -/// Returns Err if storage error has occurred OR if storage haven't returned -/// required data. -pub(crate) fn prepare_input<'a, B, H, Number>( - backend: &'a B, - storage: &'a dyn Storage, - config: ConfigurationRange<'a, Number>, - overlay: &'a OverlayedChanges, - parent: &'a AnchorBlockId, -) -> Result< - ( - impl Iterator> + 'a, - Vec<(ChildIndex, impl Iterator> + 'a)>, - Vec, - ), - String, -> -where - B: Backend, - H: Hasher + 'a, - H::Out: Encode, - Number: BlockNumber, -{ - let number = parent.number.clone() + One::one(); - let (extrinsics_input, children_extrinsics_input) = - prepare_extrinsics_input(backend, &number, overlay)?; - let (digest_input, mut children_digest_input, digest_input_blocks) = - prepare_digest_input::(parent, config, number, storage)?; - - let mut children_digest = Vec::with_capacity(children_extrinsics_input.len()); - for (child_index, ext_iter) in children_extrinsics_input.into_iter() { - let dig_iter = children_digest_input.remove(&child_index); - children_digest.push(( - child_index, - Some(ext_iter).into_iter().flatten().chain(dig_iter.into_iter().flatten()), - )); - } - for (child_index, dig_iter) in children_digest_input.into_iter() { - children_digest.push(( - child_index, - None.into_iter().flatten().chain(Some(dig_iter).into_iter().flatten()), - )); - } - - Ok((extrinsics_input.chain(digest_input), children_digest, 
digest_input_blocks)) -} -/// Prepare ExtrinsicIndex input pairs. -fn prepare_extrinsics_input<'a, B, H, Number>( - backend: &'a B, - block: &Number, - overlay: &'a OverlayedChanges, -) -> Result< - ( - impl Iterator> + 'a, - BTreeMap, impl Iterator> + 'a>, - ), - String, -> -where - B: Backend, - H: Hasher + 'a, - Number: BlockNumber, -{ - let mut children_result = BTreeMap::new(); - - for (child_changes, child_info) in overlay.children() { - let child_index = ChildIndex:: { - block: block.clone(), - storage_key: child_info.prefixed_storage_key(), - }; - - let iter = prepare_extrinsics_input_inner( - backend, - block, - overlay, - Some(child_info.clone()), - child_changes, - )?; - children_result.insert(child_index, iter); - } - - let top = prepare_extrinsics_input_inner(backend, block, overlay, None, overlay.changes())?; - - Ok((top, children_result)) -} - -fn prepare_extrinsics_input_inner<'a, B, H, Number>( - backend: &'a B, - block: &Number, - overlay: &'a OverlayedChanges, - child_info: Option, - changes: impl Iterator, -) -> Result> + 'a, String> -where - B: Backend, - H: Hasher, - Number: BlockNumber, -{ - changes - .filter_map(|(k, v)| { - let extrinsics = v.extrinsics(); - if !extrinsics.is_empty() { - Some((k, extrinsics)) - } else { - None - } - }) - .try_fold( - BTreeMap::new(), - |mut map: BTreeMap<&[u8], (ExtrinsicIndex, Vec)>, (k, extrinsics)| { - match map.entry(k) { - Entry::Vacant(entry) => { - // ignore temporary values (values that have null value at the end of - // operation AND are not in storage at the beginning of operation - if let Some(child_info) = child_info.as_ref() { - if !overlay - .child_storage(child_info, k) - .map(|v| v.is_some()) - .unwrap_or_default() - { - if !backend - .exists_child_storage(&child_info, k) - .map_err(|e| format!("{}", e))? - { - return Ok(map) - } - } - } else { - if !overlay.storage(k).map(|v| v.is_some()).unwrap_or_default() { - if !backend.exists_storage(k).map_err(|e| format!("{}", e))? 
{ - return Ok(map) - } - } - }; - - let extrinsics = extrinsics.into_iter().collect(); - entry.insert(( - ExtrinsicIndex { block: block.clone(), key: k.to_vec() }, - extrinsics, - )); - }, - Entry::Occupied(mut entry) => { - // we do not need to check for temporary values here, because entry is - // Occupied AND we are checking it before insertion - let entry_extrinsics = &mut entry.get_mut().1; - entry_extrinsics.extend(extrinsics.into_iter()); - entry_extrinsics.sort(); - }, - } - - Ok(map) - }, - ) - .map(|pairs| pairs.into_iter().map(|(_, (k, v))| InputPair::ExtrinsicIndex(k, v))) -} - -/// Prepare DigestIndex input pairs. -fn prepare_digest_input<'a, H, Number>( - parent: &'a AnchorBlockId, - config: ConfigurationRange, - block: Number, - storage: &'a dyn Storage, -) -> Result< - ( - impl Iterator> + 'a, - BTreeMap, impl Iterator> + 'a>, - Vec, - ), - String, -> -where - H: Hasher, - H::Out: 'a + Encode, - Number: BlockNumber, -{ - let build_skewed_digest = config.end.as_ref() == Some(&block); - let block_for_digest = if build_skewed_digest { - config - .config - .next_max_level_digest_range(config.zero.clone(), block.clone()) - .map(|(_, end)| end) - .unwrap_or_else(|| block.clone()) - } else { - block.clone() - }; - - let digest_input_blocks = digest_build_iterator(config, block_for_digest).collect::>(); - digest_input_blocks - .clone() - .into_iter() - .try_fold( - (BTreeMap::new(), BTreeMap::new()), - move |(mut map, mut child_map), digest_build_block| { - let extrinsic_prefix = - ExtrinsicIndex::key_neutral_prefix(digest_build_block.clone()); - let digest_prefix = DigestIndex::key_neutral_prefix(digest_build_block.clone()); - let child_prefix = ChildIndex::key_neutral_prefix(digest_build_block.clone()); - let trie_root = storage.root(parent, digest_build_block.clone())?; - let trie_root = trie_root.ok_or_else(|| { - format!("No changes trie root for block {}", digest_build_block.clone()) - })?; - - let insert_to_map = |map: &mut BTreeMap<_, _>, key: 
StorageKey| { - match map.entry(key.clone()) { - Entry::Vacant(entry) => { - entry.insert(( - DigestIndex { block: block.clone(), key }, - vec![digest_build_block.clone()], - )); - }, - Entry::Occupied(mut entry) => { - // DigestIndexValue must be sorted. Here we are relying on the fact that - // digest_build_iterator() returns blocks in ascending order => we only - // need to check for duplicates - // - // is_dup_block could be true when key has been changed in both digest - // block AND other blocks that it covers - let is_dup_block = entry.get().1.last() == Some(&digest_build_block); - if !is_dup_block { - entry.get_mut().1.push(digest_build_block.clone()); - } - }, - } - }; - - // try to get all updated keys from cache - let populated_from_cache = - storage.with_cached_changed_keys(&trie_root, &mut |changed_keys| { - for (storage_key, changed_keys) in changed_keys { - let map = match storage_key { - Some(storage_key) => child_map - .entry(ChildIndex:: { - block: block.clone(), - storage_key: storage_key.clone(), - }) - .or_default(), - None => &mut map, - }; - for changed_key in changed_keys.iter().cloned() { - insert_to_map(map, changed_key); - } - } - }); - if populated_from_cache { - return Ok((map, child_map)) - } - - let mut children_roots = BTreeMap::::new(); - { - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - trie_root, - ); - - trie_storage.for_key_values_with_prefix(&child_prefix, |mut key, mut value| { - if let Ok(InputKey::ChildIndex::(trie_key)) = - Decode::decode(&mut key) - { - if let Ok(value) = >::decode(&mut value) { - let mut trie_root = ::Out::default(); - trie_root.as_mut().copy_from_slice(&value[..]); - children_roots.insert(trie_key.storage_key, trie_root); - } - } - }); - - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| { - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = - Decode::decode(&mut key) - { - insert_to_map(&mut map, trie_key.key); - } - }); - - 
trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| { - if let Ok(InputKey::DigestIndex::(trie_key)) = - Decode::decode(&mut key) - { - insert_to_map(&mut map, trie_key.key); - } - }); - } - - for (storage_key, trie_root) in children_roots.into_iter() { - let child_index = ChildIndex:: { block: block.clone(), storage_key }; - - let mut map = child_map.entry(child_index).or_default(); - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - trie_root, - ); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| { - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = - Decode::decode(&mut key) - { - insert_to_map(&mut map, trie_key.key); - } - }); - - trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| { - if let Ok(InputKey::DigestIndex::(trie_key)) = - Decode::decode(&mut key) - { - insert_to_map(&mut map, trie_key.key); - } - }); - } - Ok((map, child_map)) - }, - ) - .map(|(pairs, child_pairs)| { - ( - pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v)), - child_pairs - .into_iter() - .map(|(sk, pairs)| { - (sk, pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v))) - }) - .collect(), - digest_input_blocks, - ) - }) -} - -#[cfg(test)] -mod test { - use super::*; - use crate::{ - changes_trie::{ - build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}, - storage::InMemoryStorage, - Configuration, RootsStorage, - }, - InMemoryBackend, - }; - use sp_core::Blake2Hasher; - - fn prepare_for_build( - zero: u64, - ) -> ( - InMemoryBackend, - InMemoryStorage, - OverlayedChanges, - Configuration, - ) { - let child_info_1 = ChildInfo::new_default(b"storage_key1"); - let child_info_2 = ChildInfo::new_default(b"storage_key2"); - let backend: InMemoryBackend<_> = ( - vec![ - (vec![100], vec![255]), - (vec![101], vec![255]), - (vec![102], vec![255]), - (vec![103], vec![255]), - (vec![104], vec![255]), - (vec![105], vec![255]), - ] - .into_iter() - .collect::>(), - 
sp_core::StateVersion::V0, - ) - .into(); - let prefixed_child_trie_key1 = child_info_1.prefixed_storage_key(); - let storage = InMemoryStorage::with_inputs( - vec![ - ( - zero + 1, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![100] }, - vec![1, 3], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![101] }, - vec![0, 2], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![105] }, - vec![0, 2, 4], - ), - ], - ), - ( - zero + 2, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 2, key: vec![102] }, - vec![0], - )], - ), - ( - zero + 3, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 3, key: vec![100] }, - vec![0], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 3, key: vec![105] }, - vec![1], - ), - ], - ), - ( - zero + 4, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2, 3], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![101] }, - vec![1], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![103] }, - vec![0, 1], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1, zero + 3], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1, zero + 3], - ), - ], - ), - (zero + 5, Vec::new()), - ( - zero + 6, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 6, key: vec![105] }, - vec![2], - )], - ), - (zero + 7, Vec::new()), - ( - zero + 8, - vec![InputPair::DigestIndex( - DigestIndex { block: zero + 8, key: vec![105] }, - vec![zero + 6], - )], - ), - (zero + 9, Vec::new()), - (zero + 10, Vec::new()), - (zero + 11, 
Vec::new()), - (zero + 12, Vec::new()), - (zero + 13, Vec::new()), - (zero + 14, Vec::new()), - (zero + 15, Vec::new()), - ], - vec![( - prefixed_child_trie_key1.clone(), - vec![ - ( - zero + 1, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![100] }, - vec![1, 3], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![101] }, - vec![0, 2], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![105] }, - vec![0, 2, 4], - ), - ], - ), - ( - zero + 2, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 2, key: vec![102] }, - vec![0], - )], - ), - ( - zero + 4, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 2, key: vec![102] }, - vec![0, 3], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2], - ), - ], - ), - ], - )], - ); - - let mut changes = OverlayedChanges::default(); - changes.set_collect_extrinsics(true); - - changes.start_transaction(); - - changes.set_extrinsic_index(1); - changes.set_storage(vec![101], Some(vec![203])); - - changes.set_extrinsic_index(3); - changes.set_storage(vec![100], Some(vec![202])); - changes.set_child_storage(&child_info_1, vec![100], Some(vec![202])); - - changes.commit_transaction().unwrap(); - - changes.set_extrinsic_index(0); - changes.set_storage(vec![100], Some(vec![0])); - changes.set_extrinsic_index(2); - changes.set_storage(vec![100], Some(vec![200])); - - changes.set_extrinsic_index(0); - changes.set_storage(vec![103], Some(vec![0])); - changes.set_extrinsic_index(1); - changes.set_storage(vec![103], None); - - changes.set_extrinsic_index(0); - changes.set_child_storage(&child_info_1, vec![100], Some(vec![0])); - changes.set_extrinsic_index(2); - changes.set_child_storage(&child_info_1, vec![100], Some(vec![200])); - - changes.set_extrinsic_index(0); - changes.set_child_storage(&child_info_2, vec![100], Some(vec![0])); - changes.set_extrinsic_index(2); - 
changes.set_child_storage(&child_info_2, vec![100], Some(vec![200])); - - changes.set_extrinsic_index(1); - - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - - (backend, storage, changes, config) - } - - fn configuration_range<'a>( - config: &'a Configuration, - zero: u64, - ) -> ConfigurationRange<'a, u64> { - ConfigurationRange { config, zero, end: None } - } - - #[test] - fn build_changes_trie_nodes_on_non_digest_block() { - fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 4 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5, key: vec![103] }, - vec![0, 1] - ), - ] - ); - assert_eq!( - changes_trie_nodes - .1 - .into_iter() - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![ - ( - ChildIndex { block: zero + 5u64, storage_key: child_trie_key1 }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5u64, key: vec![100] }, - vec![0, 2, 3] - ),] - ), - ( - ChildIndex { block: zero + 5, storage_key: child_trie_key2 }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5, key: vec![100] }, - vec![0, 2] - ),] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_on_digest_block_l1() { - fn test_with_zero(zero: u64) { - let 
child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![103] }, - vec![0, 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1, zero + 3] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1, zero + 3] - ), - ] - ); - assert_eq!( - changes_trie_nodes - .1 - .into_iter() - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![ - ( - ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1] - ), - ] - ), - ( - ChildIndex { block: zero + 4, 
storage_key: child_trie_key2.clone() }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2] - ),] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_on_digest_block_l2() { - fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 15 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16, key: vec![103] }, - vec![0, 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![100] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![101] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![102] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![103] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![105] }, - vec![zero + 4, zero + 8] - ), - ] - ); - assert_eq!( - changes_trie_nodes - .1 - .into_iter() - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![ - ( - ChildIndex { block: zero + 16u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16u64, key: vec![100] }, - vec![0, 2, 3] - ), 
- InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![102] }, - vec![zero + 4] - ), - ] - ), - ( - ChildIndex { block: zero + 16, storage_key: child_trie_key2.clone() }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16, key: vec![100] }, - vec![0, 2] - ),] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_on_skewed_digest_block() { - fn test_with_zero(zero: u64) { - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 10 }; - - let mut configuration_range = configuration_range(&config, zero); - let changes_trie_nodes = - prepare_input(&backend, &storage, configuration_range.clone(), &changes, &parent) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![103] }, - vec![0, 1] - ), - ] - ); - - configuration_range.end = Some(zero + 11); - let changes_trie_nodes = - prepare_input(&backend, &storage, configuration_range, &changes, &parent).unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![103] }, - vec![0, 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![100] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![101] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: 
vec![102] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![103] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![105] }, - vec![zero + 4, zero + 8] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_ignores_temporary_storage_values() { - fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, storage, mut changes, config) = prepare_for_build(zero); - - // 110: missing from backend, set to None in overlay - changes.set_storage(vec![110], None); - - let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![103] }, - vec![0, 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1, zero + 3] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1, zero + 3] - ), - ] - ); - assert_eq!( - changes_trie_nodes - .1 - .into_iter() - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![ - ( - ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, - 
vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1] - ), - ] - ), - ( - ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2] - ),] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn cache_is_used_when_changes_trie_is_built() { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, mut storage, changes, config) = prepare_for_build(0); - let parent = AnchorBlockId { hash: Default::default(), number: 15 }; - - // override some actual values from storage with values from the cache - // - // top-level storage: - // (keys 100, 101, 103, 105 are now missing from block#4 => they do not appear - // in l2 digest at block 16) - // - // "1" child storage: - // key 102 is now missing from block#4 => it doesn't appear in l2 digest at block 16 - // (keys 103, 104) are now added to block#4 => they appear in l2 digest at block 16 - // - // "2" child storage: - // (keys 105, 106) are now added to block#4 => they appear in l2 digest at block 16 - let trie_root4 = storage.root(&parent, 4).unwrap().unwrap(); - let cached_data4 = IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()) - .set_digest_input_blocks(vec![1, 2, 3]) - .insert(None, vec![vec![100], vec![102]].into_iter().collect()) - 
.insert(Some(child_trie_key1.clone()), vec![vec![103], vec![104]].into_iter().collect()) - .insert(Some(child_trie_key2.clone()), vec![vec![105], vec![106]].into_iter().collect()) - .complete(4, &trie_root4); - storage.cache_mut().perform(cached_data4); - - let (root_changes_trie_nodes, child_changes_tries_nodes, _) = - prepare_input(&backend, &storage, configuration_range(&config, 0), &changes, &parent) - .unwrap(); - assert_eq!( - root_changes_trie_nodes.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 16, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![103] }, vec![0, 1]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![100] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![102] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![105] }, vec![8]), - ] - ); - - let child_changes_tries_nodes = child_changes_tries_nodes - .into_iter() - .map(|(k, i)| (k, i.collect::>())) - .collect::>(); - assert_eq!( - child_changes_tries_nodes - .get(&ChildIndex { block: 16u64, storage_key: child_trie_key1.clone() }) - .unwrap(), - &vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 16u64, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![103] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![104] }, vec![4]), - ], - ); - assert_eq!( - child_changes_tries_nodes - .get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.clone() }) - .unwrap(), - &vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 16u64, key: vec![100] }, - vec![0, 2] - ), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![105] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![106] }, vec![4]), - ], - ); - } -} diff --git 
a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs deleted file mode 100644 index 79022cf69dafd..0000000000000 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ /dev/null @@ -1,430 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Changes trie related structures and functions. -//! -//! Changes trie is a trie built of { storage key => extrinsics } pairs -//! at the end of each block. For every changed storage key it contains -//! a pair, mapping key to the set of extrinsics where it has been changed. -//! -//! Optionally, every N blocks, additional level1-digest nodes are appended -//! to the changes trie, containing pairs { storage key => blocks }. For every -//! storage key that has been changed in PREVIOUS N-1 blocks (except for genesis -//! block) it contains a pair, mapping this key to the set of blocks where it -//! has been changed. -//! -//! Optionally, every N^digest_level (where digest_level > 1) blocks, additional -//! digest_level digest is created. It is built out of pairs { storage key => digest -//! block }, containing entries for every storage key that has been changed in -//! the last N*digest_level-1 blocks (except for genesis block), mapping these keys -//! to the set of lower-level digest blocks. -//! -//! 
Changes trie configuration could change within a time. The range of blocks, where -//! configuration has been active, is given by two blocks: zero and end. Zero block is -//! the block where configuration has been set. But the first changes trie that uses -//! this configuration will be built at the block zero+1. If configuration deactivates -//! at some block, this will be the end block of the configuration. It is also the -//! zero block of the next configuration. -//! -//! If configuration has the end block, it also means that 'skewed digest' has/should -//! been built at that block. If this is the block where max-level digest should have -//! been created, than it is simply max-level digest of this configuration. Otherwise, -//! it is the digest that covers all blocks since last max-level digest block was -//! created. -//! -//! Changes trie only contains the top level storage changes. Sub-level changes -//! are propagated through its storage root on the top level storage. - -mod build; -mod build_cache; -mod build_iterator; -mod changes_iterator; -mod input; -mod prune; -mod storage; -mod surface_iterator; - -pub use self::{ - build_cache::{BuildCache, CacheAction, CachedBuildData}, - changes_iterator::{ - key_changes, key_changes_proof, key_changes_proof_check, key_changes_proof_check_with_db, - }, - prune::prune, - storage::InMemoryStorage, -}; - -use crate::{ - backend::Backend, - changes_trie::{ - build::prepare_input, - build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}, - }, - overlayed_changes::OverlayedChanges, - StorageKey, -}; -use codec::{Decode, Encode}; -use hash_db::{Hasher, Prefix}; -use num_traits::{One, Zero}; -use sp_core::{self, storage::PrefixedStorageKey}; -use sp_trie::{DBValue, MemoryDB, TrieMut}; -use std::{ - collections::{HashMap, HashSet}, - convert::TryInto, -}; -// change trie using V0 trie (no need for attached node value). 
-use sp_trie::trie_types::TrieDBMutV0 as TrieDBMut; - -/// Requirements for block number that can be used with changes tries. -pub trait BlockNumber: - Send - + Sync - + 'static - + std::fmt::Display - + Clone - + From - + TryInto - + One - + Zero - + PartialEq - + Ord - + std::hash::Hash - + std::ops::Add - + ::std::ops::Sub - + std::ops::Mul - + ::std::ops::Div - + std::ops::Rem - + std::ops::AddAssign - + num_traits::CheckedMul - + num_traits::CheckedSub - + Decode - + Encode -{ -} - -impl BlockNumber for T where - T: Send - + Sync - + 'static - + std::fmt::Display - + Clone - + From - + TryInto - + One - + Zero - + PartialEq - + Ord - + std::hash::Hash - + std::ops::Add - + ::std::ops::Sub - + std::ops::Mul - + ::std::ops::Div - + std::ops::Rem - + std::ops::AddAssign - + num_traits::CheckedMul - + num_traits::CheckedSub - + Decode - + Encode -{ -} - -/// Block identifier that could be used to determine fork of this block. -#[derive(Debug)] -pub struct AnchorBlockId { - /// Hash of this block. - pub hash: Hash, - /// Number of this block. - pub number: Number, -} - -/// Changes tries state at some block. -pub struct State<'a, H, Number> { - /// Configuration that is active at given block. - pub config: Configuration, - /// Configuration activation block number. Zero if it is the first configuration on the chain, - /// or number of the block that have emit NewConfiguration signal (thus activating - /// configuration starting from the **next** block). - pub zero: Number, - /// Underlying changes tries storage reference. - pub storage: &'a dyn Storage, -} - -/// Changes trie storage. Provides access to trie roots and trie nodes. -pub trait RootsStorage: Send + Sync { - /// Resolve hash of the block into anchor. - fn build_anchor(&self, hash: H::Out) -> Result, String>; - /// Get changes trie root for the block with given number which is an ancestor (or the block - /// itself) of the anchor_block (i.e. anchor_block.number >= block). 
- fn root( - &self, - anchor: &AnchorBlockId, - block: Number, - ) -> Result, String>; -} - -/// Changes trie storage. Provides access to trie roots and trie nodes. -pub trait Storage: RootsStorage { - /// Casts from self reference to RootsStorage reference. - fn as_roots_storage(&self) -> &dyn RootsStorage; - /// Execute given functor with cached entry for given trie root. - /// Returns true if the functor has been called (cache entry exists) and false otherwise. - fn with_cached_changed_keys( - &self, - root: &H::Out, - functor: &mut dyn FnMut(&HashMap, HashSet>), - ) -> bool; - /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; -} - -/// Changes trie storage -> trie backend essence adapter. -pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>( - pub &'a dyn Storage, -); - -impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage - for TrieBackendStorageAdapter<'a, H, N> -{ - type Overlay = sp_trie::MemoryDB; - - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - self.0.get(key, prefix) - } -} - -/// Changes trie configuration. -pub type Configuration = sp_core::ChangesTrieConfiguration; - -/// Blocks range where configuration has been constant. -#[derive(Clone)] -pub struct ConfigurationRange<'a, N> { - /// Active configuration. - pub config: &'a Configuration, - /// Zero block of this configuration. The configuration is active starting from the next block. - pub zero: N, - /// End block of this configuration. It is the last block where configuration has been active. - pub end: Option, -} - -impl<'a, H, Number> State<'a, H, Number> { - /// Create state with given config and storage. 
- pub fn new(config: Configuration, zero: Number, storage: &'a dyn Storage) -> Self { - Self { config, zero, storage } - } -} - -impl<'a, H, Number: Clone> Clone for State<'a, H, Number> { - fn clone(&self) -> Self { - State { config: self.config.clone(), zero: self.zero.clone(), storage: self.storage } - } -} - -/// Create state where changes tries are disabled. -pub fn disabled_state<'a, H, Number>() -> Option> { - None -} - -/// Compute the changes trie root and transaction for given block. -/// Returns Err(()) if unknown `parent_hash` has been passed. -/// Returns Ok(None) if there's no data to perform computation. -/// Panics if background storage returns an error OR if insert to MemoryDB fails. -pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( - backend: &B, - state: Option<&'a State<'a, H, Number>>, - changes: &OverlayedChanges, - parent_hash: H::Out, - panic_on_storage_error: bool, -) -> Result, H::Out, CacheAction)>, ()> -where - H::Out: Ord + 'static + Encode, -{ - /// Panics when `res.is_err() && panic`, otherwise it returns `Err(())` on an error. - fn maybe_panic( - res: std::result::Result, - panic: bool, - ) -> std::result::Result { - res.map(Ok).unwrap_or_else(|e| { - if panic { - panic!( - "changes trie: storage access is not allowed to fail within runtime: {:?}", - e - ) - } else { - Err(()) - } - }) - } - - // when storage isn't provided, changes tries aren't created - let state = match state { - Some(state) => state, - None => return Ok(None), - }; - - // build_anchor error should not be considered fatal - let parent = state.storage.build_anchor(parent_hash).map_err(|_| ())?; - let block = parent.number.clone() + One::one(); - - // prepare configuration range - we already know zero block. 
Current block may be the end block - // if configuration has been changed in this block - let is_config_changed = - match changes.storage(sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG) { - Some(Some(new_config)) => new_config != &state.config.encode()[..], - Some(None) => true, - None => false, - }; - let config_range = ConfigurationRange { - config: &state.config, - zero: state.zero.clone(), - end: if is_config_changed { Some(block.clone()) } else { None }, - }; - - // storage errors are considered fatal (similar to situations when runtime fetches values from - // storage) - let (input_pairs, child_input_pairs, digest_input_blocks) = maybe_panic( - prepare_input::( - backend, - state.storage, - config_range.clone(), - changes, - &parent, - ), - panic_on_storage_error, - )?; - - // prepare cached data - let mut cache_action = prepare_cached_build_data(config_range, block.clone()); - let needs_changed_keys = cache_action.collects_changed_keys(); - cache_action = cache_action.set_digest_input_blocks(digest_input_blocks); - - let mut mdb = MemoryDB::default(); - let mut child_roots = Vec::with_capacity(child_input_pairs.len()); - for (child_index, input_pairs) in child_input_pairs { - let mut not_empty = false; - let mut root = Default::default(); - { - let mut trie = TrieDBMut::::new(&mut mdb, &mut root); - let mut storage_changed_keys = HashSet::new(); - for input_pair in input_pairs { - if needs_changed_keys { - if let Some(key) = input_pair.key() { - storage_changed_keys.insert(key.to_vec()); - } - } - - let (key, value) = input_pair.into(); - not_empty = true; - maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; - } - - cache_action = - cache_action.insert(Some(child_index.storage_key.clone()), storage_changed_keys); - } - if not_empty { - child_roots.push(input::InputPair::ChildIndex(child_index, root.as_ref().to_vec())); - } - } - let mut root = Default::default(); - { - let mut trie = TrieDBMut::::new(&mut mdb, &mut root); - for (key, 
value) in child_roots.into_iter().map(Into::into) { - maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; - } - - let mut storage_changed_keys = HashSet::new(); - for input_pair in input_pairs { - if needs_changed_keys { - if let Some(key) = input_pair.key() { - storage_changed_keys.insert(key.to_vec()); - } - } - - let (key, value) = input_pair.into(); - maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; - } - - cache_action = cache_action.insert(None, storage_changed_keys); - } - - let cache_action = cache_action.complete(block, &root); - Ok(Some((mdb, root, cache_action))) -} - -/// Prepare empty cached build data for given block. -fn prepare_cached_build_data( - config: ConfigurationRange, - block: Number, -) -> IncompleteCacheAction { - // when digests are not enabled in configuration, we do not need to cache anything - // because it'll never be used again for building other tries - // => let's clear the cache - if !config.config.is_digest_build_enabled() { - return IncompleteCacheAction::Clear - } - - // when this is the last block where current configuration is active - // => let's clear the cache - if config.end.as_ref() == Some(&block) { - return IncompleteCacheAction::Clear - } - - // we do not need to cache anything when top-level digest trie is created, because - // it'll never be used again for building other tries - // => let's clear the cache - match config.config.digest_level_at_block(config.zero.clone(), block) { - Some((digest_level, _, _)) if digest_level == config.config.digest_levels => - IncompleteCacheAction::Clear, - _ => IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()), - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn cache_is_cleared_when_digests_are_disabled() { - let config = Configuration { digest_interval: 0, digest_levels: 0 }; - let config_range = ConfigurationRange { zero: 0, end: None, config: &config }; - assert_eq!(prepare_cached_build_data(config_range, 8u32), 
IncompleteCacheAction::Clear); - } - - #[test] - fn build_data_is_cached_when_digests_are_enabled() { - let config = Configuration { digest_interval: 8, digest_levels: 2 }; - let config_range = ConfigurationRange { zero: 0, end: None, config: &config }; - assert!(prepare_cached_build_data(config_range.clone(), 4u32).collects_changed_keys()); - assert!(prepare_cached_build_data(config_range.clone(), 7u32).collects_changed_keys()); - assert!(prepare_cached_build_data(config_range, 8u32).collects_changed_keys()); - } - - #[test] - fn cache_is_cleared_when_digests_are_enabled_and_top_level_digest_is_built() { - let config = Configuration { digest_interval: 8, digest_levels: 2 }; - let config_range = ConfigurationRange { zero: 0, end: None, config: &config }; - assert_eq!(prepare_cached_build_data(config_range, 64u32), IncompleteCacheAction::Clear); - } - - #[test] - fn cache_is_cleared_when_end_block_of_configuration_is_built() { - let config = Configuration { digest_interval: 8, digest_levels: 2 }; - let config_range = ConfigurationRange { zero: 0, end: Some(4u32), config: &config }; - assert_eq!( - prepare_cached_build_data(config_range.clone(), 4u32), - IncompleteCacheAction::Clear - ); - } -} From 56d40e5f1ee81355d5f750a2a75690f47ef00ad8 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Tue, 4 Jan 2022 14:08:44 +0000 Subject: [PATCH 158/188] sync more files with master to minimize the diff --- client/service/src/client/call_executor.rs | 433 ++++++++++++++++++ frame/executive/src/lib.rs | 4 +- primitives/core/src/lib.rs | 1 - .../runtime-interface/proc-macro/src/lib.rs | 24 +- .../bare_function_interface.rs | 75 +-- .../proc-macro/src/runtime_interface/mod.rs | 10 +- primitives/runtime/Cargo.toml | 1 - primitives/runtime/src/lib.rs | 2 - primitives/state-machine/src/ext.rs | 2 +- primitives/state-machine/src/testing.rs | 20 +- 10 files changed, 451 insertions(+), 121 deletions(-) create mode 100644 client/service/src/client/call_executor.rs diff --git 
a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs new file mode 100644 index 0000000000000..f2b45bc413e2a --- /dev/null +++ b/client/service/src/client/call_executor.rs @@ -0,0 +1,433 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use super::{client::ClientConfig, wasm_override::WasmOverride, wasm_substitutes::WasmSubstitutes}; +use codec::{Decode, Encode}; +use sc_client_api::{backend, call_executor::CallExecutor, HeaderBackend}; +use sc_executor::{RuntimeVersion, RuntimeVersionOf}; +use sp_api::{ProofRecorder, StorageTransactionCache}; +use sp_core::{ + traits::{CodeExecutor, RuntimeCode, SpawnNamed}, + NativeOrEncoded, NeverNativeValue, +}; +use sp_externalities::Extensions; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use sp_state_machine::{ + self, backend::Backend as _, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, + StateMachine, StorageProof, +}; +use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; + +/// Call executor that executes methods locally, querying all required +/// data from local backend. 
+pub struct LocalCallExecutor { + backend: Arc, + executor: E, + wasm_override: Arc>, + wasm_substitutes: WasmSubstitutes, + spawn_handle: Box, + client_config: ClientConfig, +} + +impl LocalCallExecutor +where + E: CodeExecutor + RuntimeVersionOf + Clone + 'static, + B: backend::Backend, +{ + /// Creates new instance of local call executor. + pub fn new( + backend: Arc, + executor: E, + spawn_handle: Box, + client_config: ClientConfig, + ) -> sp_blockchain::Result { + let wasm_override = client_config + .wasm_runtime_overrides + .as_ref() + .map(|p| WasmOverride::new(p.clone(), &executor)) + .transpose()?; + + let wasm_substitutes = WasmSubstitutes::new( + client_config.wasm_runtime_substitutes.clone(), + executor.clone(), + backend.clone(), + )?; + + Ok(LocalCallExecutor { + backend, + executor, + wasm_override: Arc::new(wasm_override), + spawn_handle, + client_config, + wasm_substitutes, + }) + } + + /// Check if local runtime code overrides are enabled and one is available + /// for the given `BlockId`. If yes, return it; otherwise return the same + /// `RuntimeCode` instance that was passed. 
+ fn check_override<'a>( + &'a self, + onchain_code: RuntimeCode<'a>, + id: &BlockId, + ) -> sp_blockchain::Result> + where + Block: BlockT, + B: backend::Backend, + { + let spec = CallExecutor::runtime_version(self, id)?; + let code = if let Some(d) = self + .wasm_override + .as_ref() + .as_ref() + .map(|o| o.get(&spec.spec_version, onchain_code.heap_pages, &spec.spec_name)) + .flatten() + { + log::debug!(target: "wasm_overrides", "using WASM override for block {}", id); + d + } else if let Some(s) = + self.wasm_substitutes.get(spec.spec_version, onchain_code.heap_pages, id) + { + log::debug!(target: "wasm_substitutes", "Using WASM substitute for block {:?}", id); + s + } else { + log::debug!( + target: "wasm_overrides", + "No WASM override available for block {}, using onchain code", + id + ); + onchain_code + }; + + Ok(code) + } +} + +impl Clone for LocalCallExecutor +where + E: Clone, +{ + fn clone(&self) -> Self { + LocalCallExecutor { + backend: self.backend.clone(), + executor: self.executor.clone(), + wasm_override: self.wasm_override.clone(), + spawn_handle: self.spawn_handle.clone(), + client_config: self.client_config.clone(), + wasm_substitutes: self.wasm_substitutes.clone(), + } + } +} + +impl CallExecutor for LocalCallExecutor +where + B: backend::Backend, + E: CodeExecutor + RuntimeVersionOf + Clone + 'static, + Block: BlockT, +{ + type Error = E::Error; + + type Backend = B; + + fn call( + &self, + at: &BlockId, + method: &str, + call_data: &[u8], + strategy: ExecutionStrategy, + extensions: Option, + ) -> sp_blockchain::Result> { + let mut changes = OverlayedChanges::default(); + let state = self.backend.state_at(*at)?; + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + + let runtime_code = self.check_override(runtime_code, at)?; + + let at_hash = self.backend.blockchain().block_hash_from_id(at)?.ok_or_else(|| 
{ + sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) + })?; + + let return_data = StateMachine::new( + &state, + &mut changes, + &self.executor, + method, + call_data, + extensions.unwrap_or_default(), + &runtime_code, + self.spawn_handle.clone(), + ) + .set_parent_hash(at_hash) + .execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + strategy.get_manager(), + None, + )?; + + Ok(return_data.into_encoded()) + } + + fn contextual_call< + EM: Fn( + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + at: &BlockId, + method: &str, + call_data: &[u8], + changes: &RefCell, + storage_transaction_cache: Option<&RefCell>>, + execution_manager: ExecutionManager, + native_call: Option, + recorder: &Option>, + extensions: Option, + ) -> Result, sp_blockchain::Error> + where + ExecutionManager: Clone, + { + let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); + + let state = self.backend.state_at(*at)?; + + let changes = &mut *changes.borrow_mut(); + + let at_hash = self.backend.blockchain().block_hash_from_id(at)?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!("Could not find block hash for {:?}", at)) + })?; + + // It is important to extract the runtime code here before we create the proof + // recorder to not record it. We also need to fetch the runtime code from `state` to + // make sure we use the caching layers. 
+ let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = self.check_override(runtime_code, at)?; + + match recorder { + Some(recorder) => { + let trie_state = state.as_trie_backend().ok_or_else(|| { + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) + as Box + })?; + + let backend = sp_state_machine::ProvingBackend::new_with_recorder( + trie_state, + recorder.clone(), + ); + + let mut state_machine = StateMachine::new( + &backend, + changes, + &self.executor, + method, + call_data, + extensions.unwrap_or_default(), + &runtime_code, + self.spawn_handle.clone(), + ) + .set_parent_hash(at_hash); + // TODO: https://github.com/paritytech/substrate/issues/4455 + state_machine.execute_using_consensus_failure_handler( + execution_manager, + native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)), + ) + }, + None => { + let mut state_machine = StateMachine::new( + &state, + changes, + &self.executor, + method, + call_data, + extensions.unwrap_or_default(), + &runtime_code, + self.spawn_handle.clone(), + ) + .with_storage_transaction_cache( + storage_transaction_cache.as_mut().map(|c| &mut **c), + ) + .set_parent_hash(at_hash); + state_machine.execute_using_consensus_failure_handler( + execution_manager, + native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)), + ) + }, + } + .map_err(Into::into) + } + + fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { + let mut overlay = OverlayedChanges::default(); + let state = self.backend.state_at(*id)?; + let mut cache = StorageTransactionCache::::default(); + let mut ext = Ext::new(&mut overlay, &mut cache, &state, None); + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + self.executor + 
.runtime_version(&mut ext, &runtime_code) + .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) + } + + fn prove_execution( + &self, + at: &BlockId, + method: &str, + call_data: &[u8], + ) -> sp_blockchain::Result<(Vec, StorageProof)> { + let state = self.backend.state_at(*at)?; + + let trie_backend = state.as_trie_backend().ok_or_else(|| { + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) + as Box + })?; + + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(trie_backend); + let runtime_code = + state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; + let runtime_code = self.check_override(runtime_code, at)?; + + sp_state_machine::prove_execution_on_trie_backend( + &trie_backend, + &mut Default::default(), + &self.executor, + self.spawn_handle.clone(), + method, + call_data, + &runtime_code, + ) + .map_err(Into::into) + } +} + +impl RuntimeVersionOf for LocalCallExecutor +where + E: RuntimeVersionOf, + Block: BlockT, +{ + fn runtime_version( + &self, + ext: &mut dyn sp_externalities::Externalities, + runtime_code: &sp_core::traits::RuntimeCode, + ) -> Result { + RuntimeVersionOf::runtime_version(&self.executor, ext, runtime_code) + } +} + +impl sp_version::GetRuntimeVersionAt for LocalCallExecutor +where + B: backend::Backend, + E: CodeExecutor + RuntimeVersionOf + Clone + 'static, + Block: BlockT, +{ + fn runtime_version(&self, at: &BlockId) -> Result { + CallExecutor::runtime_version(self, at).map_err(|e| format!("{:?}", e)) + } +} + +impl sp_version::GetNativeVersion for LocalCallExecutor +where + B: backend::Backend, + E: CodeExecutor + sp_version::GetNativeVersion + Clone + 'static, + Block: BlockT, +{ + fn native_version(&self) -> &sp_version::NativeVersion { + self.executor.native_version() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sc_client_api::in_mem; + use sc_executor::{NativeElseWasmExecutor, WasmExecutionMethod}; + use sp_core::{ + 
testing::TaskExecutor, + traits::{FetchRuntimeCode, WrappedRuntimeCode}, + }; + use substrate_test_runtime_client::{runtime, GenesisInit, LocalExecutorDispatch}; + + #[test] + fn should_get_override_if_exists() { + let executor = NativeElseWasmExecutor::::new( + WasmExecutionMethod::Interpreted, + Some(128), + 1, + 2, + ); + + let overrides = crate::client::wasm_override::dummy_overrides(); + let onchain_code = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); + let onchain_code = RuntimeCode { + code_fetcher: &onchain_code, + heap_pages: Some(128), + hash: vec![0, 0, 0, 0], + }; + + let backend = Arc::new(in_mem::Backend::::new()); + + // wasm_runtime_overrides is `None` here because we construct the + // LocalCallExecutor directly later on + let client_config = ClientConfig::default(); + + // client is used for the convenience of creating and inserting the genesis block. + let _client = substrate_test_runtime_client::client::new_with_backend::< + _, + _, + runtime::Block, + _, + runtime::RuntimeApi, + >( + backend.clone(), + executor.clone(), + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + None, + Box::new(TaskExecutor::new()), + None, + None, + Default::default(), + ) + .expect("Creates a client"); + + let call_executor = LocalCallExecutor { + backend: backend.clone(), + executor: executor.clone(), + wasm_override: Arc::new(Some(overrides)), + spawn_handle: Box::new(TaskExecutor::new()), + client_config, + wasm_substitutes: WasmSubstitutes::new( + Default::default(), + executor.clone(), + backend.clone(), + ) + .unwrap(), + }; + + let check = call_executor + .check_override(onchain_code, &BlockId::Number(Default::default())) + .expect("RuntimeCode override"); + + assert_eq!(Some(vec![2, 2, 2, 2, 2, 2, 2, 2]), check.fetch_runtime_code().map(Into::into)); + } +} diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index c8b08904f151f..be944954eaa59 100644 --- a/frame/executive/src/lib.rs 
+++ b/frame/executive/src/lib.rs @@ -937,7 +937,7 @@ mod tests { #[test] #[should_panic] fn block_import_of_bad_state_root_fails() { - new_test_ext_v0(1).execute_with(|| { + new_test_ext(1).execute_with(|| { Executive::execute_block(Block { header: Header { parent_hash: [69u8; 32].into(), @@ -957,7 +957,7 @@ mod tests { #[test] #[should_panic] fn block_import_of_bad_extrinsic_root_fails() { - new_test_ext_v0(1).execute_with(|| { + new_test_ext(1).execute_with(|| { Executive::execute_block(Block { header: Header { parent_hash: [69u8; 32].into(), diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 10bacf634c8b4..5bb4a7d087bf0 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -85,7 +85,6 @@ pub use self::hasher::keccak::KeccakHasher; pub use hash_db::Hasher; pub use sp_storage as storage; -pub use sp_storage::StateVersion; #[doc(hidden)] pub use sp_std; diff --git a/primitives/runtime-interface/proc-macro/src/lib.rs b/primitives/runtime-interface/proc-macro/src/lib.rs index 7653c81a2a4a3..afba38993fe76 100644 --- a/primitives/runtime-interface/proc-macro/src/lib.rs +++ b/primitives/runtime-interface/proc-macro/src/lib.rs @@ -39,17 +39,16 @@ mod utils; struct Options { wasm_only: bool, tracing: bool, - feature_force_version: Vec<(String, String, u32)>, } impl Options { - fn unpack(self) -> (bool, bool, Vec<(String, String, u32)>) { - (self.wasm_only, self.tracing, self.feature_force_version) + fn unpack(self) -> (bool, bool) { + (self.wasm_only, self.tracing) } } impl Default for Options { fn default() -> Self { - Options { wasm_only: false, tracing: true, feature_force_version: Vec::new() } + Options { wasm_only: false, tracing: true } } } @@ -64,19 +63,6 @@ impl Parse for Options { } else if lookahead.peek(runtime_interface::keywords::no_tracing) { let _ = input.parse::(); res.tracing = false; - } else if lookahead.peek(runtime_interface::keywords::feature_force_version) { - let _ = input.parse::(); - let _ = 
input.parse::(); - let feature_name = input.parse::()?; - let _ = input.parse::(); - let fonc_name = input.parse::()?; - let _ = input.parse::(); - let fonc_version = match input.parse::()? { - syn::ExprLit { lit: syn::Lit::Int(lit), .. } => lit.base10_parse::()?, - _ => return Err(lookahead.error()), - }; - let patch = (feature_name.to_string(), fonc_name.to_string(), fonc_version); - res.feature_force_version.push(patch); } else if lookahead.peek(Token![,]) { let _ = input.parse::(); } else { @@ -93,9 +79,9 @@ pub fn runtime_interface( input: proc_macro::TokenStream, ) -> proc_macro::TokenStream { let trait_def = parse_macro_input!(input as ItemTrait); - let (wasm_only, tracing, feature_force_version) = parse_macro_input!(attrs as Options).unpack(); + let (wasm_only, tracing) = parse_macro_input!(attrs as Options).unpack(); - runtime_interface::runtime_interface_impl(trait_def, wasm_only, tracing, feature_force_version) + runtime_interface::runtime_interface_impl(trait_def, wasm_only, tracing) .unwrap_or_else(|e| e.to_compile_error()) .into() } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs index 43d4045b42645..cbb749a111a15 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs @@ -45,29 +45,9 @@ use quote::{quote, quote_spanned}; use std::iter; -fn not_feature_force_version( - feature_force_version: &Vec<(String, String, u32)>, - method: &TraitItemMethod, -) -> TokenStream { - let method = method.sig.ident.to_string(); - for (feature, method_patch, _version_patch) in feature_force_version { - if &method == method_patch { - return quote! { - #[cfg(not(feature=#feature))] - } - } - } - quote! {} -} - /// Generate one bare function per trait method. 
The name of the bare function is equal to the name /// of the trait method. -pub fn generate( - trait_def: &ItemTrait, - is_wasm_only: bool, - tracing: bool, - feature_force_version: &Vec<(String, String, u32)>, -) -> Result { +pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool, tracing: bool) -> Result { let trait_name = &trait_def.ident; let runtime_interface = get_runtime_interface(trait_def)?; @@ -75,35 +55,7 @@ pub fn generate( let token_stream: Result = runtime_interface.latest_versions().try_fold( TokenStream::new(), |mut t, (latest_version, method)| { - t.extend(function_for_method( - method, - latest_version, - is_wasm_only, - not_feature_force_version(&feature_force_version, &method), - )?); - Ok(t) - }, - ); - - // forced version - let token_stream: Result = feature_force_version.iter().try_fold( - token_stream?, - |mut t, (feature, method, force_version)| { - // lookup method - let (_, full_method) = runtime_interface - .all_versions() - .find(|(version, full_method)| { - version == force_version && &full_method.sig.ident.to_string() == method - }) - .expect("Force version not found"); - - let feature_check = quote!(#[cfg(feature=#feature)]); - t.extend(function_for_method( - full_method, - *force_version, - is_wasm_only, - feature_check, - )?); + t.extend(function_for_method(method, latest_version, is_wasm_only)?); Ok(t) }, ); @@ -125,15 +77,11 @@ fn function_for_method( method: &TraitItemMethod, latest_version: u32, is_wasm_only: bool, - feature_check: TokenStream, ) -> Result { - let std_impl = if !is_wasm_only { - function_std_latest_impl(method, latest_version, &feature_check)? - } else { - quote!() - }; + let std_impl = + if !is_wasm_only { function_std_latest_impl(method, latest_version)? } else { quote!() }; - let no_std_impl = function_no_std_impl(method, feature_check)?; + let no_std_impl = function_no_std_impl(method)?; Ok(quote! 
{ #std_impl @@ -143,10 +91,7 @@ fn function_for_method( } /// Generates the bare function implementation for `cfg(not(feature = "std"))`. -fn function_no_std_impl( - method: &TraitItemMethod, - feature_check: TokenStream, -) -> Result { +fn function_no_std_impl(method: &TraitItemMethod) -> Result { let function_name = &method.sig.ident; let host_function_name = create_exchangeable_host_function_ident(&method.sig.ident); let args = get_function_arguments(&method.sig); @@ -155,7 +100,6 @@ fn function_no_std_impl( let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); Ok(quote! { - #feature_check #[cfg(not(feature = "std"))] #( #attrs )* pub fn #function_name( #( #args, )* ) #return_value { @@ -168,11 +112,7 @@ fn function_no_std_impl( /// Generate call to latest function version for `cfg((feature = "std")` /// /// This should generate simple `fn func(..) { func_version_(..) }`. -fn function_std_latest_impl( - method: &TraitItemMethod, - latest_version: u32, - feature_check: &TokenStream, -) -> Result { +fn function_std_latest_impl(method: &TraitItemMethod, latest_version: u32) -> Result { let function_name = &method.sig.ident; let args = get_function_arguments(&method.sig).map(FnArg::Typed); let arg_names = get_function_argument_names(&method.sig).collect::>(); @@ -182,7 +122,6 @@ fn function_std_latest_impl( create_function_ident_with_version(&method.sig.ident, latest_version); Ok(quote_spanned! 
{ method.span() => - #feature_check #[cfg(feature = "std")] #( #attrs )* pub fn #function_name( #( #args, )* ) #return_value { diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs index 8195fd1aafa3d..d14c1f67ecff5 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs @@ -35,8 +35,6 @@ pub mod keywords { syn::custom_keyword!(wasm_only); // Disable tracing-macros added to the [`runtime_interface`] by specifying this optional entry syn::custom_keyword!(no_tracing); - // Only allow function declaration depending on external crate feature. - syn::custom_keyword!(feature_force_version); } /// Implementation of the `runtime_interface` attribute. @@ -47,14 +45,8 @@ pub fn runtime_interface_impl( trait_def: ItemTrait, is_wasm_only: bool, tracing: bool, - feature_force_version: Vec<(String, String, u32)>, ) -> Result { - let bare_functions = bare_function_interface::generate( - &trait_def, - is_wasm_only, - tracing, - &feature_force_version, - )?; + let bare_functions = bare_function_interface::generate(&trait_def, is_wasm_only, tracing)?; let crate_include = generate_runtime_interface_include(); let mod_name = Ident::new(&trait_def.ident.to_string().to_snake_case(), Span::call_site()); let trait_decl_impl = trait_decl_impl::process(&trait_def, is_wasm_only)?; diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index c4fec726bacd0..abf246eece9a0 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -59,4 +59,3 @@ std = [ "hash256-std-hasher/std", "either/use_std", ] -old_state = ["sp-io/old_state"] diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index f6382be89675b..cb9ba9ff297fb 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -40,8 +40,6 @@ pub use paste; 
#[doc(hidden)] pub use sp_application_crypto as app_crypto; -#[cfg(feature = "std")] -pub use sp_core::hashing; pub use sp_core::storage::StateVersion; #[cfg(feature = "std")] pub use sp_core::storage::{Storage, StorageChild}; diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index a5f8ffd2e171e..93e6ac6e5c530 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -98,7 +98,7 @@ where /// The overlayed changes to write to. overlay: &'a mut OverlayedChanges, /// The storage backend to read from. - pub(crate) backend: &'a B, + backend: &'a B, /// The cache for the storage transactions. storage_transaction_cache: &'a mut StorageTransactionCache, /// Pseudo-unique id used for tracing. diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 1285685f37939..e21644614c83c 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -72,21 +72,6 @@ where ) } - /// Get an externalities implementation, using the given `proving_backend`. - /// - /// This will be capable of computing the PoV. See [`execute_and_get_proof`]. - pub fn proving_ext<'a>( - &'a mut self, - proving_backend: &'a InMemoryProvingBackend<'a, H>, - ) -> Ext> { - Ext::new( - &mut self.overlay, - &mut self.storage_transaction_cache, - &proving_backend, - Some(&mut self.extensions), - ) - } - /// Create a new instance of `TestExternalities` with storage. pub fn new(storage: Storage) -> Self { Self::new_with_code_and_state(&[], storage, Default::default()) @@ -115,6 +100,7 @@ where state_version: StateVersion, ) -> Self { assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); + assert!(storage.children_default.keys().all(|key| is_child_storage_key(key))); storage.top.insert(CODE.to_vec(), code.to_vec()); @@ -150,9 +136,7 @@ where self.offchain_db.clone() } - /// Insert key/value into backend. 
- /// - /// This only supports inserting keys in `top` trie. + /// Insert key/value into backend pub fn insert(&mut self, k: StorageKey, v: StorageValue) { self.backend.insert(vec![(None, vec![(k, Some(v))])], self.state_version); } From fc689fca2dfc33594d7f92c03c96898c968c930d Mon Sep 17 00:00:00 2001 From: kianenigma Date: Tue, 4 Jan 2022 14:21:39 +0000 Subject: [PATCH 159/188] Fix all tests --- frame/state-trie-migration/src/lib.rs | 1 - primitives/state-machine/src/testing.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 4c236d6843598..060a8bfe62e7d 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -187,7 +187,6 @@ pub mod pallet { pub(crate) _ph: sp_std::marker::PhantomData, } - #[cfg(any(feature = "std", feature = "runtime-benchmarks", feature = "try-runtime"))] impl sp_std::fmt::Debug for MigrationTask { fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { f.debug_struct("MigrationTask") diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index e21644614c83c..352efd0c6a786 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -100,7 +100,6 @@ where state_version: StateVersion, ) -> Self { assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); - assert!(storage.children_default.keys().all(|key| is_child_storage_key(key))); storage.top.insert(CODE.to_vec(), code.to_vec()); From bb0b00f8297523537b5f92b155f51bfb83b5b121 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Tue, 4 Jan 2022 16:23:34 +0000 Subject: [PATCH 160/188] make signed migration a bit more relaxed --- frame/state-trie-migration/src/lib.rs | 88 ++++++++++++--------- primitives/io/src/lib.rs | 6 -- primitives/state-machine/src/testing.rs | 6 +- utils/frame/remote-externalities/src/lib.rs | 6 ++ 4 files changed, 61 insertions(+), 45 
deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 060a8bfe62e7d..49bdadf98ba72 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -252,10 +252,11 @@ pub mod pallet { /// Migrate keys until either of the given limits are exhausted, or if no more top keys /// exist. /// - /// Note that this can return after the **first** migration tick that causes exhaustion. In - /// other words, this should not be used in any environment where resources are strictly - /// bounded (e.g. a parachain), but it is acceptable otherwise (relay chain, offchain - /// workers). + /// Note that this can return after the **first** migration tick that causes exhaustion, + /// specifically in the case of the `size` constraint. The reason for this is that before + /// reading a key, we simply cannot know how many bytes it is. In other words, this should + /// not be used in any environment where resources are strictly bounded (e.g. a parachain), + /// but it is acceptable otherwise (relay chain, offchain workers). pub(crate) fn migrate_until_exhaustion(&mut self, limits: MigrationLimits) { log!(debug, "running migrations on top of {:?} until {:?}", self, limits); @@ -596,21 +597,27 @@ pub mod pallet { /// /// The dispatch origin of this call can be any signed account. /// - /// This transaction has NO MONETARY INCENTIVES. calling it will only incur transaction fees - /// on the caller, with no rewards paid out. + /// This transaction has NO MONETARY INCENTIVES. Calling it will not reward anyone. However, + /// upon successful execution, the transaction fee is returned. /// - /// The sum of the byte length of all the data read must be provided for up-front - /// fee-payment and weighing. + /// The (potentially over-estimated) byte length of all the data read must be + /// provided for up-front fee-payment and weighing.
In essence, the caller is guaranteeing + /// that executing the current `MigrationTask` with the given `limits` will not exceed + /// `real_size_upper` bytes of read data. + /// + /// Based on the documentation of [`MigrationTask::migrate_until_exhaustion`], the + /// recommended way of doing this is to pass a `limit` that only bounds `count`, as the + /// `size` limit can always be overwritten. #[pallet::weight( // the migration process - Pallet::::dynamic_weight(limits.item, * real_size) + Pallet::::dynamic_weight(limits.item, * real_size_upper) // rest of the operations, like deposit etc. + T::WeightInfo::continue_migrate() )] pub fn continue_migrate( origin: OriginFor, limits: MigrationLimits, - real_size: u32, + real_size_upper: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -628,7 +635,7 @@ pub mod pallet { task.migrate_until_exhaustion(limits); // ensure that the migration witness data was correct. - if real_size != task.dyn_size { + if real_size_upper < task.dyn_size { // let the imbalance burn. let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); debug_assert!(_remainder.is_zero()); @@ -641,8 +648,14 @@ pub mod pallet { compute: MigrationCompute::Signed, }); + let actual_weight = Some( + Pallet::::dynamic_weight(limits.item, task.dyn_size) + + T::WeightInfo::continue_migrate(), + ); MigrationProcess::::put(task); - Ok(Pays::No.into()) + let pays = Pays::No; + + Ok((actual_weight, pays).into()) } /// Migrate the list of top keys by iterating each of them one by one. 
@@ -1563,7 +1576,6 @@ mod remote_tests { use codec::Encode; use mock::run_to_block_and_drain_pool; use remote_externalities::{Mode, OfflineConfig, OnlineConfig}; - use sp_io::InMemoryProvingBackend; use sp_runtime::traits::{Bounded, HashFor}; use std::sync::Arc; @@ -1587,7 +1599,7 @@ mod remote_tests { ..Default::default() }, )) - .state_version(sp_core::StateVersion::V0) + .state_version(sp_core::storage::StateVersion::V0) .build() .await .unwrap(); @@ -1600,13 +1612,22 @@ mod remote_tests { let mut duration = 0; // set the version to 1, as if the upgrade happened. - ext.state_version = sp_core::StateVersion::V1; + ext.state_version = sp_core::storage::StateVersion::V1; + + let (top_left, child_left) = + ext.as_backend().essence().check_migration_state().unwrap(); + // assert!(top_left > 0); TODO + + log::info!( + target: LOG_TARGET, + "initial check: top_left: {}, child_left: {}", + top_left, + child_left, + ); loop { - let trie_backend = ext.backend.clone(); - let last_state_root = trie_backend.root().clone(); - let proving_backend = InMemoryProvingBackend::new(&trie_backend); - let (finished, proof) = ext.execute_and_get_proof(&proving_backend, || { + let last_state_root = ext.backend.root().clone(); + let (finished, proof) = ext.execute_and_prove(|| { run_to_block(now + 1); if StateTrieMigration::migration_process().finished() { return true @@ -1616,21 +1637,16 @@ mod remote_tests { false }); - let (top_left, child_left) = - ext.as_backend().essence().check_migration_state().unwrap(); let compact_proof = proof.clone().into_compact_proof::>(last_state_root).unwrap(); log::info!( target: LOG_TARGET, - "proceeded to #{}, original proof: {}, compact proof size: {}, compact zstd compressed: {} // top_left: {}, child_left: {}", + "proceeded to #{}, original proof: {}, compact proof size: {}, compact zstd compressed: {}", now, proof.encoded_size(), compact_proof.encoded_size(), zstd::stream::encode_all(&compact_proof.encode()[..], 0).unwrap().len(), - top_left, - 
child_left, ); - proving_backend.clear_recorder(); ext.commit_all().unwrap(); if finished { @@ -1676,7 +1692,7 @@ mod remote_tests { ..Default::default() }, )) - .state_version(sp_core::StateVersion::V0) + .state_version(sp_core::storage::StateVersion::V0) .build() .await .unwrap(); @@ -1690,7 +1706,16 @@ mod remote_tests { let mut duration = 0; // set the version to 1, as if the upgrade happened. - ext.state_version = sp_core::StateVersion::V1; + ext.state_version = sp_core::storage::StateVersion::V1; + + let (top_left, child_left) = + ext.as_backend().essence().check_migration_state().unwrap(); + log::info!( + target: LOG_TARGET, + "initial check: top_left: {}, child_left: {}", + top_left, + child_left, + ); loop { let finished = ext.execute_with(|| { @@ -1703,15 +1728,6 @@ mod remote_tests { false }); - let (top_left, child_left) = - ext.as_backend().essence().check_migration_state().unwrap(); - log::info!( - target: LOG_TARGET, - "(top_left: {}, child_left {})", - top_left, - child_left, - ); - if finished { break } diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 3ab12801b2010..76ced407090c3 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -1633,12 +1633,6 @@ pub fn oom(_: core::alloc::Layout) -> ! { #[cfg(feature = "std")] pub type TestExternalities = sp_state_machine::TestExternalities; -/// A backend capable of generating storage proofs, with hash types aligned with -/// [`TestExternalities`]. -#[cfg(feature = "std")] -pub type InMemoryProvingBackend<'a> = - sp_state_machine::InMemoryProvingBackend<'a, sp_core::Blake2Hasher>; - /// The host functions Substrate provides for the Wasm runtime environment. /// /// All these host functions will be callable from inside the Wasm environment. 
diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 352efd0c6a786..e8a09c3aec80e 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -23,8 +23,8 @@ use std::{ }; use crate::{ - backend::Backend, ext::Ext, InMemoryBackend, InMemoryProvingBackend, OverlayedChanges, - StorageKey, StorageTransactionCache, StorageValue, + backend::Backend, ext::Ext, InMemoryBackend, OverlayedChanges, StorageKey, + StorageTransactionCache, StorageValue, }; use hash_db::Hasher; @@ -203,7 +203,7 @@ where /// This implementation will wipe the proof recorded in between calls. Consecutive calls will /// get their own proof from scratch. pub fn execute_and_prove<'a, R>(&mut self, execute: impl FnOnce() -> R) -> (R, StorageProof) { - let proving_backend = InMemoryProvingBackend::new(&self.backend); + let proving_backend = crate::InMemoryProvingBackend::new(&self.backend); let mut proving_ext = Ext::new( &mut self.overlay, &mut self.storage_transaction_cache, diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 4b15c5f5005a3..fee9992e06824 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -195,6 +195,12 @@ impl SnapshotConfig { } } +impl From for SnapshotConfig { + fn from(s: String) -> Self { + Self::new(s) + } +} + impl Default for SnapshotConfig { fn default() -> Self { Self { path: Path::new("SNAPSHOT").into() } From 6b834263e2070a5a358fc2bfe7dcd68534785fbb Mon Sep 17 00:00:00 2001 From: kianenigma Date: Tue, 4 Jan 2022 16:56:06 +0000 Subject: [PATCH 161/188] add witness check to signed submissions --- frame/state-trie-migration/src/lib.rs | 60 ++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 49bdadf98ba72..e89d4d7804622 100644 --- 
a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -109,6 +109,7 @@ pub mod pallet { pub trait WeightInfo { fn process_top_key(x: u32) -> Weight; fn continue_migrate() -> Weight; + fn continue_migrate_wrong_witness() -> Weight; fn migrate_custom_top_fail() -> Weight; fn migrate_custom_top_success() -> Weight; } @@ -120,6 +121,9 @@ pub mod pallet { fn continue_migrate() -> Weight { 1000000 } + fn continue_migrate_wrong_witness() -> Weight { + 1000000 + } fn migrate_custom_top_fail() -> Weight { 1000000 } @@ -605,6 +609,12 @@ pub mod pallet { /// that executing the current `MigrationTask` with the given `limits` will not exceed /// `real_size_upper` bytes of read data. /// + /// The `witness_task` is merely a helper to prevent the caller from being slashed or + /// generally triggering a migration that they do not intend. This parameter is just a message + /// from the caller, saying that they believed `witness_task` was the last state of the + /// migration, and they only wish for their transaction to do anything if this assumption + /// holds. In case `witness_task` does not match, the transaction fails. + /// /// Based on the documentation of [`MigrationTask::migrate_until_exhaustion`], the /// recommended way of doing this is to pass a `limit` that only bounds `count`, as the + /// `size` limit can always be overwritten.
@@ -618,6 +628,7 @@ pub mod pallet { origin: OriginFor, limits: MigrationLimits, real_size_upper: u32, + witness_task: MigrationTask, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -632,6 +643,16 @@ pub mod pallet { ensure!(T::Currency::can_slash(&who, deposit), "not enough funds"); let mut task = Self::migration_process(); + ensure!( + task == witness_task, + DispatchErrorWithPostInfo { + error: "wrong witness".into(), + post_info: PostDispatchInfo { + actual_weight: Some(T::WeightInfo::continue_migrate_wrong_witness()), + pays_fee: Pays::Yes + } + } + ); task.migrate_until_exhaustion(limits); // ensure that the migration witness data was correct. @@ -1008,9 +1029,30 @@ mod benchmarks { frame_benchmarking::benchmarks! { continue_migrate { + // note that this benchmark should migrate nothing, as we only want the overhead weight of the bookkeeping, + // and the migration cost itself is noted via the `dynamic_weight` function. let null = MigrationLimits::default(); let caller = frame_benchmarking::whitelisted_caller(); - }: _(frame_system::RawOrigin::Signed(caller), null, 0) + }: _(frame_system::RawOrigin::Signed(caller), null, 0, StateTrieMigration::::migration_process()) + verify { + assert_eq!(StateTrieMigration::::migration_process(), Default::default()) + } + + continue_migrate_wrong_witness { + let null = MigrationLimits::default(); + let caller = frame_benchmarking::whitelisted_caller(); + let bad_witness = MigrationTask { current_top: Some(vec![1u8]), ..Default::default() }; + }: { + assert!( + StateTrieMigration::::continue_migrate( + frame_system::RawOrigin::Signed(caller).into(), + null, + 0, + bad_witness, + ) + .is_err() + ) + } verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()) } @@ -1453,6 +1495,7 @@ mod test { Origin::signed(1), MigrationLimits { item: 5, size: sp_runtime::traits::Bounded::max_value() }, Bounded::max_value(), + MigrationProcess::::get() ), "max signed limits not respected" ); @@ 
-1463,10 +1506,22 @@ mod test { Origin::signed(2), MigrationLimits { item: 5, size: 100 }, 100, + MigrationProcess::::get() ), "not enough funds" ); + // can't submit with bad witness. + frame_support::assert_err_ignore_postinfo!( + StateTrieMigration::continue_migrate( + Origin::signed(1), + MigrationLimits { item: 5, size: 100 }, + 100, + MigrationTask { current_top: Some(vec![1u8]), ..Default::default() } + ), + "wrong witness" + ); + // migrate all keys in a series of submissions while !MigrationProcess::::get().finished() { // first we compute the task to get the accurate consumption. @@ -1476,7 +1531,8 @@ mod test { frame_support::assert_ok!(StateTrieMigration::continue_migrate( Origin::signed(1), SignedMigrationMaxLimits::get(), - task.dyn_size + task.dyn_size, + MigrationProcess::::get() )); // no funds should remain reserved. From 1626d196a1b288862df62bda571e01f8e865e791 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Tue, 4 Jan 2022 17:15:32 +0000 Subject: [PATCH 162/188] allow custom migration to also go above limit --- frame/state-trie-migration/src/lib.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index e89d4d7804622..616fad4f24ce7 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -687,13 +687,13 @@ pub mod pallet { T::WeightInfo::migrate_custom_top_success() .max(T::WeightInfo::migrate_custom_top_fail()) .saturating_add( - Pallet::::dynamic_weight(keys.len() as u32, *total_size) + Pallet::::dynamic_weight(keys.len() as u32, *witness_size) ) )] pub fn migrate_custom_top( origin: OriginFor, keys: Vec>, - total_size: u32, + witness_size: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; @@ -711,10 +711,10 @@ pub mod pallet { } } - if dyn_size != total_size { + if dyn_size > witness_size { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); 
debug_assert!(_remainder.is_zero()); - return Err("Wrong witness data".into()) + return Err("wrong witness data".into()) } Self::deposit_event(Event::::Migrated { @@ -1571,7 +1571,7 @@ mod test { vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], 69, // wrong witness ), - "Wrong witness data" + "wrong witness data" ); // no funds should remain reserved. From 0b3ea2d3bd9ebce5a3aa15102e24d265a889f544 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 5 Jan 2022 07:20:04 +0000 Subject: [PATCH 163/188] Fix these pesky tests --- frame/state-trie-migration/src/lib.rs | 5 +++-- utils/frame/remote-externalities/src/lib.rs | 7 ++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 616fad4f24ce7..baf5d09c073c2 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1672,7 +1672,7 @@ mod remote_tests { let (top_left, child_left) = ext.as_backend().essence().check_migration_state().unwrap(); - // assert!(top_left > 0); TODO + assert!(top_left > 0); log::info!( target: LOG_TARGET, @@ -1728,7 +1728,7 @@ mod remote_tests { // item being the bottleneck run_with_limits(MigrationLimits { item: 8 * 1024, size: 128 * 1024 * 1024 }).await; // size being the bottleneck - run_with_limits(MigrationLimits { item: Bounded::max_value(), size: 4 * 1024 }).await; + run_with_limits(MigrationLimits { item: Bounded::max_value(), size: 64 * 1024 }).await; } #[tokio::test] @@ -1766,6 +1766,7 @@ mod remote_tests { let (top_left, child_left) = ext.as_backend().essence().check_migration_state().unwrap(); + assert!(top_left > 0); log::info!( target: LOG_TARGET, "initial check: top_left: {}, child_left: {}", diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index fee9992e06824..cd7742c34f26a 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ 
-808,8 +808,13 @@ impl Builder { /// Build the test externalities. pub async fn build(self) -> Result { + let state_version = self.state_version; let (top_kv, child_kv) = self.pre_build().await?; - let mut ext = TestExternalities::new_with_code(Default::default(), Default::default()); + let mut ext = TestExternalities::new_with_code_and_state( + Default::default(), + Default::default(), + state_version, + ); info!(target: LOG_TARGET, "injecting a total of {} top keys", top_kv.len()); for (k, v) in top_kv { From c2df41b7584a98f5f5bcac1b52d4f63c9c80840f Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 5 Jan 2022 07:52:20 +0000 Subject: [PATCH 164/188] ==== removal of the unsigned stuff ==== --- bin/node/runtime/src/lib.rs | 3 - frame/state-trie-migration/src/lib.rs | 511 +------------------------- 2 files changed, 15 insertions(+), 499 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 6cc0a004c4c4a..a31cedb9a268a 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1292,9 +1292,6 @@ impl pallet_state_trie_migration::Config for Runtime { type SignedDepositPerItem = MigrationSignedDepositPerItem; type SignedDepositBase = MigrationSignedDepositBase; type SignedMigrationMaxLimits = SignedMigrationMaxLimits; - type UnsignedPriority = ImOnlineUnsignedPriority; - type UnsignedBackOff = frame_support::traits::ConstU32<5>; - type OffchainRepeat = frame_support::traits::ConstU32<3>; type WeightInfo = (); } diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index baf5d09c073c2..ed166037f0d78 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2022 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -36,17 +36,6 @@ //! 
1` bytes from `n` different keys, while the next key is suddenly `:code:`, and there is no way //! to bail out of this. //! -//! ### Unsigned migration -//! -//! This system will use the offchain worker threads to correct the downside of the previous item: -//! knowing exactly the byte size of migration in each block. Offchain worker threads will first -//! find the maximum number of keys that can be migrated whilst staying below a certain byte size -//! limit offchain, and then submit that back to the chain as an unsigned transaction that can only -//! be included by validators. -//! -//! This approach is safer, and ensures that the migration reads do not take more than a certain -//! amount, yet it does impose some work on the validators/collators. -//! //! ### Signed migration //! //! as a backup, the migration process can be set in motion via signed transactions that basically @@ -61,9 +50,8 @@ //! //! --- //! -//! Initially, this pallet does not contain any auto/unsigned migrations. They must be manually -//! enabled by the `ControlOrigin`. Note that these two migration types cannot co-exist And only one -//! can be enable at each point in time. +//! Initially, this pallet does not contain any auto migration. They must be manually enabled by the +//! `ControlOrigin`. #![cfg_attr(not(feature = "std"), no_std)] @@ -84,20 +72,15 @@ macro_rules! 
log { #[frame_support::pallet] pub mod pallet { use frame_support::{ - dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo, TransactionPriority}, + dispatch::{DispatchErrorWithPostInfo, PostDispatchInfo}, ensure, pallet_prelude::*, traits::{Currency, Get}, - unsigned::ValidateUnsigned, - }; - use frame_system::{ - ensure_none, ensure_signed, - offchain::{SendTransactionTypes, SubmitTransaction}, - pallet_prelude::*, }; + use frame_system::{self, ensure_signed, pallet_prelude::*}; use sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX; use sp_runtime::{ - offchain::storage::{MutateStorageError, StorageValueRef}, + self, traits::{Saturating, Zero}, }; use sp_std::prelude::*; @@ -232,15 +215,11 @@ pub mod pallet { impl MigrationTask { /// Return true if the task is finished. + #[cfg(test)] pub(crate) fn finished(&self) -> bool { self.current_top.is_none() && self.current_child.is_none() } - /// Returns `true` if the task fully complies with the given limits. - pub(crate) fn fully_complies_with(&self, limits: MigrationLimits) -> bool { - self.dyn_total_items() <= limits.item && self.dyn_size <= limits.size - } - /// Check if there's any work left, or if we have exhausted the limits already. fn exhausted(&self, limits: MigrationLimits) -> bool { self.current_top.is_none() || @@ -411,8 +390,6 @@ pub mod pallet { pub enum MigrationCompute { /// A signed origin triggered the migration. Signed, - /// An unsigned origin triggered the migration. - Unsigned, /// An automatic task triggered the migration. Auto, } @@ -435,7 +412,7 @@ pub mod pallet { /// Configurations of this pallet. #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> { + pub trait Config: frame_system::Config { /// Origin that can control the configurations of this pallet. type ControlOrigin: frame_support::traits::EnsureOrigin; @@ -460,24 +437,6 @@ pub mod pallet { /// The weight information of this pallet. 
type WeightInfo: WeightInfo; - - /// The priority used for unsigned transactions. - type UnsignedPriority: Get; - - /// The number of items that offchain worker will subtract from the first item count that - /// causes an over-consumption. - /// - /// This is a safety feature to assist the offchain worker submitted transactions and help - /// them not exceed the byte limit of the task. Nonetheless, the fundamental problem is that - /// if a transaction is ensured to not exceed any limit at block `t` when it is generated, - /// there is no guarantee that the same assumption holds at block `t + x`, when this - /// transaction is actually executed. This is where this value comes into play, and by - /// reducing the number of keys that will be migrated, further reduces the chance of byte - /// limit being exceeded. - type UnsignedBackOff: Get; - - /// The repeat frequency of offchain workers. - type OffchainRepeat: Get; } /// Migration progress. @@ -495,17 +454,6 @@ pub mod pallet { #[pallet::getter(fn auto_limits)] pub type AutoLimits = StorageValue<_, Option, ValueQuery>; - /// The size limits imposed on unsigned migrations. - /// - /// This should: - /// 1. be large enough to accommodate things like `:code` - /// 2. small enough to never brick a parachain due to PoV limits. - /// - /// if set to `None`, then no unsigned migration happens. - #[pallet::storage] - #[pallet::getter(fn unsigned_limits)] - pub type UnsignedLimits = StorageValue<_, Option, ValueQuery>; - #[pallet::call] impl Pallet { /// control the automatic migration. @@ -517,86 +465,10 @@ pub mod pallet { maybe_config: Option, ) -> DispatchResultWithPostInfo { T::ControlOrigin::ensure_origin(origin)?; - ensure!( - maybe_config.is_some() ^ Self::unsigned_limits().is_some(), - "unsigned and auto migration cannot co-exist" - ); AutoLimits::::put(maybe_config); Ok(().into()) } - /// control the unsigned migration. - /// - /// The dispatch origin of this call must be [`Config::ControlOrigin`]. 
- #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] - pub fn control_unsigned_migration( - origin: OriginFor, - maybe_limit: Option, - ) -> DispatchResultWithPostInfo { - T::ControlOrigin::ensure_origin(origin)?; - ensure!( - maybe_limit.is_some() ^ Self::auto_limits().is_some(), - "unsigned and auto migration cannot co-exist" - ); - UnsignedLimits::::put(maybe_limit); - Ok(().into()) - } - - /// The unsigned call that can be submitted by offchain workers. - /// - /// This can only be valid if it is generated from the local node, which means only - /// validators can generate this call. - /// - /// The `item_limit` should be used as the limit on the number of items migrated, and the - /// submitter must guarantee that using this item limit, `size` does not go over - /// `Self::unsigned_limits().size`. - /// - /// The `witness_size` should always be equal to `Self::unsigned_limits().size` and is only - /// used for weighing. - #[pallet::weight( - // for reading and writing `migration_process` - T::DbWeight::get().reads_writes(1, 1) - .saturating_add( - // for executing the migration itself. - Pallet::::dynamic_weight(*item_limit, *witness_size) - ) - )] - pub fn continue_migrate_unsigned( - origin: OriginFor, - item_limit: u32, - witness_size: u32, - _witness_task: MigrationTask, - ) -> DispatchResultWithPostInfo { - ensure_none(origin)?; - let chain_limits = - Self::unsigned_limits().ok_or("unsigned limit not set, tx not allowed.")?; - ensure!(witness_size == chain_limits.size, "wrong witness data"); - - let mut task = Self::migration_process(); - // pre-dispatch and validate-unsigned already assure this. - debug_assert_eq!(task, _witness_task); - - // we run the task with the given item limit, and the chain size limit.. - task.migrate_until_exhaustion(MigrationLimits { - size: chain_limits.size, - item: item_limit, - }); - - // .. and we assert that the size limit must have been fully met with the given item - // limit. 
- assert!(task.fully_complies_with(chain_limits)); - - Self::deposit_event(Event::::Migrated { - top: task.dyn_top_items, - child: task.dyn_child_items, - compute: MigrationCompute::Unsigned, - }); - - MigrationProcess::::put(task); - - Ok(().into()) - } - /// Continue the migration for the given `limits`. /// /// The dispatch origin of this call can be any signed account. @@ -794,10 +666,6 @@ pub mod pallet { #[pallet::hooks] impl Hooks> for Pallet { - fn integrity_test() { - assert!(!T::UnsignedBackOff::get().is_zero(), "UnsignedBackOff should not be zero"); - } - fn on_initialize(_: BlockNumberFor) -> Weight { if let Some(limits) = Self::auto_limits() { let mut task = Self::migration_process(); @@ -823,121 +691,9 @@ pub mod pallet { T::DbWeight::get().reads(1) } } - - fn offchain_worker(now: BlockNumberFor) { - if Self::ensure_offchain_repeat_frequency(now).is_err() { - return - } - - log!(debug, "started offchain worker thread."); - if let Some(chain_limits) = Self::unsigned_limits() { - let mut task = Self::migration_process(); - if task.finished() { - log!(debug, "task is finished, remove `unsigned_limits`."); - return - } - - task.migrate_until_exhaustion(chain_limits); - - if task.dyn_size > chain_limits.size { - // previous `migrate_until_exhaustion` finished with too much size consumption. - // This most likely means that if it migrated `x` items, now we need to migrate - // `x - 1` items. But, we migrate less by 5 by default, since the state may have - // changed between the execution of this offchain worker and time that the - // transaction reaches the chain. 
- log!( - debug, - "reducing item count of {} by {}.", - task.dyn_total_items(), - T::UnsignedBackOff::get(), - ); - let mut new_task = Self::migration_process(); - new_task.migrate_until_exhaustion(MigrationLimits { - size: chain_limits.size, - item: task - .dyn_total_items() - .saturating_sub(T::UnsignedBackOff::get().max(1)), - }); - task = new_task; - } - - let item_limit = task.dyn_total_items(); - if item_limit.is_zero() { - log!(warn, "can't fit anything in a migration."); - return - } - - // with the above if-statement, the limits must now be STRICTLY respected, so we - // panic and crash the OCW otherwise. - assert!( - task.fully_complies_with(chain_limits), - "runtime::state-trie-migration: The offchain worker failed to create transaction." - ); - - let original_task = Self::migration_process(); - - let call = Call::continue_migrate_unsigned { - item_limit, - witness_size: chain_limits.size, - witness_task: original_task, - }; - - match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { - Ok(_) => { - log!(info, "submitted a call to migrate {} items.", item_limit) - }, - Err(why) => { - log!(warn, "failed to submit a call to the pool {:?}", why) - }, - } - } - } - } - - #[pallet::validate_unsigned] - impl ValidateUnsigned for Pallet { - type Call = Call; - fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { - if let Call::continue_migrate_unsigned { witness_task, .. } = call { - match source { - TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, - _ => return InvalidTransaction::Call.into(), - } - - let onchain_task = Self::migration_process(); - if &onchain_task != witness_task { - return Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)) - } - - ValidTransaction::with_tag_prefix("StorageVersionMigration") - .priority(T::UnsignedPriority::get()) - // deduplicate based on task data. 
- .and_provides(witness_task) - .longevity(5) - .propagate(false) - .build() - } else { - InvalidTransaction::Call.into() - } - } - - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { - if let Call::continue_migrate_unsigned { witness_task, .. } = call { - let onchain_task = Self::migration_process(); - if &onchain_task != witness_task { - return Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)) - } - Ok(()) - } else { - Err(InvalidTransaction::Call.into()) - } - } } impl Pallet { - /// The path used to identify the offchain worker persistent storage. - const OFFCHAIN_LAST_BLOCK: &'static [u8] = b"parity/state-migration/last-block"; - /// The real weight of a migration of the given number of `items` with total `size`. fn dynamic_weight(items: u32, size: u32) -> frame_support::pallet_prelude::Weight { let items = items as Weight; @@ -949,7 +705,6 @@ pub mod pallet { /// Put a stop to all ongoing migrations. fn halt() { - UnsignedLimits::::kill(); AutoLimits::::kill(); } @@ -973,48 +728,6 @@ pub mod pallet { } key.unwrap_or_default() } - - /// Checks if an execution of the offchain worker is permitted at the given block number, or - /// not. - /// - /// This makes sure that - /// 1. we don't run on previous blocks in case of a re-org - /// 2. we don't run twice within a window of length `T::OffchainRepeat`. - /// - /// Returns `Ok(())` if offchain worker limit is respected, `Err(reason)` otherwise. If - /// `Ok()` is returned, `now` is written in storage and will be used in further calls as the - /// baseline. 
- pub fn ensure_offchain_repeat_frequency(now: T::BlockNumber) -> Result<(), &'static str> { - let threshold = T::OffchainRepeat::get(); - let last_block = StorageValueRef::persistent(&Self::OFFCHAIN_LAST_BLOCK); - - let mutate_stat = last_block.mutate::<_, &'static str, _>( - |maybe_head: Result, _>| { - match maybe_head { - Ok(Some(head)) if now < head => Err("fork."), - Ok(Some(head)) if now >= head && now <= head + threshold => - Err("recently executed."), - Ok(Some(head)) if now > head + threshold => { - // we can run again now. Write the new head. - Ok(now) - }, - _ => { - // value doesn't exists. Probably this node just booted up. Write, and - // okay. - Ok(now) - }, - } - }, - ); - - match mutate_stat { - Ok(_) => Ok(()), - Err(MutateStorageError::ConcurrentModification(_)) => - Err("failed to write to offchain db (ConcurrentModification)."), - Err(MutateStorageError::ValueFunctionFailed(_)) => - Err("failed to write to offchain db (ValueFunctionFailed)."), - } - } } } @@ -1029,8 +742,9 @@ mod benchmarks { frame_benchmarking::benchmarks! { continue_migrate { - // note that this benchmark should migrate nothing, as we only want the overhead weight of the bookkeeping, - // and the migration cost itself is noted via the `dynamic_weight` function. + // note that this benchmark should migrate nothing, as we only want the overhead weight + // of the bookkeeping, and the migration cost itself is noted via the `dynamic_weight` + // function. 
let null = MigrationLimits::default(); let caller = frame_benchmarking::whitelisted_caller(); }: _(frame_system::RawOrigin::Signed(caller), null, 0, StateTrieMigration::::migration_process()) @@ -1075,11 +789,11 @@ mod benchmarks { T::Currency::make_free_balance_be(&caller, stash); }: { assert!( - StateTrieMigration::::migrate_custom_top( + dbg!(StateTrieMigration::::migrate_custom_top( frame_system::RawOrigin::Signed(caller.clone()).into(), Default::default(), 1, - ).is_err() + )).is_err() ) } verify { @@ -1110,22 +824,12 @@ mod benchmarks { #[cfg(test)] mod mock { - use parking_lot::RwLock; - use std::sync::Arc; - use super::*; use crate as pallet_state_trie_migration; use frame_support::{parameter_types, traits::Hooks}; use frame_system::EnsureRoot; - use sp_core::H256; - use sp_runtime::{ - offchain::{ - testing::{PoolState, TestOffchainExt, TestTransactionPoolExt}, - OffchainDbExt, OffchainWorkerExt, TransactionPoolExt, - }, - traits::{BlakeTwo256, Dispatchable, Header as _, IdentityLookup}, - StateVersion, - }; + use sp_core::{storage::StateVersion, H256}; + use sp_runtime::traits::{BlakeTwo256, Header as _, IdentityLookup}; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -1203,21 +907,8 @@ mod mock { type SignedDepositBase = SignedDepositBase; type SignedMigrationMaxLimits = SignedMigrationMaxLimits; type WeightInfo = (); - type UnsignedPriority = (); - type UnsignedBackOff = frame_support::traits::ConstU32<2>; - type OffchainRepeat = OffchainRepeat; - } - - impl frame_system::offchain::SendTransactionTypes for Test - where - Call: From, - { - type OverarchingCall = Call; - type Extrinsic = Extrinsic; } - pub type Extrinsic = sp_runtime::testing::TestXt; - pub fn new_test_ext(version: StateVersion, with_pallets: bool) -> sp_io::TestExternalities { use sp_core::storage::ChildInfo; @@ -1280,26 +971,6 @@ mod mock { (custom_storage, version).into() } - pub fn new_offchain_ext( - version: 
StateVersion, - with_pallets: bool, - ) -> (sp_io::TestExternalities, Arc>) { - let mut ext = new_test_ext(version, with_pallets); - let pool_state = offchainify(&mut ext); - (ext, pool_state) - } - - pub fn offchainify(ext: &mut sp_io::TestExternalities) -> Arc> { - let (offchain, _offchain_state) = TestOffchainExt::new(); - let (pool, pool_state) = TestTransactionPoolExt::new(); - - ext.register_extension(OffchainDbExt::new(offchain.clone())); - ext.register_extension(OffchainWorkerExt::new(offchain)); - ext.register_extension(TransactionPoolExt::new(pool)); - - pool_state - } - pub fn run_to_block(n: u32) -> H256 { let mut root = Default::default(); log::trace!(target: LOG_TARGET, "running from {:?} to {:?}", System::block_number(), n); @@ -1314,34 +985,6 @@ mod mock { } root } - - pub fn run_to_block_and_drain_pool(n: u32, pool: Arc>) -> H256 { - let mut root = Default::default(); - while System::block_number() < n { - System::set_block_number(System::block_number() + 1); - System::on_initialize(System::block_number()); - - StateTrieMigration::on_initialize(System::block_number()); - - // drain previous transactions - pool.read() - .transactions - .clone() - .into_iter() - .map(|uxt| ::decode(&mut &*uxt).unwrap()) - .for_each(|xt| { - // dispatch them all with no origin. 
- xt.call.dispatch(frame_system::RawOrigin::None.into()).unwrap(); - }); - pool.try_write().unwrap().transactions.clear(); - - StateTrieMigration::offchain_worker(System::block_number()); - - root = System::finalize().state_root().clone(); - System::on_finalize(System::block_number()); - } - root - } } #[cfg(test)] @@ -1349,7 +992,6 @@ mod test { use super::{mock::*, *}; use sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX; use sp_runtime::{traits::Bounded, StateVersion}; - use std::sync::Arc; #[test] fn fails_if_no_migration() { @@ -1442,48 +1084,6 @@ mod test { ); } - #[test] - fn ocw_migration_works() { - let run_with_limits = |limits, from, until| { - let (mut ext, pool) = new_offchain_ext(StateVersion::V0, false); - let root_upgraded = ext.execute_with(|| { - assert_eq!(UnsignedLimits::::get(), None); - assert_eq!(MigrationProcess::::get(), Default::default()); - - // nothing happens if we don't set the limits. - run_to_block_and_drain_pool(from, Arc::clone(&pool)); - assert_eq!(MigrationProcess::::get(), Default::default()); - - // allow 2 items per run - UnsignedLimits::::put(Some(limits)); - - run_to_block_and_drain_pool(until, Arc::clone(&pool)) - }); - - let (mut ext2, pool2) = new_offchain_ext(StateVersion::V1, false); - let root = ext2.execute_with(|| { - // update ex2 to contain the new items - run_to_block_and_drain_pool(from, Arc::clone(&pool2)); - UnsignedLimits::::put(Some(limits)); - run_to_block_and_drain_pool(until, Arc::clone(&pool2)) - }); - assert_eq!(root, root_upgraded); - }; - - // single item - run_with_limits(MigrationLimits { item: 1, size: 1000 }, 10, 100); - // multi-item - run_with_limits(MigrationLimits { item: 5, size: 1000 }, 10, 100); - // multi-item, based on size - run_with_limits(MigrationLimits { item: 1000, size: 512 }, 10, 100); - // unbounded - run_with_limits( - MigrationLimits { item: Bounded::max_value(), size: Bounded::max_value() }, - 10, - 100, - ); - } - #[test] fn signed_migrate_works() { 
new_test_ext(StateVersion::V0, true).execute_with(|| { @@ -1630,7 +1230,6 @@ mod test { mod remote_tests { use super::{mock::*, *}; use codec::Encode; - use mock::run_to_block_and_drain_pool; use remote_externalities::{Mode, OfflineConfig, OnlineConfig}; use sp_runtime::traits::{Bounded, HashFor}; use std::sync::Arc; @@ -1730,84 +1329,4 @@ mod remote_tests { // size being the bottleneck run_with_limits(MigrationLimits { item: Bounded::max_value(), size: 64 * 1024 }).await; } - - #[tokio::test] - async fn offchain_worker_migration() { - sp_tracing::try_init_simple(); - let run_with_limits = |limits| async move { - let mut ext = remote_externalities::Builder::::new() - .mode(Mode::OfflineOrElseOnline( - OfflineConfig { - state_snapshot: "/home/kianenigma/remote-builds/state".to_owned().into(), - }, - OnlineConfig { - transport: std::env!("WS_API").to_owned().into(), - state_snapshot: Some( - "/home/kianenigma/remote-builds/state".to_owned().into(), - ), - ..Default::default() - }, - )) - .state_version(sp_core::storage::StateVersion::V0) - .build() - .await - .unwrap(); - let pool_state = offchainify(&mut ext); - - let mut now = ext.execute_with(|| { - UnsignedLimits::::put(Some(limits)); - // requires the block number type in our tests to be same as with mainnet, u32. - frame_system::Pallet::::block_number() - }); - - let mut duration = 0; - // set the version to 1, as if the upgrade happened. 
- ext.state_version = sp_core::storage::StateVersion::V1; - - let (top_left, child_left) = - ext.as_backend().essence().check_migration_state().unwrap(); - assert!(top_left > 0); - log::info!( - target: LOG_TARGET, - "initial check: top_left: {}, child_left: {}", - top_left, - child_left, - ); - - loop { - let finished = ext.execute_with(|| { - run_to_block_and_drain_pool(now + 1, Arc::clone(&pool_state)); - if StateTrieMigration::migration_process().finished() { - return true - } - duration += 1; - now += 1; - false - }); - - if finished { - break - } - } - - ext.execute_with(|| { - log::info!( - target: LOG_TARGET, - "finished offchain-worker migration in {} block, final state of the task: {:?}", - duration, - StateTrieMigration::migration_process() - ); - }); - - let (top_left, child_left) = - ext.as_backend().essence().check_migration_state().unwrap(); - assert_eq!(top_left, 0); - assert_eq!(child_left, 0); - }; - // item being the bottleneck - run_with_limits(MigrationLimits { item: 16 * 1024, size: 4 * 1024 * 1024 }).await; - // size being the bottleneck - run_with_limits(MigrationLimits { item: Bounded::max_value(), size: 2 * 1024 * 1024 }) - .await; - } } From 8102e15deb7a54aeb9a58d697b6f43aed273b05f Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 5 Jan 2022 08:08:18 +0000 Subject: [PATCH 165/188] Make all tests work again --- frame/state-trie-migration/src/lib.rs | 34 +++++++++++++++++++++------ 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index ed166037f0d78..1545853743709 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -787,13 +787,16 @@ mod benchmarks { let caller = frame_benchmarking::whitelisted_caller(); let stash = T::Currency::minimum_balance() * BalanceOf::::from(10u32); T::Currency::make_free_balance_be(&caller, stash); + // for tests, we need to make sure there is _something_ in storage that is 
being + // migrated. + sp_io::storage::set(b"foo", vec![1u8;33].as_ref()); }: { assert!( - dbg!(StateTrieMigration::::migrate_custom_top( + StateTrieMigration::::migrate_custom_top( frame_system::RawOrigin::Signed(caller.clone()).into(), - Default::default(), + vec![b"foo".to_vec()], 1, - )).is_err() + ).is_err() ) } verify { @@ -1149,11 +1152,25 @@ mod test { #[test] fn custom_migrate_top_works() { + let correct_witness = 3 + sp_core::storage::TRIE_VALUE_NODE_THRESHOLD * 3 + 1 + 2 + 3; new_test_ext(StateVersion::V0, true).execute_with(|| { frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( Origin::signed(1), vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], - 3 + sp_core::storage::TRIE_VALUE_NODE_THRESHOLD * 3 + 1 + 2 + 3, + correct_witness, + )); + + // no funds should remain reserved. + assert_eq!(Balances::reserved_balance(&1), 0); + assert_eq!(Balances::free_balance(&1), 1000); + }); + + new_test_ext(StateVersion::V0, true).execute_with(|| { + // works if the witness is an overestimate + frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( + Origin::signed(1), + vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], + correct_witness + 99, )); // no funds should remain reserved. @@ -1169,7 +1186,7 @@ mod test { StateTrieMigration::migrate_custom_top( Origin::signed(1), vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], - 69, // wrong witness + correct_witness - 1, ), "wrong witness data" ); @@ -1228,13 +1245,16 @@ mod test { #[cfg(all(test, feature = "remote-tests"))] mod remote_tests { - use super::{mock::*, *}; + use super::{ + mock::{Call as MockCall, *}, + *, + }; use codec::Encode; use remote_externalities::{Mode, OfflineConfig, OnlineConfig}; use sp_runtime::traits::{Bounded, HashFor}; - use std::sync::Arc; // we only use the hash type from this, so using the mock should be fine. 
+ type Extrinsic = sp_runtime::testing::TestXt; type Block = sp_runtime::testing::Block; #[tokio::test] From de17ba0fcd804a8b05b85f3a3da7436c5cb5a62f Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 5 Jan 2022 09:21:47 +0000 Subject: [PATCH 166/188] separate the tests from the logic so it can be reused easier --- Cargo.lock | 20 ++- frame/state-trie-migration/Cargo.toml | 7 +- frame/state-trie-migration/src/lib.rs | 247 ++++++++++++++++---------- 3 files changed, 170 insertions(+), 104 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e11e77df44ce0..d4911c149f8bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6142,11 +6142,13 @@ dependencies = [ "parking_lot 0.11.2", "remote-externalities", "scale-info", + "serde", "sp-core", "sp-io", "sp-runtime", "sp-std", "sp-tracing", + "thousands", "tokio", "zstd", ] @@ -8928,9 +8930,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9875c23cf305cd1fd7eb77234cbb705f21ea6a72c637a5c6db5fe4b8e7f008" +checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a" dependencies = [ "serde_derive", ] @@ -8956,9 +8958,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc0db5cb2556c0e558887d9bbdcf6ac4471e83ff66cf696e5419024d1606276" +checksum = "ed201699328568d8d08208fdd080e3ff594e6c422e438b6705905da01005d537" dependencies = [ "proc-macro2", "quote", @@ -10547,6 +10549,12 @@ dependencies = [ "syn", ] +[[package]] +name = "thousands" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820" + [[package]] name = "thread_local" version = "1.1.3" @@ -11053,8 +11061,8 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1f559b464de2e2bdabcac6a210d12e9b5a5973c251e102c44c585c71d51bd78e" dependencies = [ - "cfg-if 0.1.10", - "rand 0.6.5", + "cfg-if 1.0.0", + "rand 0.8.4", "static_assertions", ] diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index 65155aaf1ba84..18404810381bb 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -26,6 +26,9 @@ frame-support = { version = "4.0.0-dev", default-features = false, path = "../su frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../benchmarking", optional = true } +serde = { version = "1.0.133", optional = true } +thousands = { version = "0.2.0", optional = true } + [dev-dependencies] pallet-balances = { path = "../balances", version = "4.0.0-dev" } parking_lot = "0.11.0" @@ -48,7 +51,7 @@ std = [ "sp-runtime/std", "sp-std/std" ] - runtime-benchmarks = ["frame-benchmarking"] try-runtime = ["frame-support/try-runtime"] -remote-tests = ["std"] + +remote-tests = [ "std", "serde", "thousands" ] diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 1545853743709..a955f5f55d62f 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -974,19 +974,20 @@ mod mock { (custom_storage, version).into() } - pub fn run_to_block(n: u32) -> H256 { + pub(crate) fn run_to_block(n: u32) -> (H256, u64) { let mut root = Default::default(); + let mut weight_sum = 0; log::trace!(target: LOG_TARGET, "running from {:?} to {:?}", System::block_number(), n); while System::block_number() < n { System::set_block_number(System::block_number() + 1); System::on_initialize(System::block_number()); - StateTrieMigration::on_initialize(System::block_number()); + weight_sum += StateTrieMigration::on_initialize(System::block_number()); root = System::finalize().state_root().clone(); 
System::on_finalize(System::block_number()); } - root + (root, weight_sum) } } @@ -999,10 +1000,10 @@ mod test { #[test] fn fails_if_no_migration() { let mut ext = new_test_ext(StateVersion::V0, false); - let root1 = ext.execute_with(|| run_to_block(30)); + let root1 = ext.execute_with(|| run_to_block(30).0); let mut ext2 = new_test_ext(StateVersion::V1, false); - let root2 = ext2.execute_with(|| run_to_block(30)); + let root2 = ext2.execute_with(|| run_to_block(30).0); // these two roots should not be the same. assert_ne!(root1, root2); @@ -1018,7 +1019,7 @@ mod test { child::put(&child::ChildInfo::new_default(b"chk1"), &[], &vec![66u8; 77]); AutoLimits::::put(Some(limit)); - let root = run_to_block(30); + let root = run_to_block(30).0; // eventually everything is over. assert!(matches!( @@ -1032,7 +1033,7 @@ mod test { let root = ext2.execute_with(|| { child::put(&child::ChildInfo::new_default(b"chk1"), &[], &vec![66u8; 77]); AutoLimits::::put(Some(limit)); - run_to_block(30) + run_to_block(30).0 }); assert_eq!(root, root_upgraded); @@ -1053,7 +1054,7 @@ mod test { // this should allow 1 item per block to be migrated. AutoLimits::::put(Some(limit)); - let root = run_to_block(until); + let root = run_to_block(until).0; // eventually everything is over. assert!(matches!( @@ -1068,7 +1069,7 @@ mod test { // update ex2 to contain the new items let _ = run_to_block(from); AutoLimits::::put(Some(limit)); - run_to_block(until) + run_to_block(until).0 }); assert_eq!(root, root_upgraded); }; @@ -1243,110 +1244,164 @@ mod test { } } -#[cfg(all(test, feature = "remote-tests"))] -mod remote_tests { - use super::{ - mock::{Call as MockCall, *}, - *, - }; +/// Exported set of tests to be called against different runtimes. 
+#[cfg(feature = "remote-tests")] +pub mod remote_tests { + use crate::{AutoLimits, MigrationLimits, Pallet as StateTrieMigration, LOG_TARGET}; use codec::Encode; - use remote_externalities::{Mode, OfflineConfig, OnlineConfig}; - use sp_runtime::traits::{Bounded, HashFor}; + use frame_benchmarking::Zero; + use frame_support::traits::{Get, Hooks}; + use frame_system::Pallet as System; + use remote_externalities::Mode; + use sp_core::H256; + use sp_runtime::traits::{Block as BlockT, HashFor, Header as _, One}; + use thousands::Separable; + + fn run_to_block>( + n: ::BlockNumber, + ) -> (H256, u64) { + let mut root = Default::default(); + let mut weight_sum = 0; + while System::::block_number() < n { + System::::set_block_number(System::::block_number() + One::one()); + System::::on_initialize(System::::block_number()); - // we only use the hash type from this, so using the mock should be fine. - type Extrinsic = sp_runtime::testing::TestXt; - type Block = sp_runtime::testing::Block; + weight_sum += + StateTrieMigration::::on_initialize(System::::block_number()); - #[tokio::test] - async fn on_initialize_migration() { - sp_tracing::try_init_simple(); - let run_with_limits = |limits| async move { - let mut ext = remote_externalities::Builder::::new() - .mode(Mode::OfflineOrElseOnline( - OfflineConfig { - state_snapshot: "/home/kianenigma/remote-builds/state".to_owned().into(), - }, - OnlineConfig { - transport: std::env!("WS_API").to_owned().into(), - state_snapshot: Some( - "/home/kianenigma/remote-builds/state".to_owned().into(), - ), - ..Default::default() - }, - )) - .state_version(sp_core::storage::StateVersion::V0) - .build() - .await - .unwrap(); + root = System::::finalize().state_root().clone(); + System::::on_finalize(System::::block_number()); + } + (root, weight_sum) + } - let mut now = ext.execute_with(|| { - AutoLimits::::put(Some(limits)); - // requires the block number type in our tests to be same as with mainnet, u32. 
- frame_system::Pallet::::block_number() - }); + /// Run the entire migration, against the given `Runtime`, until completion. + /// + /// This will print some very useful statistics, make sure [`crate::LOG_TARGET`] is enabled. + pub async fn run_with_limits< + Runtime: crate::Config, + Block: BlockT + serde::de::DeserializeOwned, + >( + limits: MigrationLimits, + mode: Mode, + ) { + let mut ext = remote_externalities::Builder::::new() + .mode(mode) + .state_version(sp_core::storage::StateVersion::V0) + .build() + .await + .unwrap(); + + let mut now = ext.execute_with(|| { + AutoLimits::::put(Some(limits)); + // requires the block number type in our tests to be same as with mainnet, u32. + frame_system::Pallet::::block_number() + }); + + let mut duration: ::BlockNumber = Zero::zero(); + // set the version to 1, as if the upgrade happened. + ext.state_version = sp_core::storage::StateVersion::V1; - let mut duration = 0; - // set the version to 1, as if the upgrade happened. - ext.state_version = sp_core::storage::StateVersion::V1; + let (top_left, child_left) = ext.as_backend().essence().check_migration_state().unwrap(); + assert!(top_left > 0); - let (top_left, child_left) = - ext.as_backend().essence().check_migration_state().unwrap(); - assert!(top_left > 0); + log::info!( + target: LOG_TARGET, + "initial check: top_left: {}, child_left: {}", + top_left.separate_with_commas(), + child_left.separate_with_commas(), + ); + loop { + let last_state_root = ext.backend.root().clone(); + let ((finished, weight), proof) = ext.execute_and_prove(|| { + let weight = run_to_block::(now + One::one()).1; + if StateTrieMigration::::migration_process().finished() { + return (true, weight) + } + duration += One::one(); + now += One::one(); + (false, weight) + }); + + let compact_proof = + proof.clone().into_compact_proof::>(last_state_root).unwrap(); log::info!( target: LOG_TARGET, - "initial check: top_left: {}, child_left: {}", - top_left, - child_left, + "proceeded to #{}, 
weight: [{} / {}], proof: [{} / {} / {}]", + now, + weight.separate_with_commas(), + ::BlockWeights::get() + .max_block + .separate_with_commas(), + proof.encoded_size().separate_with_commas(), + compact_proof.encoded_size().separate_with_commas(), + zstd::stream::encode_all(&compact_proof.encode()[..], 0) + .unwrap() + .len() + .separate_with_commas(), ); + ext.commit_all().unwrap(); - loop { - let last_state_root = ext.backend.root().clone(); - let (finished, proof) = ext.execute_and_prove(|| { - run_to_block(now + 1); - if StateTrieMigration::migration_process().finished() { - return true - } - duration += 1; - now += 1; - false - }); + if finished { + break + } + } - let compact_proof = - proof.clone().into_compact_proof::>(last_state_root).unwrap(); - log::info!( - target: LOG_TARGET, - "proceeded to #{}, original proof: {}, compact proof size: {}, compact zstd compressed: {}", - now, - proof.encoded_size(), - compact_proof.encoded_size(), - zstd::stream::encode_all(&compact_proof.encode()[..], 0).unwrap().len(), - ); - ext.commit_all().unwrap(); + ext.execute_with(|| { + log::info!( + target: LOG_TARGET, + "finished on_initialize migration in {} block, final state of the task: {:?}", + duration, + StateTrieMigration::::migration_process(), + ) + }); - if finished { - break - } - } + let (top_left, child_left) = ext.as_backend().essence().check_migration_state().unwrap(); + assert_eq!(top_left, 0); + assert_eq!(child_left, 0); + } +} - ext.execute_with(|| { - log::info!( - target: LOG_TARGET, - "finished on_initialize migration in {} block, final state of the task: {:?}", - duration, - StateTrieMigration::migration_process(), - ) - }); +#[cfg(all(test, feature = "remote-tests"))] +mod remote_tests_local { + use super::{ + mock::{Call as MockCall, *}, + remote_tests::run_with_limits, + *, + }; + use remote_externalities::{Mode, OfflineConfig, OnlineConfig}; + use sp_runtime::traits::Bounded; - let (top_left, child_left) = - 
ext.as_backend().essence().check_migration_state().unwrap(); - assert_eq!(top_left, 0); - assert_eq!(child_left, 0); - }; + // we only use the hash type from this, so using the mock should be fine. + type Extrinsic = sp_runtime::testing::TestXt; + type Block = sp_runtime::testing::Block; + + #[tokio::test] + async fn on_initialize_migration() { + sp_tracing::try_init_simple(); + let mode = Mode::OfflineOrElseOnline( + OfflineConfig { + state_snapshot: "/home/kianenigma/remote-builds/state".to_owned().into(), + }, + OnlineConfig { + transport: std::env!("WS_API").to_owned().into(), + state_snapshot: Some("/home/kianenigma/remote-builds/state".to_owned().into()), + ..Default::default() + }, + ); // item being the bottleneck - run_with_limits(MigrationLimits { item: 8 * 1024, size: 128 * 1024 * 1024 }).await; + run_with_limits::( + MigrationLimits { item: 8 * 1024, size: 128 * 1024 * 1024 }, + mode.clone(), + ) + .await; // size being the bottleneck - run_with_limits(MigrationLimits { item: Bounded::max_value(), size: 64 * 1024 }).await; + run_with_limits::( + MigrationLimits { item: Bounded::max_value(), size: 64 * 1024 }, + mode, + ) + .await; } } From 30c42cb2c85e2cb9c7f95ae0f0d4421076be28f5 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 5 Jan 2022 09:30:37 +0000 Subject: [PATCH 167/188] fix overall build --- frame/state-trie-migration/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index a955f5f55d62f..8fd8345981f8b 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -735,6 +735,7 @@ pub mod pallet { mod benchmarks { use super::{pallet::Pallet as StateTrieMigration, *}; use frame_support::traits::Currency; + use sp_std::prelude::*; // The size of the key seemingly makes no difference in the read/write time, so we make it // constant. 
From 0304711d531e5dfc7778038c54d352c1067eaa56 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Fri, 14 Jan 2022 12:17:09 +0100 Subject: [PATCH 168/188] Update frame/state-trie-migration/src/lib.rs Co-authored-by: cheme --- frame/state-trie-migration/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 8fd8345981f8b..05955fb258060 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -22,7 +22,7 @@ //! //! ## Migration Types //! -//! This pallet provides 3 ways to do this, each of which is suited for a particular use-case, and +//! This pallet provides 2 ways to do this, each of which is suited for a particular use-case, and //! can be enabled independently. //! //! ### Auto migration From 0dcf25c5b5f70f4103bcc446e17c3fec33d60ebd Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Fri, 14 Jan 2022 12:18:30 +0100 Subject: [PATCH 169/188] Update frame/state-trie-migration/src/lib.rs Co-authored-by: cheme --- frame/state-trie-migration/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 05955fb258060..67b40f2e0a612 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -40,7 +40,7 @@ //! //! as a backup, the migration process can be set in motion via signed transactions that basically //! say in advance how many items and how many bytes they will consume, and pay for it as well. This -//! can be a good safe alternative, if the former two systems are not desirable. +//! can be a good safe alternative, if the former system is not desirable. //! //! The (minor) caveat of this approach is that we cannot know in advance how many bytes reading a //! certain number of keys will incur. 
To overcome this, the runtime needs to configure this pallet From b910012fd56d060afbdd1d1fab07d34c8708e28d Mon Sep 17 00:00:00 2001 From: kianenigma Date: Fri, 4 Feb 2022 10:39:37 +0000 Subject: [PATCH 170/188] Slightly better termination --- frame/state-trie-migration/src/lib.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index a784f138e50e5..19c7388c3326c 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -215,7 +215,6 @@ pub mod pallet { impl MigrationTask { /// Return true if the task is finished. - #[cfg(test)] pub(crate) fn finished(&self) -> bool { self.current_top.is_none() && self.current_child.is_none() } @@ -403,6 +402,8 @@ pub mod pallet { Migrated { top: u32, child: u32, compute: MigrationCompute }, /// Some account got slashed by the given amount. Slashed { who: T::AccountId, amount: BalanceOf }, + /// The auto migration task finished. + AutoMigrationFinished, } /// The outer Pallet struct. 
@@ -680,11 +681,18 @@ pub mod pallet { task.dyn_child_items, task.dyn_size, ); - Self::deposit_event(Event::::Migrated { - top: task.dyn_top_items, - child: task.dyn_child_items, - compute: MigrationCompute::Auto, - }); + + if task.finished() { + Self::deposit_event(Event::::AutoMigrationFinished); + AutoLimits::::kill(); + } else { + Self::deposit_event(Event::::Migrated { + top: task.dyn_top_items, + child: task.dyn_child_items, + compute: MigrationCompute::Auto, + }); + } + MigrationProcess::::put(task); weight From 6e9106a1765b2a90a590d97fd011c36a7d960268 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 9 Feb 2022 15:34:36 +0000 Subject: [PATCH 171/188] some final tweaks --- bin/node/runtime/src/lib.rs | 2 +- frame/state-trie-migration/src/lib.rs | 31 +++++++++++++++++++-------- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 6cdc80dca42b7..f0a912d136041 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1363,7 +1363,7 @@ impl pallet_transaction_storage::Config for Runtime { parameter_types! { pub const SignedMigrationMaxLimits: pallet_state_trie_migration::MigrationLimits = - pallet_state_trie_migration::MigrationLimits { size: 1024 * 512, item: 512 }; + pallet_state_trie_migration::MigrationLimits { size: 2 * 1024 * 1024, item: 512 }; pub const MigrationSignedDepositPerItem: Balance = 1 * CENTS; pub const MigrationSignedDepositBase: Balance = 20 * DOLLARS; } diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 19c7388c3326c..68227a8341946 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -435,6 +435,7 @@ pub mod pallet { type SignedDepositBase: Get>; /// The maximum limits that the signed migration could use. + #[pallet::constant] type SignedMigrationMaxLimits: Get; /// The weight information of this pallet. 
@@ -456,6 +457,18 @@ pub mod pallet { #[pallet::getter(fn auto_limits)] pub type AutoLimits = StorageValue<_, Option, ValueQuery>; + #[pallet::error] + pub enum Error { + /// max signed limits not respected. + MaxSignedLimits, + /// submitter does not have enough funds. + NotEnoughFunds, + /// bad witness data provided. + BadWitness, + /// upper bound of size is exceeded, + SizeUpperBoundExceeded, + } + #[pallet::call] impl Pallet { /// control the automatic migration. @@ -509,18 +522,18 @@ pub mod pallet { let max_limits = T::SignedMigrationMaxLimits::get(); ensure!( limits.size <= max_limits.size && limits.item <= max_limits.item, - "max signed limits not respected" + Error::::MaxSignedLimits, ); // ensure they can pay more than the fee. let deposit = T::SignedDepositPerItem::get().saturating_mul(limits.item.into()); - ensure!(T::Currency::can_slash(&who, deposit), "not enough funds"); + ensure!(T::Currency::can_slash(&who, deposit), Error::::NotEnoughFunds); let mut task = Self::migration_process(); ensure!( task == witness_task, DispatchErrorWithPostInfo { - error: "wrong witness".into(), + error: Error::::BadWitness.into(), post_info: PostDispatchInfo { actual_weight: Some(T::WeightInfo::continue_migrate_wrong_witness()), pays_fee: Pays::Yes @@ -534,7 +547,7 @@ pub mod pallet { // let the imbalance burn. let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); debug_assert!(_remainder.is_zero()); - return Err("wrong witness data".into()) + return Err(Error::::SizeUpperBoundExceeded.into()) } Self::deposit_event(Event::::Migrated { @@ -543,14 +556,14 @@ pub mod pallet { compute: MigrationCompute::Signed, }); + // refund and correct the weight. 
let actual_weight = Some( - Pallet::::dynamic_weight(limits.item, task.dyn_size) + - T::WeightInfo::continue_migrate(), + Pallet::::dynamic_weight(limits.item, task.dyn_size) + .saturating_add(T::WeightInfo::continue_migrate()), ); - MigrationProcess::::put(task); - let pays = Pays::No; - Ok((actual_weight, pays).into()) + MigrationProcess::::put(task); + Ok((actual_weight, Pays::No).into()) } /// Migrate the list of top keys by iterating each of them one by one. From 4c119a084cf725031a3959073e390e2af40918c5 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 9 Feb 2022 15:59:53 +0000 Subject: [PATCH 172/188] Fix tests --- bin/node/runtime/src/lib.rs | 1 + frame/state-trie-migration/src/lib.rs | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index f0a912d136041..e0a97e40457c2 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1367,6 +1367,7 @@ parameter_types! { pub const MigrationSignedDepositPerItem: Balance = 1 * CENTS; pub const MigrationSignedDepositBase: Balance = 20 * DOLLARS; } + impl pallet_state_trie_migration::Config for Runtime { type Event = Event; type ControlOrigin = EnsureRoot; diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 68227a8341946..f27f41b7e90a1 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1124,7 +1124,7 @@ mod test { Bounded::max_value(), MigrationProcess::::get() ), - "max signed limits not respected" + Error::::MaxSignedLimits, ); // can't submit if poor. @@ -1135,7 +1135,7 @@ mod test { 100, MigrationProcess::::get() ), - "not enough funds" + Error::::NotEnoughFunds, ); // can't submit with bad witness. 
@@ -1146,7 +1146,7 @@ mod test { 100, MigrationTask { current_top: Some(vec![1u8]), ..Default::default() } ), - "wrong witness" + Error::::BadWitness ); // migrate all keys in a series of submissions From 9a4e316fc12fe8125698d063df0dc0587605d2e8 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 9 Feb 2022 16:40:31 +0000 Subject: [PATCH 173/188] Restrict access to signed migrations --- bin/node/runtime/src/lib.rs | 5 +++++ frame/state-trie-migration/src/lib.rs | 19 ++++++++++++++++++- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index e0a97e40457c2..e08a311ac3b1d 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1375,6 +1375,11 @@ impl pallet_state_trie_migration::Config for Runtime { type SignedDepositPerItem = MigrationSignedDepositPerItem; type SignedDepositBase = MigrationSignedDepositBase; type SignedMigrationMaxLimits = SignedMigrationMaxLimits; + // Warning: this is not advised, as it might allow the chain to be temporarily DOS-ed. + // Preferably, if the chain's governance/maintenance team is planning on using a specific + // account for the migration, put it here to make sure only that account can trigger the signed + // migrations. + type SignedOriginFilter = (); type WeightInfo = (); } diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index f27f41b7e90a1..1147b2121edff 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -418,6 +418,10 @@ pub mod pallet { /// Origin that can control the configurations of this pallet. type ControlOrigin: frame_support::traits::EnsureOrigin; + /// Filter on which signed origin that trigger the manual migrations. All origins are + /// allowed if set to `None`. + type SignedOriginFilter: Get>; + /// The overarching event type. 
type Event: From> + IsType<::Event>; @@ -518,6 +522,10 @@ pub mod pallet { witness_task: MigrationTask, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; + ensure!( + T::SignedOriginFilter::get().map_or(true, |o| o == who), + DispatchError::BadOrigin + ); let max_limits = T::SignedMigrationMaxLimits::get(); ensure!( @@ -583,6 +591,10 @@ pub mod pallet { witness_size: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; + ensure!( + T::SignedOriginFilter::get().map_or(true, |o| o == who), + DispatchError::BadOrigin + ); // ensure they can pay more than the fee. let deposit = T::SignedDepositBase::get().saturating_add( @@ -632,6 +644,10 @@ pub mod pallet { total_size: u32, ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; + ensure!( + T::SignedOriginFilter::get().map_or(true, |o| o == who), + DispatchError::BadOrigin + ); // ensure they can pay more than the fee. let deposit = T::SignedDepositBase::get().saturating_add( @@ -932,6 +948,7 @@ mod mock { type SignedDepositPerItem = SignedDepositPerItem; type SignedDepositBase = SignedDepositBase; type SignedMigrationMaxLimits = SignedMigrationMaxLimits; + type SignedOriginFilter = (); type WeightInfo = (); } @@ -1268,7 +1285,7 @@ mod test { } /// Exported set of tests to be called against different runtimes. 
-#[cfg(feature = "remote-tests")] +#[cfg(feature = "remote-test")] pub mod remote_tests { use crate::{AutoLimits, MigrationLimits, Pallet as StateTrieMigration, LOG_TARGET}; use codec::Encode; From 7c6ce545ab909aec1b059483e04314384a4bdea1 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Tue, 22 Feb 2022 10:57:21 +0000 Subject: [PATCH 174/188] address most of the review comments --- Cargo.lock | 53 ++++++++------- bin/node/runtime/src/lib.rs | 3 +- client/service/src/client/wasm_override.rs | 2 +- frame/bags-list/remote-tests/src/snapshot.rs | 3 +- frame/state-trie-migration/src/lib.rs | 69 ++++++++++++-------- frame/support/src/traits/misc.rs | 7 ++ 6 files changed, 79 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 11b679af55908..0566d6e58b211 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -64,9 +64,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ "getrandom 0.2.3", "once_cell", @@ -1383,9 +1383,9 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-common" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d6b536309245c849479fba3da410962a43ed8e51c26b729208ec0ac2798d0" +checksum = "a4600d695eb3f6ce1cd44e6e291adceb2cc3ab12f20a33777ecd0bf6eba34e06" dependencies = [ "generic-array 0.14.4", ] @@ -1603,13 +1603,12 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b697d66081d42af4fba142d56918a3cb21dc8eb63372c6b85d14f44fb9c5979b" +checksum = "8cb780dce4f9a8f5c087362b3a4595936b2019e7c8b30f2c3e9a7e94e6ae9837" dependencies = [ "block-buffer 
0.10.0", "crypto-common", - "generic-array 0.14.4", ] [[package]] @@ -6688,7 +6687,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" dependencies = [ "lock_api 0.4.6", - "parking_lot_core 0.9.0", + "parking_lot_core 0.9.1", ] [[package]] @@ -6722,9 +6721,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2f4f894f3865f6c0e02810fc597300f34dc2510f66400da262d8ae10e75767d" +checksum = "28141e0cc4143da2443301914478dc976a61ffdb3f043058310c70df2fed8954" dependencies = [ "cfg-if 1.0.0", "libc", @@ -9258,13 +9257,13 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900d964dd36bb15bcf2f2b35694c072feab74969a54f2bbeec7a2d725d2bdcb6" +checksum = "99c3bd8169c58782adad9290a9af5939994036b76187f7b4f0e6de91dbbfc0ec" dependencies = [ "cfg-if 1.0.0", "cpufeatures 0.2.1", - "digest 0.10.1", + "digest 0.10.2", ] [[package]] @@ -9703,7 +9702,7 @@ dependencies = [ "secrecy", "serde", "serde_json", - "sha2 0.10.0", + "sha2 0.10.1", "sp-core-hashing", "sp-core-hashing-proc-macro", "sp-debug-derive", @@ -9728,7 +9727,7 @@ version = "4.0.0" dependencies = [ "blake2-rfc", "byteorder", - "sha2 0.10.0", + "sha2 0.10.1", "sp-std", "tiny-keccak", "twox-hash", @@ -12113,9 +12112,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceb069ac8b2117d36924190469735767f0990833935ab430155e71a44bafe148" +checksum = "3df6e476185f92a12c072be4a189a0210dcdcf512a1891d6dff9edb874deadc6" dependencies = [ "windows_aarch64_msvc", "windows_i686_gnu", @@ -12126,33 +12125,33 @@ dependencies = [ [[package]] 
name = "windows_aarch64_msvc" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d027175d00b01e0cbeb97d6ab6ebe03b12330a35786cbaca5252b1c4bf5d9b" +checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" [[package]] name = "windows_i686_gnu" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8793f59f7b8e8b01eda1a652b2697d87b93097198ae85f823b969ca5b89bba58" +checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" [[package]] name = "windows_i686_msvc" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8602f6c418b67024be2996c512f5f995de3ba417f4c75af68401ab8756796ae4" +checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" [[package]] name = "windows_x86_64_gnu" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d615f419543e0bd7d2b3323af0d86ff19cbc4f816e6453f36a2c2ce889c354" +checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" [[package]] name = "windows_x86_64_msvc" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d95421d9ed3672c280884da53201a5c46b7b2765ca6faf34b0d71cf34a3561" +checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" [[package]] name = "winreg" diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index e08a311ac3b1d..9a59c806cb6ec 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1363,7 +1363,7 @@ impl pallet_transaction_storage::Config for Runtime { parameter_types! 
{ pub const SignedMigrationMaxLimits: pallet_state_trie_migration::MigrationLimits = - pallet_state_trie_migration::MigrationLimits { size: 2 * 1024 * 1024, item: 512 }; + pallet_state_trie_migration::MigrationLimits { size: 1024 * 1024 / 2, item: 512 }; pub const MigrationSignedDepositPerItem: Balance = 1 * CENTS; pub const MigrationSignedDepositBase: Balance = 20 * DOLLARS; } @@ -1528,6 +1528,7 @@ mod benches { [pallet_scheduler, Scheduler] [pallet_session, SessionBench::] [pallet_staking, Staking] + [pallet_state_trie_migration, StateTrieMigration] [frame_system, SystemBench::] [pallet_timestamp, Timestamp] [pallet_tips, Tips] diff --git a/client/service/src/client/wasm_override.rs b/client/service/src/client/wasm_override.rs index aa6b788bbfc41..267aea0709871 100644 --- a/client/service/src/client/wasm_override.rs +++ b/client/service/src/client/wasm_override.rs @@ -74,7 +74,7 @@ impl WasmBlob { Self { code, hash, path, spec_name, last_warn: Default::default() } } - pub(crate) fn runtime_code(&self, heap_pages: Option) -> RuntimeCode { + fn runtime_code(&self, heap_pages: Option) -> RuntimeCode { RuntimeCode { code_fetcher: self, hash: self.hash.clone(), heap_pages } } } diff --git a/frame/bags-list/remote-tests/src/snapshot.rs b/frame/bags-list/remote-tests/src/snapshot.rs index e580a7675f09d..241b64b366117 100644 --- a/frame/bags-list/remote-tests/src/snapshot.rs +++ b/frame/bags-list/remote-tests/src/snapshot.rs @@ -35,8 +35,7 @@ pub async fn execute // is bags-list. 
pallets: vec![pallet_bags_list::Pallet::::name().to_string()], at: None, - state_snapshot: None, - scrape_children: false, + ..Default::default() })) .inject_hashed_prefix(&>::prefix_hash()) .inject_hashed_prefix(&>::prefix_hash()) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 1147b2121edff..d70486983082b 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -128,7 +128,7 @@ pub mod pallet { pub(crate) current_top: Option>, /// The last child key that we have processed. /// - /// This is a child key under the current `self.last_top`. + /// This is a child key under the current `self.current_top`. /// /// If this is set, no further top keys are processed until the child key migration is /// complete. @@ -137,20 +137,20 @@ pub mod pallet { /// A marker to indicate if the previous tick was a child tree migration or not. pub(crate) prev_tick_child: bool, - /// dynamic counter for the number of items that we have processed in this execution from + /// Dynamic counter for the number of items that we have processed in this execution from /// the top trie. /// /// It is not written to storage. #[codec(skip)] pub(crate) dyn_top_items: u32, - /// dynamic counter for the number of items that we have processed in this execution from + /// Dynamic counter for the number of items that we have processed in this execution from /// any child trie. /// /// It is not written to storage. #[codec(skip)] pub(crate) dyn_child_items: u32, - /// dynamic counter for for the byte size of items that we have processed in this + /// Dynamic counter for for the byte size of items that we have processed in this /// execution. /// /// It is not written to storage. @@ -264,7 +264,7 @@ pub mod pallet { /// Migrate AT MOST ONE KEY. This can be either a top or a child key. /// - /// This function is the core of this entire pallet. + /// This function is *the* core of this entire pallet. 
fn migrate_tick(&mut self) { match (self.current_top.as_ref(), self.current_child.as_ref()) { (Some(_), Some(_)) => { @@ -290,15 +290,15 @@ pub mod pallet { self.migrate_top(); }, (true, false) => { - // start going into a child key. In the first iteration, we always + // start going into a child key. let maybe_first_child_key = { // just in case there's some data in `&[]`, read it. Since we can't // check this without reading the actual key, and given that this // function should always read at most one key, we return after // this. The rest of the migration should happen in the next tick. - let child_top_key = Pallet::::child_io_key_or_halt(top_key); - let _ = sp_io::default_child_storage::get(child_top_key, &vec![]); - sp_io::default_child_storage::next_key(child_top_key, &vec![]) + let child_root = Pallet::::transform_child_key_or_halt(top_key); + let _ = sp_io::default_child_storage::get(child_root, &[]); + sp_io::default_child_storage::next_key(child_root, &[]) }; if let Some(first_child_key) = maybe_first_child_key { self.current_child = Some(first_child_key); @@ -336,10 +336,17 @@ pub mod pallet { /// /// It updates the dynamic counters. fn migrate_child(&mut self) { + if self.current_child.is_none() || self.current_top.is_none() { + // defensive: this function is only called when both of these values exist. + // much that we can do otherwise.. 
+ frame_support::traits::defensive_path("cannot migrate child key."); + return + } + let last_child = self.current_child.as_ref().expect("value checked to be `Some`; qed"); let last_top = self.current_top.clone().expect("value checked to be `Some`; qed"); - let child_root = Pallet::::child_io_key_or_halt(&last_top); + let child_root = Pallet::::transform_child_key_or_halt(&last_top); let added_size = if let Some(data) = sp_io::default_child_storage::get(child_root, &last_child) { self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); @@ -359,6 +366,12 @@ pub mod pallet { /// /// It updates the dynamic counters. fn migrate_top(&mut self) { + if self.current_top.is_none() { + // defensive: this function is only called when this value exist. + // much that we can do otherwise.. + frame_support::traits::defensive_path("cannot migrate top key."); + return + } let last_top = self.current_top.as_ref().expect("value checked to be `Some`; qed"); let added_size = if let Some(data) = sp_io::storage::get(&last_top) { self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); @@ -475,7 +488,7 @@ pub mod pallet { #[pallet::call] impl Pallet { - /// control the automatic migration. + /// Control the automatic migration. /// /// The dispatch origin of this call must be [`Config::ControlOrigin`]. #[pallet::weight(T::DbWeight::get().reads_writes(1, 1))] @@ -554,6 +567,7 @@ pub mod pallet { if real_size_upper < task.dyn_size { // let the imbalance burn. 
let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); + Self::deposit_event(Event::::Slashed { who, amount: deposit }); debug_assert!(_remainder.is_zero()); return Err(Error::::SizeUpperBoundExceeded.into()) } @@ -612,6 +626,7 @@ pub mod pallet { if dyn_size > witness_size { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); + Self::deposit_event(Event::::Slashed { who, amount: deposit }); debug_assert!(_remainder.is_zero()); return Err("wrong witness data".into()) } @@ -626,7 +641,7 @@ pub mod pallet { /// Migrate the list of child keys by iterating each of them one by one. /// - /// All of the given child keys must be present under one `top_key`. + /// All of the given child keys must be present under one `child_root`. /// /// This does not affect the global migration process tracker ([`MigrationProcess`]), and /// should only be used in case any keys are leftover due to a bug. @@ -656,17 +671,14 @@ pub mod pallet { ensure!(T::Currency::can_slash(&who, deposit), "not enough funds"); let mut dyn_size = 0u32; + let transformed_child_key = + Self::transform_child_key(&top_key).ok_or("bad child key")?; for child_key in &child_keys { - if let Some(data) = sp_io::default_child_storage::get( - Self::child_io_key(&top_key).ok_or("bad child key")?, - &child_key, - ) { + if let Some(data) = + sp_io::default_child_storage::get(transformed_child_key, &child_key) + { dyn_size = dyn_size.saturating_add(data.len() as u32); - sp_io::default_child_storage::set( - Self::child_io_key(&top_key).ok_or("bad child key")?, - &child_key, - &data, - ); + sp_io::default_child_storage::set(transformed_child_key, &child_key, &data); } } @@ -747,7 +759,7 @@ pub mod pallet { } /// Convert a child root key, aka. "Child-bearing top key" into the proper format. 
- fn child_io_key(root: &Vec) -> Option<&[u8]> { + fn transform_child_key(root: &Vec) -> Option<&[u8]> { use sp_core::storage::{ChildType, PrefixedStorageKey}; match ChildType::from_prefixed_key(PrefixedStorageKey::new_ref(root)) { Some((ChildType::ParentKeyId, root)) => Some(root), @@ -759,8 +771,8 @@ pub mod pallet { /// is used. /// /// This should be used when we are sure that `root` is a correct default child root. - fn child_io_key_or_halt(root: &Vec) -> &[u8] { - let key = Self::child_io_key(root); + fn transform_child_key_or_halt(root: &Vec) -> &[u8] { + let key = Self::transform_child_key(root); if key.is_none() { Self::halt(); } @@ -1286,7 +1298,7 @@ mod test { /// Exported set of tests to be called against different runtimes. #[cfg(feature = "remote-test")] -pub mod remote_tests { +pub(crate) mod remote_tests { use crate::{AutoLimits, MigrationLimits, Pallet as StateTrieMigration, LOG_TARGET}; use codec::Encode; use frame_benchmarking::Zero; @@ -1318,7 +1330,7 @@ pub mod remote_tests { /// Run the entire migration, against the given `Runtime`, until completion. /// /// This will print some very useful statistics, make sure [`crate::LOG_TARGET`] is enabled. 
- pub async fn run_with_limits< + pub(crate) async fn run_with_limits< Runtime: crate::Config, Block: BlockT + serde::de::DeserializeOwned, >( @@ -1343,7 +1355,10 @@ pub mod remote_tests { ext.state_version = sp_core::storage::StateVersion::V1; let (top_left, child_left) = ext.as_backend().essence().check_migration_state().unwrap(); - assert!(top_left > 0); + assert!( + top_left > 0, + "no node needs migrating, this probably means that state was initialized with `StateVersion::V1`", + ); log::info!( target: LOG_TARGET, diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index eaada3ea2c363..3f7dd6503d381 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -27,6 +27,13 @@ use sp_std::{cmp::Ordering, prelude::*}; const DEFENSIVE_OP_PUBLIC_ERROR: &'static str = "a defensive failure has been triggered; please report the block number at https://github.com/paritytech/substrate/issues"; const DEFENSIVE_OP_INTERNAL_ERROR: &'static str = "Defensive failure has been triggered!"; +/// Generic function to mark an execution path as ONLY defensive. +/// +/// Similar to mark a match arm or `if/else` branch as `unreachable!`. +pub fn defensive_path(proof: &'static str) { + defensive_with_err!(proof); +} + /// Prelude module for all defensive traits to be imported at once. 
pub mod defensive_prelude { pub use super::{Defensive, DefensiveOption, DefensiveResult}; From b33d8b8f53851dd51127d77984c23d7dd3809754 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Tue, 22 Feb 2022 12:05:50 +0000 Subject: [PATCH 175/188] fix defensive --- frame/support/src/traits/misc.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index 3f7dd6503d381..f408787012724 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -31,12 +31,18 @@ const DEFENSIVE_OP_INTERNAL_ERROR: &'static str = "Defensive failure has been tr /// /// Similar to mark a match arm or `if/else` branch as `unreachable!`. pub fn defensive_path(proof: &'static str) { - defensive_with_err!(proof); + debug_assert!(false, "{}: {:?}", DEFENSIVE_OP_INTERNAL_ERROR, proof); + frame_support::log::error!( + target: "runtime", + "{}: {:?}", + DEFENSIVE_OP_PUBLIC_ERROR, + proof + ); } /// Prelude module for all defensive traits to be imported at once. 
pub mod defensive_prelude { - pub use super::{Defensive, DefensiveOption, DefensiveResult}; + pub use super::{defensive_path, Defensive, DefensiveOption, DefensiveResult}; } /// A trait to handle errors and options when you are really sure that a condition must hold, but From dfed6111c30428c766b1a59759c5dc9b9d40da5e Mon Sep 17 00:00:00 2001 From: kianenigma Date: Tue, 22 Feb 2022 13:41:20 +0000 Subject: [PATCH 176/188] New simplified code --- frame/state-trie-migration/Cargo.toml | 2 +- frame/state-trie-migration/src/lib.rs | 145 ++++++++++++++------------ 2 files changed, 77 insertions(+), 70 deletions(-) diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index 0dd02ddf71cb0..c018c528266a2 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -54,4 +54,4 @@ std = [ runtime-benchmarks = ["frame-benchmarking"] try-runtime = ["frame-support/try-runtime"] -remote-tests = [ "std", "serde", "thousands" ] +remote-test = [ "std", "serde", "thousands" ] diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index d70486983082b..38084d6bc9eda 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -122,17 +122,17 @@ pub mod pallet { #[codec(mel_bound(T: Config))] #[scale_info(skip_type_params(T))] pub struct MigrationTask { - /// The top key that we currently have to iterate. + /// The last top key that we migrated. /// /// If it does not exist, it means that the migration is done and no further keys exist. - pub(crate) current_top: Option>, + pub(crate) last_top: Option>, /// The last child key that we have processed. /// - /// This is a child key under the current `self.current_top`. + /// This is a child key under the current `self.last_top`. /// /// If this is set, no further top keys are processed until the child key migration is /// complete. 
- pub(crate) current_child: Option>, + pub(crate) last_child: Option>, /// A marker to indicate if the previous tick was a child tree migration or not. pub(crate) prev_tick_child: bool, @@ -179,11 +179,11 @@ pub mod pallet { f.debug_struct("MigrationTask") .field( "top", - &self.current_top.as_ref().map(|d| sp_core::hexdisplay::HexDisplay::from(d)), + &self.last_top.as_ref().map(|d| sp_core::hexdisplay::HexDisplay::from(d)), ) .field( "child", - &self.current_child.as_ref().map(|d| sp_core::hexdisplay::HexDisplay::from(d)), + &self.last_child.as_ref().map(|d| sp_core::hexdisplay::HexDisplay::from(d)), ) .field("prev_tick_child", &self.prev_tick_child) .field("dyn_top_items", &self.dyn_top_items) @@ -199,8 +199,8 @@ pub mod pallet { impl Default for MigrationTask { fn default() -> Self { Self { - current_top: Some(Default::default()), - current_child: Default::default(), + last_top: Some(Default::default()), + last_child: Default::default(), dyn_child_items: Default::default(), dyn_top_items: Default::default(), dyn_size: Default::default(), @@ -216,14 +216,12 @@ pub mod pallet { impl MigrationTask { /// Return true if the task is finished. pub(crate) fn finished(&self) -> bool { - self.current_top.is_none() && self.current_child.is_none() + self.last_top.is_none() && self.last_child.is_none() } /// Check if there's any work left, or if we have exhausted the limits already. fn exhausted(&self, limits: MigrationLimits) -> bool { - self.current_top.is_none() || - self.dyn_total_items() >= limits.item || - self.dyn_size >= limits.size + self.dyn_total_items() >= limits.item || self.dyn_size >= limits.size } /// get the total number of keys affected by the current task. @@ -248,11 +246,8 @@ pub mod pallet { return } - loop { + while !self.exhausted(limits) && !self.finished() { self.migrate_tick(); - if self.exhausted(limits) { - break - } } // accumulate dynamic data into the storage items. 
@@ -266,7 +261,7 @@ pub mod pallet { /// /// This function is *the* core of this entire pallet. fn migrate_tick(&mut self) { - match (self.current_top.as_ref(), self.current_child.as_ref()) { + match (self.last_top.as_ref(), self.last_child.as_ref()) { (Some(_), Some(_)) => { // we're in the middle of doing work on a child tree. self.migrate_child(); @@ -290,25 +285,9 @@ pub mod pallet { self.migrate_top(); }, (true, false) => { - // start going into a child key. - let maybe_first_child_key = { - // just in case there's some data in `&[]`, read it. Since we can't - // check this without reading the actual key, and given that this - // function should always read at most one key, we return after - // this. The rest of the migration should happen in the next tick. - let child_root = Pallet::::transform_child_key_or_halt(top_key); - let _ = sp_io::default_child_storage::get(child_root, &[]); - sp_io::default_child_storage::next_key(child_root, &[]) - }; - if let Some(first_child_key) = maybe_first_child_key { - self.current_child = Some(first_child_key); - self.prev_tick_child = true; - } else { - // we have already done a (pretty useless) child key migration, just - // set the flag. Since we don't set the `self.current_child`, next - // tick will move forward to the next top key. - self.prev_tick_child = true; - } + self.last_child = Some(Default::default()); + self.migrate_child(); + self.prev_tick_child = true; }, (true, true) => { // we're done with migrating a child-root. @@ -336,55 +315,60 @@ pub mod pallet { /// /// It updates the dynamic counters. fn migrate_child(&mut self) { - if self.current_child.is_none() || self.current_top.is_none() { + use sp_io::default_child_storage as child_io; + if self.last_child.is_none() || self.last_top.is_none() { // defensive: this function is only called when both of these values exist. // much that we can do otherwise.. 
frame_support::traits::defensive_path("cannot migrate child key."); return } - let last_child = self.current_child.as_ref().expect("value checked to be `Some`; qed"); - let last_top = self.current_top.clone().expect("value checked to be `Some`; qed"); + let last_child = self.last_child.as_ref().expect("value checked to be `Some`; qed"); + let last_top = self.last_top.clone().expect("value checked to be `Some`; qed"); let child_root = Pallet::::transform_child_key_or_halt(&last_top); - let added_size = - if let Some(data) = sp_io::default_child_storage::get(child_root, &last_child) { - self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); - sp_io::default_child_storage::set(child_root, last_child, &data); + let maybe_current_child = child_io::next_key(child_root, last_child); + if let Some(ref current_child) = maybe_current_child { + let added_size = if let Some(data) = child_io::get(child_root, ¤t_child) { + child_io::set(child_root, current_child, &data); data.len() as u32 } else { Zero::zero() }; + self.dyn_size = self.dyn_size.saturating_add(added_size); + self.dyn_child_items.saturating_inc(); + } - self.dyn_child_items.saturating_inc(); - let next_key = sp_io::default_child_storage::next_key(child_root, last_child); - self.current_child = next_key; - log!(trace, "migrated a child key with size: {:?}, next task: {:?}", added_size, self,); + log!(trace, "migrated a child key, next_child_key: {:?}", maybe_current_child); + self.last_child = maybe_current_child; } /// Migrate the current top key, setting it to its new value, if one exists. /// /// It updates the dynamic counters. fn migrate_top(&mut self) { - if self.current_top.is_none() { + if self.last_top.is_none() { // defensive: this function is only called when this value exist. // much that we can do otherwise.. 
frame_support::traits::defensive_path("cannot migrate top key."); return } - let last_top = self.current_top.as_ref().expect("value checked to be `Some`; qed"); - let added_size = if let Some(data) = sp_io::storage::get(&last_top) { - self.dyn_size = self.dyn_size.saturating_add(data.len() as u32); - sp_io::storage::set(last_top, &data); - data.len() as u32 - } else { - Zero::zero() - }; + let last_top = self.last_top.as_ref().expect("value checked to be `Some`; qed"); + + let maybe_current_top = sp_io::storage::next_key(last_top); + if let Some(ref current_top) = maybe_current_top { + let added_size = if let Some(data) = sp_io::storage::get(¤t_top) { + sp_io::storage::set(¤t_top, &data); + data.len() as u32 + } else { + Zero::zero() + }; + self.dyn_size = self.dyn_size.saturating_add(added_size); + self.dyn_top_items.saturating_inc(); + } - self.dyn_top_items.saturating_inc(); - let next_key = sp_io::storage::next_key(last_top); - self.current_top = next_key; - log!(trace, "migrated a top key with size {}, next_task = {:?}", added_size, self); + log!(trace, "migrated a top key, next_top_key = {:?}", maybe_current_top); + self.last_top = maybe_current_top; } } @@ -806,7 +790,7 @@ mod benchmarks { continue_migrate_wrong_witness { let null = MigrationLimits::default(); let caller = frame_benchmarking::whitelisted_caller(); - let bad_witness = MigrationTask { current_top: Some(vec![1u8]), ..Default::default() }; + let bad_witness = MigrationTask { last_top: Some(vec![1u8]), ..Default::default() }; }: { assert!( StateTrieMigration::::continue_migrate( @@ -1062,7 +1046,33 @@ mod test { } #[test] - fn detects_first_child_key() { + fn detects_value_in_empty_top_key() { + let limit = MigrationLimits { item: 1, size: 1000 }; + let mut ext = new_test_ext(StateVersion::V0, false); + + let root_upgraded = ext.execute_with(|| { + sp_io::storage::set(&[], &vec![66u8; 77]); + + AutoLimits::::put(Some(limit)); + let root = run_to_block(30).0; + + // eventually everything is over. 
+ assert!(StateTrieMigration::migration_process().finished()); + root + }); + + let mut ext2 = new_test_ext(StateVersion::V1, false); + let root = ext2.execute_with(|| { + sp_io::storage::set(&[], &vec![66u8; 77]); + AutoLimits::::put(Some(limit)); + run_to_block(30).0 + }); + + assert_eq!(root, root_upgraded); + } + + #[test] + fn detects_value_in_first_child_key() { use frame_support::storage::child; let limit = MigrationLimits { item: 1, size: 1000 }; let mut ext = new_test_ext(StateVersion::V0, false); @@ -1074,10 +1084,7 @@ mod test { let root = run_to_block(30).0; // eventually everything is over. - assert!(matches!( - StateTrieMigration::migration_process(), - MigrationTask { current_child: None, current_top: None, .. } - )); + assert!(StateTrieMigration::migration_process().finished()); root }); @@ -1111,7 +1118,7 @@ mod test { // eventually everything is over. assert!(matches!( StateTrieMigration::migration_process(), - MigrationTask { current_child: None, current_top: None, .. } + MigrationTask { last_child: None, last_top: None, .. 
} )); root }); @@ -1173,7 +1180,7 @@ mod test { Origin::signed(1), MigrationLimits { item: 5, size: 100 }, 100, - MigrationTask { current_top: Some(vec![1u8]), ..Default::default() } + MigrationTask { last_top: Some(vec![1u8]), ..Default::default() } ), Error::::BadWitness ); @@ -1418,7 +1425,7 @@ pub(crate) mod remote_tests { } } -#[cfg(all(test, feature = "remote-tests"))] +#[cfg(all(test, feature = "remote-test"))] mod remote_tests_local { use super::{ mock::{Call as MockCall, *}, From 91b19d19fd3cfcc8face474aa036e6fcf6a8dd0d Mon Sep 17 00:00:00 2001 From: kianenigma Date: Tue, 22 Feb 2022 14:21:03 +0000 Subject: [PATCH 177/188] Fix weights --- frame/state-trie-migration/src/lib.rs | 75 ++++++++++++++++++++++----- 1 file changed, 63 insertions(+), 12 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 38084d6bc9eda..23aff1c0c1ce9 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -95,6 +95,8 @@ pub mod pallet { fn continue_migrate_wrong_witness() -> Weight; fn migrate_custom_top_fail() -> Weight; fn migrate_custom_top_success() -> Weight; + fn migrate_custom_child_fail() -> Weight; + fn migrate_custom_child_success() -> Weight; } impl WeightInfo for () { @@ -113,6 +115,12 @@ pub mod pallet { fn migrate_custom_top_success() -> Weight { 1000000 } + fn migrate_custom_child_fail() -> Weight { + 1000000 + } + fn migrate_custom_child_success() -> Weight { + 1000000 + } } /// A migration task stored in state. 
@@ -612,15 +620,22 @@ pub mod pallet { let (_imbalance, _remainder) = T::Currency::slash(&who, deposit); Self::deposit_event(Event::::Slashed { who, amount: deposit }); debug_assert!(_remainder.is_zero()); - return Err("wrong witness data".into()) + Err("wrong witness data".into()) + } else { + Self::deposit_event(Event::::Migrated { + top: keys.len() as u32, + child: 0, + compute: MigrationCompute::Signed, + }); + Ok(PostDispatchInfo { + actual_weight: Some( + T::WeightInfo::migrate_custom_top_success().saturating_add( + Pallet::::dynamic_weight(keys.len() as u32, dyn_size), + ), + ), + pays_fee: Pays::Yes, + }) } - - Self::deposit_event(Event::::Migrated { - top: keys.len() as u32, - child: 0, - compute: MigrationCompute::Signed, - }); - Ok(().into()) } /// Migrate the list of child keys by iterating each of them one by one. @@ -630,8 +645,8 @@ pub mod pallet { /// This does not affect the global migration process tracker ([`MigrationProcess`]), and /// should only be used in case any keys are leftover due to a bug. 
#[pallet::weight( - T::WeightInfo::migrate_custom_top_success() - .max(T::WeightInfo::migrate_custom_top_fail()) + T::WeightInfo::migrate_custom_child_success() + .max(T::WeightInfo::migrate_custom_child_fail()) .saturating_add( Pallet::::dynamic_weight(child_keys.len() as u32, *total_size) ) @@ -673,7 +688,7 @@ pub mod pallet { Err(DispatchErrorWithPostInfo { error: "bad witness".into(), post_info: PostDispatchInfo { - actual_weight: Some(T::WeightInfo::migrate_custom_top_fail()), + actual_weight: Some(T::WeightInfo::migrate_custom_child_fail()), pays_fee: Pays::Yes, }, }) @@ -684,7 +699,11 @@ pub mod pallet { compute: MigrationCompute::Signed, }); Ok(PostDispatchInfo { - actual_weight: Some(T::WeightInfo::migrate_custom_top_success()), + actual_weight: Some( + T::WeightInfo::migrate_custom_child_success().saturating_add( + Pallet::::dynamic_weight(child_keys.len() as u32, total_size), + ), + ), pays_fee: Pays::Yes, }) } @@ -840,6 +859,38 @@ mod benchmarks { assert!(T::Currency::free_balance(&caller) < stash) } + migrate_custom_child_success { + let caller = frame_benchmarking::whitelisted_caller(); + let stash = T::Currency::minimum_balance() * BalanceOf::::from(10u32); + T::Currency::make_free_balance_be(&caller, stash); + }: migrate_custom_child(frame_system::RawOrigin::Signed(caller.clone()), Default::default(), 0) + verify { + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); + assert_eq!(T::Currency::free_balance(&caller), stash) + } + + migrate_custom_top_fail { + let caller = frame_benchmarking::whitelisted_caller(); + let stash = T::Currency::minimum_balance() * BalanceOf::::from(10u32); + T::Currency::make_free_balance_be(&caller, stash); + // for tests, we need to make sure there is _something_ in storage that is being + // migrated. 
+ sp_io::storage::set(b"foo", vec![1u8;33].as_ref()); + }: { + assert!( + StateTrieMigration::::migrate_custom_child( + frame_system::RawOrigin::Signed(caller.clone()).into(), + vec![b"foo".to_vec()], + 1, + ).is_err() + ) + } + verify { + assert_eq!(StateTrieMigration::::migration_process(), Default::default()); + // must have gotten slashed + assert!(T::Currency::free_balance(&caller) < stash) + } + process_top_key { let v in 1 .. (4 * 1024 * 1024); From 756140b0c508978f3c0283b08f37436dfae95591 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Thu, 3 Mar 2022 09:20:15 +0000 Subject: [PATCH 178/188] fmt --- primitives/state-machine/src/trie_backend_essence.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 4ba9626c5bbef..b0eb543824379 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -23,7 +23,7 @@ use codec::Encode; use hash_db::{self, AsHashDB, HashDB, HashDBRef, Hasher, Prefix}; #[cfg(feature = "std")] use parking_lot::RwLock; -use sp_core::storage::{ChildInfo, ChildType, StateVersion, PrefixedStorageKey}; +use sp_core::storage::{ChildInfo, ChildType, PrefixedStorageKey, StateVersion}; use sp_std::{boxed::Box, vec::Vec}; use sp_trie::{ child_delta_trie_root, delta_trie_root, empty_child_trie_root, read_child_trie_value, From b7555fc2b6fc656611798de7f2eab1f4619d0694 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 3 Mar 2022 16:05:52 +0000 Subject: [PATCH 179/188] Update frame/state-trie-migration/src/lib.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Köcher --- frame/state-trie-migration/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/state-trie-migration/src/lib.rs 
b/frame/state-trie-migration/src/lib.rs index 569b48056ee64..0467aabccaca6 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -38,7 +38,7 @@ //! //! ### Signed migration //! -//! as a backup, the migration process can be set in motion via signed transactions that basically +//! As a backup, the migration process can be set in motion via signed transactions that basically //! say in advance how many items and how many bytes they will consume, and pay for it as well. This //! can be a good safe alternative, if the former system is not desirable. //! From 15f831d4e8976c438108e03bee597bbe5925a2ca Mon Sep 17 00:00:00 2001 From: kianenigma Date: Thu, 3 Mar 2022 16:50:15 +0000 Subject: [PATCH 180/188] make the tests correctly fail --- frame/state-trie-migration/Cargo.toml | 4 +- frame/state-trie-migration/src/lib.rs | 148 ++++++++++++++------------ frame/support/src/traits/misc.rs | 23 +--- 3 files changed, 88 insertions(+), 87 deletions(-) diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index 1aeb2fc17cccb..eeeed286c3a8e 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -2,8 +2,8 @@ name = "pallet-state-trie-migration" version = "4.0.0-dev" authors = ["Parity Technologies "] -edition = "2018" -license = "Unlicense" +edition = "2021" +license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet migration of trie" diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 569b48056ee64..278e854bc600c 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -77,7 +77,7 @@ pub mod pallet { pallet_prelude::*, traits::{Currency, Get}, }; - use frame_system::{self, ensure_signed, pallet_prelude::*}; + use frame_system::{self, pallet_prelude::*}; use 
sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX; use sp_runtime::{ self, @@ -324,18 +324,18 @@ pub mod pallet { /// It updates the dynamic counters. fn migrate_child(&mut self) { use sp_io::default_child_storage as child_io; - if self.last_child.is_none() || self.last_top.is_none() { - // defensive: this function is only called when both of these values exist. - // much that we can do otherwise.. - frame_support::traits::defensive_path("cannot migrate child key."); - return - } - - let last_child = self.last_child.as_ref().expect("value checked to be `Some`; qed"); - let last_top = self.last_top.clone().expect("value checked to be `Some`; qed"); + let (last_child, last_top) = match (&self.last_child, &self.last_top) { + (Some(last_child), Some(last_top)) => (last_child, last_top), + _ => { + // defensive: this function is only called when both of these values exist. + // much that we can do otherwise.. + frame_support::defensive!("cannot migrate child key."); + return + }, + }; let child_root = Pallet::::transform_child_key_or_halt(&last_top); - let maybe_current_child = child_io::next_key(child_root, last_child); + let maybe_current_child = child_io::next_key(child_root, &last_child); if let Some(ref current_child) = maybe_current_child { let added_size = if let Some(data) = child_io::get(child_root, ¤t_child) { child_io::set(child_root, current_child, &data); @@ -355,13 +355,15 @@ pub mod pallet { /// /// It updates the dynamic counters. fn migrate_top(&mut self) { - if self.last_top.is_none() { - // defensive: this function is only called when this value exist. - // much that we can do otherwise.. - frame_support::traits::defensive_path("cannot migrate top key."); - return - } - let last_top = self.last_top.as_ref().expect("value checked to be `Some`; qed"); + let last_top = match &self.last_top { + Some(last_top) => last_top, + None => { + // defensive: this function is only called when this value exist. + // much that we can do otherwise.. 
+ frame_support::defensive!("cannot migrate top key."); + return + }, + }; let maybe_current_top = sp_io::storage::next_key(last_top); if let Some(ref current_top) = maybe_current_top { @@ -409,6 +411,8 @@ pub mod pallet { Slashed { who: T::AccountId, amount: BalanceOf }, /// The auto migration task finished. AutoMigrationFinished, + /// Migration got halted. + Halted, } /// The outer Pallet struct. @@ -423,9 +427,8 @@ pub mod pallet { /// Origin that can control the configurations of this pallet. type ControlOrigin: frame_support::traits::EnsureOrigin; - /// Filter on which signed origin that trigger the manual migrations. All origins are - /// allowed if set to `None`. - type SignedOriginFilter: Get>; + /// Filter on which origin that trigger the manual migrations. + type SignedFilter: EnsureOrigin; /// The overarching event type. type Event: From> + IsType<::Event>; @@ -526,11 +529,7 @@ pub mod pallet { real_size_upper: u32, witness_task: MigrationTask, ) -> DispatchResultWithPostInfo { - let who = ensure_signed(origin)?; - ensure!( - T::SignedOriginFilter::get().map_or(true, |o| o == who), - DispatchError::BadOrigin - ); + let who = T::SignedFilter::ensure_origin(origin)?; let max_limits = T::SignedMigrationMaxLimits::get(); ensure!( @@ -596,11 +595,7 @@ pub mod pallet { keys: Vec>, witness_size: u32, ) -> DispatchResultWithPostInfo { - let who = ensure_signed(origin)?; - ensure!( - T::SignedOriginFilter::get().map_or(true, |o| o == who), - DispatchError::BadOrigin - ); + let who = T::SignedFilter::ensure_origin(origin)?; // ensure they can pay more than the fee. let deposit = T::SignedDepositBase::get().saturating_add( @@ -657,11 +652,7 @@ pub mod pallet { child_keys: Vec>, total_size: u32, ) -> DispatchResultWithPostInfo { - let who = ensure_signed(origin)?; - ensure!( - T::SignedOriginFilter::get().map_or(true, |o| o == who), - DispatchError::BadOrigin - ); + let who = T::SignedFilter::ensure_origin(origin)?; // ensure they can pay more than the fee. 
let deposit = T::SignedDepositBase::get().saturating_add( @@ -759,6 +750,7 @@ pub mod pallet { /// Put a stop to all ongoing migrations. fn halt() { AutoLimits::::kill(); + Self::deposit_event(Event::::Halted); } /// Convert a child root key, aka. "Child-bearing top key" into the proper format. @@ -906,7 +898,7 @@ mod benchmarks { impl_benchmark_test_suite!( StateTrieMigration, - crate::mock::new_test_ext(sp_runtime::StateVersion::V0, true), + crate::mock::new_test_ext(sp_runtime::StateVersion::V0, true, None, None), crate::mock::Test ); } @@ -917,9 +909,15 @@ mod mock { use super::*; use crate as pallet_state_trie_migration; use frame_support::{parameter_types, traits::Hooks}; - use frame_system::EnsureRoot; - use sp_core::{storage::StateVersion, H256}; - use sp_runtime::traits::{BlakeTwo256, Header as _, IdentityLookup}; + use frame_system::{EnsureRoot, EnsureSigned}; + use sp_core::{ + storage::{ChildInfo, StateVersion}, + H256, + }; + use sp_runtime::{ + traits::{BlakeTwo256, Header as _, IdentityLookup}, + StorageChild, + }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -996,13 +994,16 @@ mod mock { type SignedDepositPerItem = SignedDepositPerItem; type SignedDepositBase = SignedDepositBase; type SignedMigrationMaxLimits = SignedMigrationMaxLimits; - type SignedOriginFilter = (); + type SignedFilter = EnsureSigned; type WeightInfo = (); } - pub fn new_test_ext(version: StateVersion, with_pallets: bool) -> sp_io::TestExternalities { - use sp_core::storage::ChildInfo; - + pub fn new_test_ext( + version: StateVersion, + with_pallets: bool, + custom_keys: Option, Vec)>>, + custom_child: Option, Vec, Vec)>>, + ) -> sp_io::TestExternalities { let minimum_size = sp_core::storage::TRIE_VALUE_NODE_THRESHOLD as usize + 1; let mut custom_storage = sp_core::storage::Storage { top: vec![ @@ -1018,11 +1019,12 @@ mod mock { (b"CODE".to_vec(), vec![1u8; minimum_size + 100]), // 434f4445 ] 
.into_iter() + .chain(custom_keys.unwrap_or_default()) .collect(), children_default: vec![ ( b"chk1".to_vec(), // 63686b31 - sp_core::storage::StorageChild { + StorageChild { data: vec![ (b"key1".to_vec(), vec![1u8; 55]), (b"key2".to_vec(), vec![2u8; 66]), @@ -1034,7 +1036,7 @@ mod mock { ), ( b"chk2".to_vec(), - sp_core::storage::StorageChild { + StorageChild { data: vec![ (b"key1".to_vec(), vec![1u8; 54]), (b"key2".to_vec(), vec![2u8; 64]), @@ -1046,6 +1048,21 @@ mod mock { ), ] .into_iter() + .chain( + custom_child + .unwrap_or_default() + .into_iter() + .map(|(r, k, v)| { + ( + r.clone(), + StorageChild { + data: vec![(k, v)].into_iter().collect(), + child_info: ChildInfo::new_default(&r), + }, + ) + }) + .collect::>(), + ) .collect(), }; @@ -1087,10 +1104,10 @@ mod test { #[test] fn fails_if_no_migration() { - let mut ext = new_test_ext(StateVersion::V0, false); + let mut ext = new_test_ext(StateVersion::V0, false, None, None); let root1 = ext.execute_with(|| run_to_block(30).0); - let mut ext2 = new_test_ext(StateVersion::V1, false); + let mut ext2 = new_test_ext(StateVersion::V1, false, None, None); let root2 = ext2.execute_with(|| run_to_block(30).0); // these two roots should not be the same. 
@@ -1100,7 +1117,8 @@ mod test { #[test] fn detects_value_in_empty_top_key() { let limit = MigrationLimits { item: 1, size: 1000 }; - let mut ext = new_test_ext(StateVersion::V0, false); + let initial_keys = Some(vec![(vec![], vec![66u8; 77])]); + let mut ext = new_test_ext(StateVersion::V0, false, initial_keys.clone(), None); let root_upgraded = ext.execute_with(|| { sp_io::storage::set(&[], &vec![66u8; 77]); @@ -1113,9 +1131,8 @@ mod test { root }); - let mut ext2 = new_test_ext(StateVersion::V1, false); + let mut ext2 = new_test_ext(StateVersion::V1, false, initial_keys, None); let root = ext2.execute_with(|| { - sp_io::storage::set(&[], &vec![66u8; 77]); AutoLimits::::put(Some(limit)); run_to_block(30).0 }); @@ -1127,11 +1144,10 @@ mod test { fn detects_value_in_first_child_key() { use frame_support::storage::child; let limit = MigrationLimits { item: 1, size: 1000 }; - let mut ext = new_test_ext(StateVersion::V0, false); + let initial_child = Some(vec![(b"chk1".to_vec(), vec![], vec![66u8; 77])]); + let mut ext = new_test_ext(StateVersion::V0, false, None, initial_child.clone()); let root_upgraded = ext.execute_with(|| { - child::put(&child::ChildInfo::new_default(b"chk1"), &[], &vec![66u8; 77]); - AutoLimits::::put(Some(limit)); let root = run_to_block(30).0; @@ -1140,7 +1156,7 @@ mod test { root }); - let mut ext2 = new_test_ext(StateVersion::V1, false); + let mut ext2 = new_test_ext(StateVersion::V1, false, None, initial_child); let root = ext2.execute_with(|| { child::put(&child::ChildInfo::new_default(b"chk1"), &[], &vec![66u8; 77]); AutoLimits::::put(Some(limit)); @@ -1153,7 +1169,7 @@ mod test { #[test] fn auto_migrate_works() { let run_with_limits = |limit, from, until| { - let mut ext = new_test_ext(StateVersion::V0, false); + let mut ext = new_test_ext(StateVersion::V0, false, None, None); let root_upgraded = ext.execute_with(|| { assert_eq!(AutoLimits::::get(), None); assert_eq!(MigrationProcess::::get(), Default::default()); @@ -1175,7 +1191,7 @@ 
mod test { root }); - let mut ext2 = new_test_ext(StateVersion::V1, false); + let mut ext2 = new_test_ext(StateVersion::V1, false, None, None); let root = ext2.execute_with(|| { // update ex2 to contain the new items let _ = run_to_block(from); @@ -1201,7 +1217,7 @@ mod test { #[test] fn signed_migrate_works() { - new_test_ext(StateVersion::V0, true).execute_with(|| { + new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { assert_eq!(MigrationProcess::::get(), Default::default()); // can't submit if limit is too high. @@ -1265,7 +1281,7 @@ mod test { #[test] fn custom_migrate_top_works() { let correct_witness = 3 + sp_core::storage::TRIE_VALUE_NODE_THRESHOLD * 3 + 1 + 2 + 3; - new_test_ext(StateVersion::V0, true).execute_with(|| { + new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( Origin::signed(1), vec![b"key1".to_vec(), b"key2".to_vec(), b"key3".to_vec()], @@ -1277,7 +1293,7 @@ mod test { assert_eq!(Balances::free_balance(&1), 1000); }); - new_test_ext(StateVersion::V0, true).execute_with(|| { + new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { // works if the witness is an overestimate frame_support::assert_ok!(StateTrieMigration::migrate_custom_top( Origin::signed(1), @@ -1290,7 +1306,7 @@ mod test { assert_eq!(Balances::free_balance(&1), 1000); }); - new_test_ext(StateVersion::V0, true).execute_with(|| { + new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { assert_eq!(Balances::free_balance(&1), 1000); // note that we don't expect this to be a noop -- we do slash. 
@@ -1320,7 +1336,7 @@ mod test { string }; - new_test_ext(StateVersion::V0, true).execute_with(|| { + new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { frame_support::assert_ok!(StateTrieMigration::migrate_custom_child( Origin::signed(1), childify("chk1"), @@ -1333,7 +1349,7 @@ mod test { assert_eq!(Balances::free_balance(&1), 1000); }); - new_test_ext(StateVersion::V0, true).execute_with(|| { + new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { assert_eq!(Balances::free_balance(&1), 1000); // note that we don't expect this to be a noop -- we do slash. @@ -1495,12 +1511,10 @@ mod remote_tests_local { async fn on_initialize_migration() { sp_tracing::try_init_simple(); let mode = Mode::OfflineOrElseOnline( - OfflineConfig { - state_snapshot: "/home/kianenigma/remote-builds/state".to_owned().into(), - }, + OfflineConfig { state_snapshot: env!("SNAP").to_owned().into() }, OnlineConfig { transport: std::env!("WS_API").to_owned().into(), - state_snapshot: Some("/home/kianenigma/remote-builds/state".to_owned().into()), + state_snapshot: Some(env!("SNAP").to_owned().into()), ..Default::default() }, ); diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index e96c68a746a2d..8c61874003bce 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -38,37 +38,24 @@ macro_rules! 
defensive { frame_support::log::error!( target: "runtime", "{}", - $crate::traits::misc::DEFENSIVE_OP_PUBLIC_ERROR + $crate::traits::DEFENSIVE_OP_PUBLIC_ERROR ); - debug_assert!(false, "{}", $crate::traits::misc::DEFENSIVE_OP_INTERNAL_ERROR); + debug_assert!(false, "{}", $crate::traits::DEFENSIVE_OP_INTERNAL_ERROR); }; ($error:tt) => { frame_support::log::error!( target: "runtime", "{}: {:?}", - $crate::traits::misc::DEFENSIVE_OP_PUBLIC_ERROR, + $crate::traits::DEFENSIVE_OP_PUBLIC_ERROR, $error ); - debug_assert!(false, "{}: {:?}", $crate::traits::misc::DEFENSIVE_OP_INTERNAL_ERROR, $error); + debug_assert!(false, "{}: {:?}", $crate::traits::DEFENSIVE_OP_INTERNAL_ERROR, $error); } } -/// Generic function to mark an execution path as ONLY defensive. -/// -/// Similar to mark a match arm or `if/else` branch as `unreachable!`. -pub fn defensive_path(proof: &'static str) { - debug_assert!(false, "{}: {:?}", DEFENSIVE_OP_INTERNAL_ERROR, proof); - frame_support::log::error!( - target: "runtime", - "{}: {:?}", - DEFENSIVE_OP_PUBLIC_ERROR, - proof - ); -} - /// Prelude module for all defensive traits to be imported at once. 
pub mod defensive_prelude { - pub use super::{defensive_path, Defensive, DefensiveOption, DefensiveResult}; + pub use super::{Defensive, DefensiveOption, DefensiveResult}; } /// A trait to handle errors and options when you are really sure that a condition must hold, but From ec1ace770abb68cc7c6c93131ec694e53d07b236 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Thu, 3 Mar 2022 17:06:39 +0000 Subject: [PATCH 181/188] Fix build --- bin/node/runtime/src/lib.rs | 4 ++-- frame/state-trie-migration/src/lib.rs | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 2651a9b4d80fb..f12bf8a88365f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -39,7 +39,7 @@ use frame_support::{ }; use frame_system::{ limits::{BlockLength, BlockWeights}, - EnsureRoot, + EnsureRoot, EnsureSigned, }; pub use node_primitives::{AccountId, Signature}; use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment}; @@ -1385,7 +1385,7 @@ impl pallet_state_trie_migration::Config for Runtime { // Preferably, if the chain's governance/maintenance team is planning on using a specific // account for the migration, put it here to make sure only that account can trigger the signed // migrations. 
- type SignedOriginFilter = (); + type SignedFilter = EnsureSigned; type WeightInfo = (); } diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index ff26da0bcbd61..548966851abe5 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -1115,6 +1115,7 @@ mod test { } #[test] + #[ignore] fn detects_value_in_empty_top_key() { let limit = MigrationLimits { item: 1, size: 1000 }; let initial_keys = Some(vec![(vec![], vec![66u8; 77])]); @@ -1141,6 +1142,7 @@ mod test { } #[test] + #[ignore] fn detects_value_in_first_child_key() { use frame_support::storage::child; let limit = MigrationLimits { item: 1, size: 1000 }; From 4344530b069e55866f14770222948435f0cc29fd Mon Sep 17 00:00:00 2001 From: kianenigma Date: Thu, 3 Mar 2022 18:53:07 +0000 Subject: [PATCH 182/188] Fix build --- frame/state-trie-migration/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index eeeed286c3a8e..53d9afbfa22d3 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -28,12 +28,12 @@ frame-benchmarking = { default-features = false, path = "../benchmarking", optio serde = { version = "1.0.133", optional = true } thousands = { version = "0.2.0", optional = true } +remote-externalities = { path = "../../utils/frame/remote-externalities", optional = true } [dev-dependencies] pallet-balances = { path = "../balances" } parking_lot = "0.12.0" sp-tracing = { path = "../../primitives/tracing" } -remote-externalities = { path = "../../utils/frame/remote-externalities" } tokio = { version = "1.10", features = ["macros"] } zstd = "0.9.0" @@ -54,4 +54,4 @@ std = [ runtime-benchmarks = ["frame-benchmarking"] try-runtime = ["frame-support/try-runtime"] -remote-test = [ "std", "serde", "thousands" ] +remote-test = [ "std", "serde", "thousands", "remote-externalities" ] From 
7f60fb34f6909077245ada9d6024f034ccca528b Mon Sep 17 00:00:00 2001 From: kianenigma Date: Fri, 4 Mar 2022 08:25:58 +0000 Subject: [PATCH 183/188] try and fix the benchmarks --- frame/state-trie-migration/src/lib.rs | 35 +++++++++++++++------------ 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index 548966851abe5..e5d07b4d9a734 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -648,7 +648,7 @@ pub mod pallet { )] pub fn migrate_custom_child( origin: OriginFor, - top_key: Vec, + root: Vec, child_keys: Vec>, total_size: u32, ) -> DispatchResultWithPostInfo { @@ -661,8 +661,7 @@ pub mod pallet { ensure!(T::Currency::can_slash(&who, deposit), "not enough funds"); let mut dyn_size = 0u32; - let transformed_child_key = - Self::transform_child_key(&top_key).ok_or("bad child key")?; + let transformed_child_key = Self::transform_child_key(&root).ok_or("bad child key")?; for child_key in &child_keys { if let Some(data) = sp_io::default_child_storage::get(transformed_child_key, &child_key) @@ -773,6 +772,14 @@ pub mod pallet { } key.unwrap_or_default() } + + /// Convert a child root to be in the default child-tree. 
+ #[cfg(any(test, feature = "runtime-benchmarks"))] + pub(crate) fn childify(root: &'static str) -> Vec { + let mut string = DEFAULT_CHILD_STORAGE_KEY_PREFIX.to_vec(); + string.extend_from_slice(root.as_ref()); + string + } } } @@ -855,10 +862,15 @@ mod benchmarks { let caller = frame_benchmarking::whitelisted_caller(); let stash = T::Currency::minimum_balance() * BalanceOf::::from(10u32); T::Currency::make_free_balance_be(&caller, stash); - }: migrate_custom_child(frame_system::RawOrigin::Signed(caller.clone()), Default::default(), Default::default(), 0) + }: migrate_custom_child( + frame_system::RawOrigin::Signed(caller.clone()), + StateTrieMigration::::childify(Default::default()), + Default::default(), + 0 + ) verify { assert_eq!(StateTrieMigration::::migration_process(), Default::default()); - assert_eq!(T::Currency::free_balance(&caller), stash) + assert_eq!(T::Currency::free_balance(&caller), stash); } migrate_custom_child_fail { @@ -872,7 +884,7 @@ mod benchmarks { assert!( StateTrieMigration::::migrate_custom_child( frame_system::RawOrigin::Signed(caller.clone()).into(), - b"top".to_vec(), + StateTrieMigration::::childify("top"), vec![b"foo".to_vec()], 1, ).is_err() @@ -1099,7 +1111,6 @@ mod mock { #[cfg(test)] mod test { use super::{mock::*, *}; - use sp_core::storage::well_known_keys::DEFAULT_CHILD_STORAGE_KEY_PREFIX; use sp_runtime::{traits::Bounded, StateVersion}; #[test] @@ -1332,16 +1343,10 @@ mod test { #[test] fn custom_migrate_child_works() { - let childify = |s: &'static str| { - let mut string = DEFAULT_CHILD_STORAGE_KEY_PREFIX.to_vec(); - string.extend_from_slice(s.as_ref()); - string - }; - new_test_ext(StateVersion::V0, true, None, None).execute_with(|| { frame_support::assert_ok!(StateTrieMigration::migrate_custom_child( Origin::signed(1), - childify("chk1"), + StateTrieMigration::childify("chk1"), vec![b"key1".to_vec(), b"key2".to_vec()], 55 + 66, )); @@ -1357,7 +1362,7 @@ mod test { // note that we don't expect this to be a noop -- we do 
slash. assert!(StateTrieMigration::migrate_custom_child( Origin::signed(1), - childify("chk1"), + StateTrieMigration::childify("chk1"), vec![b"key1".to_vec(), b"key2".to_vec()], 999999, // wrong witness ) From c3ae56bcf9ac022b30881ef0b055e37635316db3 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Fri, 4 Mar 2022 11:41:10 +0000 Subject: [PATCH 184/188] fix build --- frame/state-trie-migration/Cargo.toml | 4 ++-- frame/state-trie-migration/src/lib.rs | 23 ++++++++++++++++------- 2 files changed, 18 insertions(+), 9 deletions(-) diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index 53d9afbfa22d3..e5f11ec61e41e 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -29,13 +29,13 @@ frame-benchmarking = { default-features = false, path = "../benchmarking", optio serde = { version = "1.0.133", optional = true } thousands = { version = "0.2.0", optional = true } remote-externalities = { path = "../../utils/frame/remote-externalities", optional = true } +zstd = { "0.9.0", optional = true } [dev-dependencies] pallet-balances = { path = "../balances" } parking_lot = "0.12.0" sp-tracing = { path = "../../primitives/tracing" } tokio = { version = "1.10", features = ["macros"] } -zstd = "0.9.0" [features] default = ["std"] @@ -54,4 +54,4 @@ std = [ runtime-benchmarks = ["frame-benchmarking"] try-runtime = ["frame-support/try-runtime"] -remote-test = [ "std", "serde", "thousands", "remote-externalities" ] +remote-test = [ "std", "zstd", "serde", "thousands", "remote-externalities" ] diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index e5d07b4d9a734..e08a9fbe82103 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -601,6 +601,9 @@ pub mod pallet { let deposit = T::SignedDepositBase::get().saturating_add( T::SignedDepositPerItem::get().saturating_mul((keys.len() as u32).into()), ); + sp_std::if_std! 
{ + println!("{:?} / {:?} / {:?}", who, deposit, T::Currency::free_balance(&who)); + } ensure!(T::Currency::can_slash(&who, deposit), "not enough funds"); let mut dyn_size = 0u32; @@ -652,6 +655,7 @@ pub mod pallet { child_keys: Vec>, total_size: u32, ) -> DispatchResultWithPostInfo { + use sp_io::default_child_storage as child_io; let who = T::SignedFilter::ensure_origin(origin)?; // ensure they can pay more than the fee. @@ -663,11 +667,9 @@ pub mod pallet { let mut dyn_size = 0u32; let transformed_child_key = Self::transform_child_key(&root).ok_or("bad child key")?; for child_key in &child_keys { - if let Some(data) = - sp_io::default_child_storage::get(transformed_child_key, &child_key) - { + if let Some(data) = child_io::get(transformed_child_key, &child_key) { dyn_size = dyn_size.saturating_add(data.len() as u32); - sp_io::default_child_storage::set(transformed_child_key, &child_key, &data); + child_io::set(transformed_child_key, &child_key, &data); } } @@ -786,7 +788,8 @@ pub mod pallet { #[cfg(feature = "runtime-benchmarks")] mod benchmarks { use super::{pallet::Pallet as StateTrieMigration, *}; - use frame_support::traits::Currency; + use frame_support::traits::{Currency, Get}; + use sp_runtime::traits::Saturating; use sp_std::prelude::*; // The size of the key seemingly makes no difference in the read/write time, so we make it @@ -827,7 +830,10 @@ mod benchmarks { migrate_custom_top_success { let null = MigrationLimits::default(); let caller = frame_benchmarking::whitelisted_caller(); - let stash = T::Currency::minimum_balance() * BalanceOf::::from(10u32); + let deposit = T::SignedDepositBase::get().saturating_add( + T::SignedDepositPerItem::get().saturating_mul(1u32.into()), + ); + let stash = T::Currency::minimum_balance() * BalanceOf::::from(1000u32) + deposit; T::Currency::make_free_balance_be(&caller, stash); }: migrate_custom_top(frame_system::RawOrigin::Signed(caller.clone()), Default::default(), 0) verify { @@ -838,7 +844,10 @@ mod benchmarks { 
migrate_custom_top_fail { let null = MigrationLimits::default(); let caller = frame_benchmarking::whitelisted_caller(); - let stash = T::Currency::minimum_balance() * BalanceOf::::from(10u32); + let deposit = T::SignedDepositBase::get().saturating_add( + T::SignedDepositPerItem::get().saturating_mul(1u32.into()), + ); + let stash = T::Currency::minimum_balance() * BalanceOf::::from(1000u32) + deposit; T::Currency::make_free_balance_be(&caller, stash); // for tests, we need to make sure there is _something_ in storage that is being // migrated. From a956f97bd21a5889a93cd2d03a877cfea55a2c56 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Fri, 4 Mar 2022 11:48:55 +0000 Subject: [PATCH 185/188] Fix cargo file --- frame/state-trie-migration/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index e5f11ec61e41e..fb8bccb52d1f2 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -29,7 +29,7 @@ frame-benchmarking = { default-features = false, path = "../benchmarking", optio serde = { version = "1.0.133", optional = true } thousands = { version = "0.2.0", optional = true } remote-externalities = { path = "../../utils/frame/remote-externalities", optional = true } -zstd = { "0.9.0", optional = true } +zstd = { version = "0.9.0", optional = true } [dev-dependencies] pallet-balances = { path = "../balances" } From 47654c25fa6c47ea1019e7b722daf0ece8bfa802 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Fri, 4 Mar 2022 14:37:19 +0000 Subject: [PATCH 186/188] Fix runtime deposit --- frame/state-trie-migration/src/lib.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index e08a9fbe82103..cd329a340239c 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -662,6 +662,9 @@ pub mod 
pallet { let deposit = T::SignedDepositBase::get().saturating_add( T::SignedDepositPerItem::get().saturating_mul((child_keys.len() as u32).into()), ); + sp_std::if_std! { + println!("+ {:?} / {:?} / {:?}", who, deposit, T::Currency::free_balance(&who)); + } ensure!(T::Currency::can_slash(&who, deposit), "not enough funds"); let mut dyn_size = 0u32; @@ -869,7 +872,10 @@ mod benchmarks { migrate_custom_child_success { let caller = frame_benchmarking::whitelisted_caller(); - let stash = T::Currency::minimum_balance() * BalanceOf::::from(10u32); + let deposit = T::SignedDepositBase::get().saturating_add( + T::SignedDepositPerItem::get().saturating_mul(1u32.into()), + ); + let stash = T::Currency::minimum_balance() * BalanceOf::::from(1000u32) + deposit; T::Currency::make_free_balance_be(&caller, stash); }: migrate_custom_child( frame_system::RawOrigin::Signed(caller.clone()), @@ -884,7 +890,10 @@ mod benchmarks { migrate_custom_child_fail { let caller = frame_benchmarking::whitelisted_caller(); - let stash = T::Currency::minimum_balance() * BalanceOf::::from(10u32); + let deposit = T::SignedDepositBase::get().saturating_add( + T::SignedDepositPerItem::get().saturating_mul(1u32.into()), + ); + let stash = T::Currency::minimum_balance() * BalanceOf::::from(1000u32) + deposit; T::Currency::make_free_balance_be(&caller, stash); // for tests, we need to make sure there is _something_ in storage that is being // migrated. 
From 6b162bb419b991d2ad227af66704ae85e37dc8ce Mon Sep 17 00:00:00 2001 From: kianenigma Date: Fri, 4 Mar 2022 14:50:49 +0000 Subject: [PATCH 187/188] make rustdoc happy --- frame/state-trie-migration/src/lib.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/frame/state-trie-migration/src/lib.rs b/frame/state-trie-migration/src/lib.rs index cd329a340239c..4de130e9ac06b 100644 --- a/frame/state-trie-migration/src/lib.rs +++ b/frame/state-trie-migration/src/lib.rs @@ -245,7 +245,7 @@ pub mod pallet { /// reading a key, we simply cannot know how many bytes it is. In other words, this should /// not be used in any environment where resources are strictly bounded (e.g. a parachain), /// but it is acceptable otherwise (relay chain, offchain workers). - pub(crate) fn migrate_until_exhaustion(&mut self, limits: MigrationLimits) { + pub fn migrate_until_exhaustion(&mut self, limits: MigrationLimits) { log!(debug, "running migrations on top of {:?} until {:?}", self, limits); if limits.item.is_zero() || limits.size.is_zero() { @@ -441,7 +441,7 @@ pub mod pallet { /// This should reflect the average storage value size in the worse case. type SignedDepositPerItem: Get>; - /// The base value of [`SignedDepositPerItem`]. + /// The base value of [`Config::SignedDepositPerItem`]. /// /// Final deposit is `items * SignedDepositPerItem + SignedDepositBase`. type SignedDepositBase: Get>; @@ -601,9 +601,6 @@ pub mod pallet { let deposit = T::SignedDepositBase::get().saturating_add( T::SignedDepositPerItem::get().saturating_mul((keys.len() as u32).into()), ); - sp_std::if_std! 
{ - println!("{:?} / {:?} / {:?}", who, deposit, T::Currency::free_balance(&who)); - } ensure!(T::Currency::can_slash(&who, deposit), "not enough funds"); let mut dyn_size = 0u32; From 3b9f530e2af7b44f7117dce0679107b6488d639c Mon Sep 17 00:00:00 2001 From: Parity Bot Date: Fri, 4 Mar 2022 15:08:58 +0000 Subject: [PATCH 188/188] cargo run --quiet --profile=production --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_state_trie_migration --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/state-trie-migration/src/weights.rs --template=./.maintain/frame-weight-template.hbs --- frame/state-trie-migration/src/weights.rs | 137 ++++++++++++++++++++++ 1 file changed, 137 insertions(+) create mode 100644 frame/state-trie-migration/src/weights.rs diff --git a/frame/state-trie-migration/src/weights.rs b/frame/state-trie-migration/src/weights.rs new file mode 100644 index 0000000000000..f08b115378f21 --- /dev/null +++ b/frame/state-trie-migration/src/weights.rs @@ -0,0 +1,137 @@ +// This file is part of Substrate. + +// Copyright (C) 2022 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_state_trie_migration +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! 
DATE: 2022-03-04, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// target/production/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_state_trie_migration +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/state-trie-migration/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_state_trie_migration. +pub trait WeightInfo { + fn continue_migrate() -> Weight; + fn continue_migrate_wrong_witness() -> Weight; + fn migrate_custom_top_success() -> Weight; + fn migrate_custom_top_fail() -> Weight; + fn migrate_custom_child_success() -> Weight; + fn migrate_custom_child_fail() -> Weight; + fn process_top_key(v: u32, ) -> Weight; +} + +/// Weights for pallet_state_trie_migration using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: StateTrieMigration MigrationProcess (r:1 w:1) + fn continue_migrate() -> Weight { + (13_385_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: StateTrieMigration MigrationProcess (r:1 w:0) + fn continue_migrate_wrong_witness() -> Weight { + (1_757_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + } + fn migrate_custom_top_success() -> Weight { + (12_813_000 as Weight) + } + // Storage: unknown [0x666f6f] (r:1 w:1) + fn migrate_custom_top_fail() -> Weight { + (24_961_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn migrate_custom_child_success() -> Weight { + (13_132_000 as Weight) + } + // Storage: unknown [0x666f6f] (r:1 w:1) + fn migrate_custom_child_fail() -> Weight { + (29_215_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + // Storage: unknown [0x6b6579] (r:1 w:1) + fn process_top_key(v: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: StateTrieMigration MigrationProcess (r:1 w:1) + fn continue_migrate() -> Weight { + (13_385_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: StateTrieMigration MigrationProcess (r:1 w:0) + fn continue_migrate_wrong_witness() -> Weight { + (1_757_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + } + fn migrate_custom_top_success() -> Weight { 
+ (12_813_000 as Weight) + } + // Storage: unknown [0x666f6f] (r:1 w:1) + fn migrate_custom_top_fail() -> Weight { + (24_961_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn migrate_custom_child_success() -> Weight { + (13_132_000 as Weight) + } + // Storage: unknown [0x666f6f] (r:1 w:1) + fn migrate_custom_child_fail() -> Weight { + (29_215_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + // Storage: unknown [0x6b6579] (r:1 w:1) + fn process_top_key(v: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(v as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } +}