diff --git a/fork-off/src/account_setting.rs b/fork-off/src/account_setting.rs
index 04a39dd60c..4819157717 100644
--- a/fork-off/src/account_setting.rs
+++ b/fork-off/src/account_setting.rs
@@ -135,7 +135,7 @@ pub fn apply_account_setting(mut state: Storage, setting: AccountSetting) -> Sto
         let account_hash = account.clone().into();
         let key = &account_map.join(&account_hash);
-        state.insert(key.clone(), info.clone().into());
+        state.top.insert(key.clone(), info.clone().into());
         info!(target: "fork-off", "Account info of `{:?}` set to `{:?}`", account, info);
     }
     state
diff --git a/fork-off/src/chainspec_combining.rs b/fork-off/src/chainspec_combining.rs
index c1f1d3069e..2734edd261 100644
--- a/fork-off/src/chainspec_combining.rs
+++ b/fork-off/src/chainspec_combining.rs
@@ -27,6 +27,7 @@ impl Index<&StoragePath> for PathCounter {
     }
 }
 
+/// Combines states - omitting child state, as we assume that it is empty in the initial chainspec
 pub fn combine_states(
     mut state: Storage,
     initial_state: Storage,
@@ -40,7 +41,7 @@ pub fn combine_states(
     let mut removed_per_path_count = PathCounter::default();
     let mut added_per_path_cnt = PathCounter::default();
 
-    state.retain(|k, _v| {
+    state.top.retain(|k, _v| {
         match storage_prefixes
             .iter()
             .find(|(_, prefix)| prefix.is_prefix_of(k))
@@ -53,13 +54,13 @@ pub fn combine_states(
         }
     });
 
-    for (k, v) in initial_state.iter() {
+    for (k, v) in initial_state.top.iter() {
         if let Some((path, _)) = storage_prefixes
             .iter()
             .find(|(_, prefix)| prefix.is_prefix_of(k))
         {
             added_per_path_cnt.bump(path);
-            state.insert(k.clone(), v.clone());
+            state.top.insert(k.clone(), v.clone());
         }
     }
diff --git a/fork-off/src/fetching.rs b/fork-off/src/fetching.rs
index 26deca58c8..99a4c7be9c 100644
--- a/fork-off/src/fetching.rs
+++ b/fork-off/src/fetching.rs
@@ -1,4 +1,4 @@
-use std::{collections::HashMap, iter::repeat_with, sync::Arc};
+use std::{iter::repeat_with, sync::Arc};
 
 use async_channel::Receiver;
 use futures::{future::join_all, join};
@@ -37,10 +37,23 @@ impl StateFetcher {
                 .await
                 .unwrap();
 
+            let child_storage_map_res = self
+                .client
+                .get_child_storage_for_key(key.clone(), &block)
+                .await
+                .unwrap();
+
             let mut output_guard = output.lock();
-            output_guard.insert(key, value);
-            if output_guard.len() % LOG_PROGRESS_FREQUENCY == 0 {
-                info!("Fetched {} values", output_guard.len());
+            output_guard.top.insert(key.clone(), value);
+            if let Some(child_storage_map) = child_storage_map_res {
+                info!("Fetched child trie with {} keys", child_storage_map.len());
+                output_guard
+                    .child_storage
+                    .insert(key.without_child_storage_prefix(), child_storage_map);
+            }
+
+            if output_guard.top.len() % LOG_PROGRESS_FREQUENCY == 0 {
+                info!("Fetched {} values", output_guard.top.len());
             }
         }
     }
@@ -49,7 +62,7 @@ impl StateFetcher {
         info!("Fetching state at block {:?}", block_hash);
         let (input, key_fetcher) = self.client.stream_all_keys(&block_hash);
 
-        let output = Arc::new(Mutex::new(HashMap::new()));
+        let output = Arc::new(Mutex::new(Storage::default()));
 
         let workers = repeat_with(|| {
             self.value_fetching_worker(block_hash.clone(), input.clone(), output.clone())
diff --git a/fork-off/src/fsio.rs b/fork-off/src/fsio.rs
index 97dec0c55d..8d8a54eab5 100644
--- a/fork-off/src/fsio.rs
+++ b/fork-off/src/fsio.rs
@@ -40,7 +40,7 @@ pub fn save_snapshot_to_file(snapshot: Storage, path: String) {
     let data = serde_json::to_vec_pretty(&snapshot).unwrap();
     info!(
         "Writing snapshot of {} key-val pairs and {} total bytes",
-        snapshot.len(),
+        snapshot.top.len(),
         data.len()
     );
     write_to_file(path, &data);
@@ -50,6 +50,6 @@ pub fn read_snapshot_from_file(path: String) -> Storage {
     let snapshot: Storage =
         serde_json::from_str(&fs::read_to_string(path).expect("Could not read snapshot file"))
             .expect("could not parse from snapshot");
-    info!("Read snapshot of {} key-val pairs", snapshot.len());
+    info!("Read snapshot of {} key-val pairs", snapshot.top.len());
     snapshot
 }
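Note on the two `fsio.rs` hunks above: the snapshot is (de)serialized through the derived serde impls of the new `Storage` struct, so the on-disk snapshot changes from a flat key-value map to an object with `top` and `child_storage` sections. A minimal sketch of the new shape, with plain `String`s standing in for the crate's `StorageKey`/`StorageValue` types:

```rust
use std::collections::{BTreeMap, HashMap};

use serde::Serialize;

// Simplified stand-in for `types::Storage`; the real struct keys by StorageKey.
#[derive(Serialize, Default)]
struct Storage {
    top: HashMap<String, String>,
    child_storage: HashMap<String, BTreeMap<String, String>>,
}

fn main() {
    let mut storage = Storage::default();
    storage.top.insert("0xdead".into(), "0x01".into());
    storage
        .child_storage
        .insert("0xbeef".into(), BTreeMap::from([("0x00".into(), "0x02".into())]));

    // Old snapshots were flat maps; new ones carry the two sections:
    // {"top":{"0xdead":"0x01"},"child_storage":{"0xbeef":{"0x00":"0x02"}}}
    println!("{}", serde_json::to_string(&storage).unwrap());
}
```

A side effect worth noting: snapshot files written before this change are flat maps, so they should no longer parse in `read_snapshot_from_file`, which now expects both fields.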
diff --git a/fork-off/src/jsonrpc_client.rs b/fork-off/src/jsonrpc_client.rs
index be89f650d9..97e343bd59 100644
--- a/fork-off/src/jsonrpc_client.rs
+++ b/fork-off/src/jsonrpc_client.rs
@@ -6,7 +6,7 @@ use jsonrpc_core::Error;
 use jsonrpc_core_client::{transports::ws, RpcError};
 use jsonrpc_derive::rpc;
 
-use crate::types::{BlockHash, StorageKey, StorageValue};
+use crate::types::{BlockHash, ChildStorageMap, StorageKey, StorageValue};
 
 #[rpc]
 pub trait Rpc {
@@ -28,6 +28,24 @@ pub trait Rpc {
         start_key: Option<StorageKey>,
         at: Option<BlockHash>,
     ) -> Result<Vec<StorageKey>, Error>;
+
+    #[rpc(name = "childstate_getKeysPaged")]
+    fn get_child_keys_paged(
+        &self,
+        child_storage: StorageKey,
+        prefix: StorageKey,
+        count: usize,
+        start_key: Option<StorageKey>,
+        at: Option<BlockHash>,
+    ) -> Result<Vec<StorageKey>, Error>;
+
+    #[rpc(name = "childstate_getStorageEntries")]
+    fn get_child_storage_entries(
+        &self,
+        child_storage: StorageKey,
+        keys: Vec<StorageKey>,
+        at: Option<BlockHash>,
+    ) -> Result<Vec<StorageValue>, Error>;
 }
 
 type RpcResult<T> = Result<T, RpcError>;
@@ -71,6 +89,68 @@ impl Client {
         (receiver, self.do_stream_all_keys(sender, at.clone()))
     }
 
+    /// Returns a map representing a single child trie
+    pub async fn get_child_storage_for_key(
+        &self,
+        child_key: StorageKey,
+        at: &BlockHash,
+    ) -> RpcResult<Option<ChildStorageMap>> {
+        let res = self
+            .get_child_storage_for_key_inner(child_key, at)
+            .await
+            .map(Some);
+
+        if let Err(RpcError::JsonRpcError(err)) = res {
+            // Empty child storage is returned as an error
+            if err.message == "Client error: Invalid child storage key" {
+                Ok(None)
+            } else {
+                Err(RpcError::JsonRpcError(err))
+            }
+        } else {
+            res
+        }
+    }
+
+    async fn get_child_storage_for_key_inner(
+        &self,
+        child_key: StorageKey,
+        at: &BlockHash,
+    ) -> RpcResult<ChildStorageMap> {
+        let empty_prefix = StorageKey::new("0x");
+        let mut child_storage_map = ChildStorageMap::new();
+        let mut start_key = None;
+
+        loop {
+            let keys = self
+                .client
+                .get_child_keys_paged(
+                    child_key.clone(),
+                    empty_prefix.clone(),
+                    CHUNK_SIZE,
+                    start_key,
+                    Some(at.clone()),
+                )
+                .await?;
+
+            let values = self
+                .client
+                .get_child_storage_entries(child_key.clone(), keys.clone(), Some(at.clone()))
+                .await?;
+
+            child_storage_map.append(&mut keys.iter().cloned().zip(values).collect());
+
+            let fetched = keys.len();
+            start_key = keys.last().cloned();
+
+            if fetched < CHUNK_SIZE {
+                break;
+            }
+        }
+
+        Ok(child_storage_map)
+    }
+
     async fn do_stream_all_keys(&self, sender: Sender<StorageKey>, at: BlockHash) -> RpcResult<()> {
         let empty_prefix = StorageKey::new("0x");
         let mut start_key = None;
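The `childstate_getKeysPaged`/`childstate_getStorageEntries` pair is driven by the paging loop in `get_child_storage_for_key_inner`: each round requests at most `CHUNK_SIZE` keys, resumes from the last key of the previous page, and stops at the first short page. A self-contained sketch of just that termination logic (the `fetch_page` closure and the `CHUNK_SIZE` value here are illustrative stand-ins, not the real RPC client):

```rust
const CHUNK_SIZE: usize = 4; // stand-in; the real constant lives in jsonrpc_client.rs

fn main() {
    // Stand-in for `childstate_getKeysPaged`: ten fake keys served in pages,
    // starting strictly after `start_key`.
    let all_keys: Vec<u32> = (0..10).collect();
    let fetch_page = |start_key: Option<u32>| -> Vec<u32> {
        let from = start_key.map(|k| k as usize + 1).unwrap_or(0);
        all_keys.iter().copied().skip(from).take(CHUNK_SIZE).collect()
    };

    let mut collected = Vec::new();
    let mut start_key = None;
    loop {
        let keys = fetch_page(start_key);
        collected.extend_from_slice(&keys);

        let fetched = keys.len();
        start_key = keys.last().copied();

        // A short (or empty) page means the child trie is exhausted.
        if fetched < CHUNK_SIZE {
            break;
        }
    }

    assert_eq!(collected, all_keys);
    println!("fetched {} keys in pages of at most {}", collected.len(), CHUNK_SIZE);
}
```

The same shape explains why the loop always terminates: a full page advances `start_key` past `CHUNK_SIZE` fresh keys, and anything less breaks out of the loop.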
diff --git a/fork-off/src/main.rs b/fork-off/src/main.rs
index 5468dc6ff9..804feb29ec 100644
--- a/fork-off/src/main.rs
+++ b/fork-off/src/main.rs
@@ -56,9 +56,8 @@ async fn main() -> anyhow::Result<()> {
     }
 
     let state = read_snapshot_from_file(snapshot_path);
-    let initial_state: Storage =
-        serde_json::from_value(initial_spec["genesis"]["raw"]["top"].take())
-            .expect("Deserialization of state from given chainspec file failed");
+    // Initialize with state from chainspec + empty child storage
+    let initial_state = Storage::new(&initial_spec);
 
     let state = combine_states(state, initial_state, storage_keep_state);
 
@@ -77,8 +76,12 @@ async fn main() -> anyhow::Result<()> {
     };
     let state = apply_account_setting(state, account_setting);
 
-    let json_state = serde_json::to_value(state).expect("Failed to convert a storage map to json");
+    let json_state =
+        serde_json::to_value(state.top).expect("Failed to convert a storage map to json");
+    let json_child_state =
+        serde_json::to_value(state.child_storage).expect("Failed to convert a storage map to json");
     initial_spec["genesis"]["raw"]["top"] = json_state;
+    initial_spec["genesis"]["raw"]["childrenDefault"] = json_child_state;
 
     let new_spec = serde_json::to_vec_pretty(&initial_spec)?;
     info!(target: "fork-off", "Writing new chainspec to {}", &combined_spec_path);
diff --git a/fork-off/src/types.rs b/fork-off/src/types.rs
index 8e32066457..1e866cf985 100644
--- a/fork-off/src/types.rs
+++ b/fork-off/src/types.rs
@@ -5,11 +5,16 @@
 //! to a function, as `clippy` screams outrageously about changing it to `&str` and then the alias
 //! is useless.
 
-use std::{collections::HashMap, fmt::Debug, str::FromStr};
+use std::{
+    collections::{BTreeMap, HashMap},
+    fmt::Debug,
+    str::FromStr,
+};
 
 use codec::Encode;
 use frame_support::{sp_runtime::AccountId32, Blake2_128Concat, StorageHasher, Twox128};
-use hex::ToHex;
+use hex::{encode, ToHex};
+use jsonrpc_core::Value;
 use serde::{Deserialize, Serialize};
 
 pub trait Get<T = String> {
@@ -17,7 +22,7 @@
 }
 
 /// Remove leading `"0x"`.
-fn strip_hex<T: ToString>(t: &T) -> String {
+pub fn strip_hex<T: ToString>(t: &T) -> String {
     let s = t.to_string();
     s.strip_prefix("0x").map(ToString::to_string).unwrap_or(s)
 }
@@ -69,7 +74,7 @@ impl Get for StoragePath {
 }
 
 /// Hex-encoded key in raw chainspec.
-#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
+#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize, PartialOrd, Ord)]
 pub struct StorageKey(String);
 
 impl From<&StorageKey> for Vec<u8> {
@@ -78,6 +83,8 @@
     }
 }
 
+pub const CHILD_STORAGE_PREFIX: &[u8] = b":child_storage:default:";
+
 impl StorageKey {
     pub fn new<T: ToString + ?Sized>(content: &T) -> Self {
         Self(as_hex(content))
     }
@@ -96,6 +103,16 @@ impl StorageKey {
         let longer = as_hex(&other.0);
         longer.starts_with(&shorter)
     }
+
+    pub fn without_child_storage_prefix(self) -> Self {
+        StorageKey::new(
+            &(as_hex(
+                &self
+                    .get()
+                    .split_off(as_hex(&encode(CHILD_STORAGE_PREFIX)).len()),
+            )),
+        )
+    }
 }
 
 /// Convert `AccountId` to `StorageKey` using `Blake2_128Concat` hashing algorithm.
@@ -169,6 +186,27 @@ impl FromStr for BlockHash {
 }
 
 /// Content of `chainspec["genesis"]["raw"]["top"]`.
-pub type Storage = HashMap<StorageKey, StorageValue>;
+pub type TopStorage = HashMap<StorageKey, StorageValue>;
+
+/// Content of `chainspec["genesis"]["raw"]["childrenDefault"]`.
+pub type ChildStorage = HashMap<StorageKey, ChildStorageMap>;
+
+pub type ChildStorageMap = BTreeMap<StorageKey, StorageValue>;
+
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct Storage {
+    pub top: TopStorage,
+    pub child_storage: ChildStorage,
+}
 
 pub type Balance = u128;
+
+impl Storage {
+    pub fn new(initial_spec: &Value) -> Self {
+        Storage {
+            top: serde_json::from_value(initial_spec["genesis"]["raw"]["top"].clone())
+                .expect("Deserialization of state from initial chainspec has failed"),
+            child_storage: ChildStorage::new(),
+        }
+    }
+}
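`without_child_storage_prefix` above relies on every default child-trie key in the top-level state starting with the hex encoding of `CHILD_STORAGE_PREFIX`; splitting at that fixed length leaves the bare trie identifier that becomes the key in `childrenDefault`. A worked example of the length arithmetic, using only the `hex` crate (the `deadbeef` trie id is hypothetical):

```rust
// `:child_storage:default:` is 23 bytes, i.e. 46 hex characters, 48 with "0x".
const CHILD_STORAGE_PREFIX: &[u8] = b":child_storage:default:";

fn main() {
    let hex_prefix = format!("0x{}", hex::encode(CHILD_STORAGE_PREFIX));
    assert_eq!(hex_prefix.len(), 2 + 2 * CHILD_STORAGE_PREFIX.len()); // 48

    // A hypothetical child-trie key as it appears among the top-level keys:
    let mut key = format!("{}{}", hex_prefix, "deadbeef");

    // `split_off` keeps the head in `key` and returns the tail: the bare
    // trie id that `childrenDefault` uses as its map key.
    let trie_id = key.split_off(hex_prefix.len());
    assert_eq!(trie_id, "deadbeef");
    println!("trie id: 0x{}", trie_id);
}
```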